message (string, 13–484 chars) | diff (string, 38–4.63k chars)
---|---|
tests: when using pytest mark decorators ensure all fixtures are defined
Decorating a test method directly with a pytest mark seems to break if
the test function does not explicitly define all pytest fixtures it
expects to receive. | @@ -15,7 +15,7 @@ class TestInstall(object):
assert File(node["conf_path"]).is_file
@pytest.mark.no_docker
- def test_ceph_command_exists(self, Command):
+ def test_ceph_command_exists(self, Command, node):
assert Command.exists("ceph")
|
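A minimal, self-contained sketch of the pattern described above, assuming a hypothetical `node` fixture and a `no_docker` mark (placeholders for the testinfra fixtures used in the real suite): every fixture the test body uses has to be named in the test's signature, otherwise pytest will not inject it once the mark decorator is applied.

```python
import pytest

@pytest.fixture
def node():
    # Placeholder for the infrastructure fixture the real tests receive.
    return {"conf_path": "/etc/ceph/ceph.conf"}

class TestInstall:
    @pytest.mark.no_docker             # custom mark; registered in pytest.ini in practice
    def test_conf_exists(self, node):  # the fixture must be listed here to be injected
        assert node["conf_path"].endswith(".conf")
```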
Make `streamlit help` not crash on first run if unable to load files
from the web. | @@ -191,10 +191,14 @@ def display_reference():
except urllib.error.URLError:
st.error(f'Unable to load file from {image_url}. '
'Is the internet connected?')
+ except Exception as e:
+ st.exception(e)
+ return None
image_url = 'https://images.fineartamerica.com/images/artworkimages/mediumlarge/1/serene-sunset-robert-bynum.jpg'
image_bytes = read_file_from_url(image_url)
+ if image_bytes is not None:
with st.echo():
image = Image.open(BytesIO(image_bytes))
@@ -215,6 +219,7 @@ def display_reference():
browsers. Below is an example of an _ogg_-formatted file:
''')
+ if audio_bytes is not None:
with st.echo():
st.audio(audio_bytes, format='audio/ogg')
@@ -228,6 +233,7 @@ def display_reference():
video_url = 'https://www.sample-videos.com/video/mp4/480/big_buck_bunny_480p_2mb.mp4'
video_bytes = read_file_from_url(video_url)
+ if video_bytes is not None:
with st.echo():
st.video(video_bytes, format='video/webm')
|
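A rough sketch of the guard pattern the diff applies, using a made-up `read_file_from_url` helper and example URL: the download helper returns `None` on failure, and the demo code only runs when bytes actually arrived.

```python
import urllib.error
import urllib.request

def read_file_from_url(url):
    """Return the response body as bytes, or None if the download fails."""
    try:
        with urllib.request.urlopen(url, timeout=10) as response:
            return response.read()
    except urllib.error.URLError:
        return None

image_bytes = read_file_from_url("https://example.com/image.jpg")
if image_bytes is not None:
    # Only touch the payload when the download actually succeeded.
    print(f"downloaded {len(image_bytes)} bytes")
```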
api/common/ColorLight: add blink and animate
These are the generalizations of the corresponding methods for single color lights. | @@ -450,26 +450,37 @@ class ColorLight:
"""Turns off the light."""
pass
- def pattern(self, pattern, duration):
- """Makes the light follow a color pattern as a function of time.
+ def blink(self, color, durations):
+ """Blinks the light at a given color by turning it on and off for given
+ durations.
- The specified pattern function will be sampled at 64 points between 0
- and the specified duration. The light will be held constant between
- samples. After the given duration, the pattern repeats.
+ The light keeps blinking indefinitely while the rest of your
+ program keeps running.
+
+ This method provides a simple way to make basic but useful patterns.
+ For more generic and multi-color patterns, use :meth:`.animate`
+ instead.
+
+ Arguments:
+ color (Color): Color of the light.
+ durations (list): List of (:ref:`time`) values of the
+ form ``[on_1, off_1, on_2, off_2, ...]``.
+ """
+
+ def animate(self, colors, interval):
+ """Animates the light with a list of colors. The next
+ color in the list is shown after the given interval.
+
+ The animation runs in the background while the rest of your program
+ keeps running. When the animation completes, it repeats.
Arguments:
- pattern (callable): Function of the
- form ``h, s, v = func(t)`` that returns a tuple of ``h``,
- ``s``, and ``v`` as a function of time ``t`` in milliseconds.
- A function of the
- form :class:`col <.parameters.Color>` ``= func(t)`` is also
- allowed.
- time (:ref:`time`): Duration of the pattern.
+ colors (list): List of :ref:`color` values.
+ interval (:ref:`time`): Time between color updates.
"""
def reset(self):
- """Resets the light to the default system behavior."""
- # This method is exposed on system lights only.
+ """Resets the light to the default system behavior or animation."""
pass
|
Sparse and Dense vector fields
Closes | @@ -297,6 +297,16 @@ class Float(Field):
def _deserialize(self, data):
return float(data)
+class DenseVector(Float):
+ name = 'dense_vector'
+
+ def __init__(self, dims, **kwargs):
+ kwargs["multi"] = True
+ super(DenseVector, self).__init__(dims=dims, **kwargs)
+
+class SparseVector(Field):
+ name = 'sparse_vector'
+
class HalfFloat(Float):
name = 'half_float'
|
Prevent download_profile_photo from downloading arbitrary files
First of all, because it shouldn't be doing that. Second, it was
buggy and was passing the tuple returned by get_input_location to
download_file which doesn't accept tuples (instead it should be
passed the photo object so that download_file could return dc_id
and input file location itself). | @@ -78,9 +78,10 @@ class DownloadMethods(UserMethods):
if isinstance(photo, (types.UserProfilePhoto, types.ChatPhoto)):
loc = photo.photo_big if download_big else photo.photo_small
else:
- try:
- loc = utils.get_input_location(photo)
- except TypeError:
+ # It doesn't make any sense to check if `photo` can be used
+ # as input location, because then this method would be able
+ # to "download the profile photo of a message", i.e. its
+ # media which should be done with `download_media` instead.
return None
file = self._get_proper_filename(
|
Slack importer: Disable often-breaking test in CI.
This test randomly fails far too often in Travis -- I think more than
all our other tests combined. It needs to be fixed before we can ask
everyone to look at build failures it causes. | @@ -26,7 +26,6 @@ set -x
./tools/test-run-dev
./tools/test-queue-worker-reload
-./tools/test-slack-importer
# NB: Everything here should be in `tools/test-all`. If there's a
# reason not to run it there, it should be there as a comment
# explaining why.
|
Code block: clarify get_instructions's docstring
It wasn't clear that it also parses the message content. | @@ -147,7 +147,11 @@ def _get_no_lang_message(content: str) -> Optional[str]:
def get_instructions(content: str) -> Optional[str]:
- """Return code block formatting instructions for `content` or None if nothing's wrong."""
+ """
+ Parse `content` and return code block formatting instructions if something is wrong.
+
+ Return None if `content` lacks code block formatting issues.
+ """
log.trace("Getting formatting instructions.")
blocks = parsing.find_code_blocks(content)
|
Updates default idle='Gi' => idle=() argument in filter_circuit(s)
Another update as we get rid of 'Gi' references in favor of an
"empty layer" idle. | @@ -756,7 +756,7 @@ def manipulate_circuit_list(circuitList, sequenceRules, line_labels="auto"):
return [ manipulate_circuit(opstr, sequenceRules, line_labels) for opstr in circuitList ]
-def filter_circuits(circuits, sslbls_to_keep, new_sslbls=None, drop=False, idle='Gi'):
+def filter_circuits(circuits, sslbls_to_keep, new_sslbls=None, drop=False, idle=() ):
"""
Removes any labels from `circuits` whose state-space labels are not
entirely in `sslbls_to_keep`. If a gates label's state-space labels
@@ -805,7 +805,7 @@ def filter_circuits(circuits, sslbls_to_keep, new_sslbls=None, drop=False, idle=
return [filter_circuit(s,sslbls_to_keep,new_sslbls,idle) for s in circuits]
-def filter_circuit(circuit, sslbls_to_keep, new_sslbls=None, idle='Gi'):
+def filter_circuit(circuit, sslbls_to_keep, new_sslbls=None, idle=() ):
"""
Removes any labels from `circuit` whose state-space labels are not
entirely in `sslbls_to_keep`. If a gates label's state-space labels
|
Add resilienceproject to allowed redirect URIs
In the future, a more programmatic method for enabling
iOS/Android projects should be implemented. | @@ -470,7 +470,11 @@ OAUTH2_PROVIDER = {
},
'AUTHORIZATION_CODE_EXPIRE_SECONDS': 60 * 30,
'REQUEST_APPROVAL_PROMPT': 'auto',
- 'ALLOWED_REDIRECT_URI_SCHEMES': ['http', 'https', 'openhumanshk'],
+ 'ALLOWED_REDIRECT_URI_SCHEMES': [
+ 'http', 'https',
+ # Redirect URIs that are using iOS or Android app-registered schema
+ 'openhumanshk', 'resilienceproject',
+ ],
}
REST_FRAMEWORK = {
|
[doc] Show current decommissions within HISTORY.rst
Bot owners are informed about current decommissions
with this new list. | Release history
===============
+Current decommissions
+---------------------
+
+* 3.0.20200405: Site and Page methods deprecated for 10 years or longer will be removed
+* 3.0.20200405: Usage of SkipPageError with BaseBot will be removed
+* 3.0.20200326: Functions dealing with stars list may be removed
+* 3.0.20200306: Support of MediaWiki releases below 1.19 will be dropped (T245350)
+* 3.0.20200306: tools.ip will be dropped in favour of tools.is_IP (T243171)
+* 3.0.20200306: tools.ip_regexp will be removed in next release
+* 3.0.20200111: Support for Python 3.4 will be dropped (T239542)
+* 3.0.20190722: test_family will be removed (T228375, T228300)
+* 3.0.20190722: Deprecation warning: support for Python 2 will be dropped in 4/2020 (T213287)
+
Current release
---------------
|
Sets mapping_method to claim for github
Set the mapping method to claim like it is set for the deployment.
Mapping method true is invalid and the ansible playbook will error out. | @@ -26,7 +26,7 @@ openshift_master_default_subdomain={{ wildcard_zone }}
osm_default_node_selector="role=app"
deployment_type={{ deployment_type | default('openshift-enterprise') }}
os_sdn_network_plugin_name={{ openshift_sdn | default('redhat/openshift-ovs-subnet') }}
-openshift_master_identity_providers=[{'name': 'github', 'challenge': 'false', 'login': 'true', 'kind': 'GitHubIdentityProvider', 'mapping_method': 'true', 'clientID': '{{ github_client_id }}', 'clientSecret': '{{ github_client_secret }}', 'organizations': {'['{{ github_organization }}']'}}]
+openshift_master_identity_providers=[{'name': 'github', 'challenge': 'false', 'login': 'true', 'kind': 'GitHubIdentityProvider', 'mapping_method': 'claim', 'clientID': '{{ github_client_id }}', 'clientSecret': '{{ github_client_secret }}', 'organizations': {'['{{ github_organization }}']'}}]
osm_use_cockpit=true
containerized={{ containerized | default('false') }}
openshift_hosted_registry_storage_kind=object
|
ArnoldShaderUITest : check behaviour for registered plug metadata
When registering metadata for a specific plug, it shouldn't override the
metadata lookup on the base class for another plug. | @@ -190,6 +190,15 @@ root["SceneWriter"].execute()
self.assertEqual( parms["filename"].value, "overrideUserDefault" )
self.assertEqual( parms["filter"].value, "bilinear" )
+ def testBaseClassMetadataLookup( self ) :
+
+ surface = GafferArnold.ArnoldShader()
+ surface.loadShader( "standard_surface" )
+
+ # Make sure that metadata registration based on mechanism in GafferScene.ShaderUI works
+ Gaffer.Metadata.registerValue( "ai:surface:standard_surface:aov_id1", "userDefault", "id_1" )
+
+ self.assertEqual( Gaffer.Metadata.value( surface["parameters"]["aov_id1"], "userDefault" ), "id_1" )
if __name__ == "__main__":
unittest.main()
|
mmctl corrections
* mmctl corrections
Documentation for:
Applied corrections to mmctl team users add and mmctl channel create child command documentation
* updated mmctl team users delete | @@ -937,14 +937,14 @@ Create a channel.
.. code-block:: sh
- channel create --team myteam --name mynewchannel --display_name "My New Channel"
- channel create --team myteam --name mynewprivatechannel --display_name "My New Private Channel" --private
+ channel create --team myteam --name mynewchannel --display-name "My New Channel"
+ channel create --team myteam --name mynewprivatechannel --display-name "My New Private Channel" --private
**Options**
.. code-block:: sh
- --display_name string Channel Display Name
+ --display-name string Channel Display Name
--header string Channel header
-h, --help help for create
--name string Channel Name
@@ -5160,7 +5160,7 @@ Add specified users to a team.
.. code-block:: sh
- team add myteam [email protected] username
+ team users add myteam [email protected] username
**Options**
@@ -5198,7 +5198,7 @@ Remove specified users from a team.
.. code-block:: sh
- team remove myteam [email protected] username
+ team users remove myteam [email protected] username
**Options**
|
Default 'is_carousel_bumped_post'
Setting default
'is_carousel_bumped_post': 'false'
TODO: understand when it's 'true' | @@ -657,7 +657,8 @@ class API(object):
data = self.action_data({
'media_id': media_id,
'container_module': container_module,
- 'feed_position': feed_position})
+ 'feed_position': feed_position,
+ 'is_carousel_bumped_post': 'false'})
if container_module == 'feed_timeline':
data.update({'inventory_source': 'media_or_ad'})
if username:
|
Update recipes/jsoncpp/all/conanfile.py
remove redundant tools.Version call | @@ -39,7 +39,7 @@ class JsoncppConan(ConanFile):
tools.replace_in_file(os.path.join(self._source_subfolder, "src", "lib_json", "CMakeLists.txt"),
"set_target_properties( jsoncpp_lib PROPERTIES POSITION_INDEPENDENT_CODE ON)",
"set_target_properties( jsoncpp_lib PROPERTIES POSITION_INDEPENDENT_CODE OFF)")
- if tools.Version(self.version) > tools.Version("1.9.0"):
+ if tools.Version(self.version) > "1.9.0":
tools.replace_in_file(os.path.join(self._source_subfolder, "src", "lib_json", "CMakeLists.txt"),
"$<BUILD_INTERFACE:${PROJECT_BINARY_DIR}/include/json>",
"")
|
ci: only reject once and fix dismiss
`Ana06/[email protected]` is a fork of
which
fixes `DISMISS` and provides an `allow_duplicate` option that allows approving only once.
echo $FILES | grep -qF 'CHANGELOG.md' || echo $PR_BODY | grep -qiF "$NO_CHANGELOG"
- name: Reject pull request if no CHANGELOG update
if: ${{ always() && steps.changelog_updated.outcome == 'failure' }}
- uses: andrewmusgrave/[email protected]
+ uses: Ana06/[email protected]
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
event: REQUEST_CHANGES
body: "Please add bug fixes, new features, breaking changes and anything else you think is worthwhile mentioning to the `master (unreleased)` section of CHANGELOG.md. If no CHANGELOG update is needed add the following to the PR description: `${{ env.NO_CHANGELOG }}`"
+ allow_duplicate: false
- name: Dismiss previous review if CHANGELOG update
- uses: andrewmusgrave/[email protected]
+ uses: Ana06/[email protected]
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
event: DISMISS
|
Removed termsAgreement (leftover from vf)
also changed UX to match instantschools' (Talked about with Khang and Christian) | autocomplete="new-password"
required />
- <icon-button :disabled="canSubmit" id="submit" :primary="true" :text="$tr('finish')" type="submit" />
+ <icon-button :disabled="busy" id="submit" :primary="true" :text="$tr('finish')" type="submit" />
</form>
username: '',
password: '',
confirmed_password: '',
- termsAgreement: false,
}),
computed: {
signInPage() {
return this.errorCode === 400;
},
allFieldsPopulated() {
- return !(this.name && this.username && this.password && this.confirmed_password);
- },
- canSubmit() {
- return !this.termsAgreement || this.allFieldsPopulated || !this.passwordsMatch || this.busy;
+ return this.name && this.username && this.password && this.confirmed_password;
},
errorMessage() {
return this.backendErrorMessage || this.$tr('genericError');
},
methods: {
signUp() {
+ const canSubmit =
+ this.allFieldsPopulated &&
+ this.passwordsMatch &&
+ !this.busy;
+
+ if (canSubmit) {
this.signUpAction({
full_name: this.name,
username: this.username,
password: this.password,
});
+ }
},
},
vuex: {
|
Provide a better `repr` for `Node` and `Inputs`
Useful for debugging purposes. I got hit with non-understandable output
for failing tests, and these `repr` implementations help a lot with
this. | @@ -47,6 +47,10 @@ class Inputs(MM):
def __len__(self):
return self.target._in_edges.__len__()
+ def __repr__(self):
+ return repr("<{0.__module__}.{0.__name__}: {1!r}>"
+ .format(type(self), dict(self)))
+
class Outputs(UD):
""" Helper that intercepts modifications to update `Inputs` symmetrically.
@@ -254,6 +258,10 @@ class Node:
def __str__(self):
return str(self.label)
+ def __repr__(self):
+ return repr("<{0.__module__}.{0.__name__}: {1!r}>"
+ .format(type(self), self.label))
+
@property
def label(self):
""" object :
|
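A stripped-down sketch of the `__repr__` pattern added in the diff; the `Node` class here is a stand-in that only carries a label.

```python
class Node:
    def __init__(self, label):
        self.label = label

    def __repr__(self):
        # Include module, class name and label so failing-test output identifies
        # the object instead of printing the default "<... object at 0x...>".
        return "<{0.__module__}.{0.__name__}: {1!r}>".format(type(self), self.label)

print(repr(Node("electricity_bus")))   # <__main__.Node: 'electricity_bus'>
```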
Set "entry.qualified_name" for builtin types.
Fixes | @@ -1241,12 +1241,14 @@ class ModuleScope(Scope):
entry.is_builtin = 1
entry.is_const = 1 # cached
entry.name = name
+ entry.qualified_name = '__builtin__.' + name
entry.cname = Naming.builtin_prefix + name
self.cached_builtins.append(entry)
self.undeclared_cached_builtins.append(entry)
else:
entry.is_builtin = 1
entry.name = name
+ entry.qualified_name = '__builtin__.' + name
return entry
def find_module(self, module_name, pos, relative_level=-1):
|
Cosmetic changes in the comments
I verified that Octopart ignores spaces inside the manf# | @@ -109,7 +109,6 @@ def log_response(text):
f.write(text + '\n')
-# Change the logging print channel to `tqdm` to keep the process bar to the end of terminal.
class TqdmLoggingHandler(logging.Handler):
'''Overload the class to write the logging through the `tqdm`.'''
def __init__(self, level=logging.NOTSET):
@@ -176,7 +175,8 @@ class api_partinfo_kitspace(distributor_class):
query_type = re.sub(r'\{DISTRIBUTORS\}', '["' + '","'.join(sorted(distributors)) + '"]', query_type)
# r = requests.post(url, {"query": QUERY_SEARCH, "variables": variables}) #TODO future use for ISSUE #17
variables = '{"input":[' + ','.join(query_parts) + ']}'
- # TODO: REALLY???!!!! This removes spaces in mnf# codes
+ # Remove all spaces, even inside the manf#
+ # SET comment: this is how the code always worked. Octopart (used by KitSpace) ignores spaces inside manf# codes.
variables = variables.replace(' ', '')
# Do the query using POST
data = 'query={}&variables={}'.format(quote_plus(query_type), quote_plus(variables))
@@ -336,6 +336,7 @@ class api_partinfo_kitspace(distributor_class):
'''Fill-in the parts with price/qty/etc info from KitSpace.'''
gv.logger.log(DEBUG_OVERVIEW, '# Getting part data from KitSpace...')
+ # Change the logging print channel to `tqdm` to keep the process bar to the end of terminal.
# Get handles to default sys.stdout logging handler and the
# new "tqdm" logging handler.
if len(gv.logger.handlers) > 0:
|
llvm/execution/FuncExecution: set input/output ctypes and the output
buffer in constructor.
These don't have to be recreated on every execution. | @@ -120,17 +120,23 @@ class FuncExecution(CUDAExecution):
self._execution_ids = execution_ids
self._component = component
- par_struct_ty, ctx_struct_ty, _, _ = self._bin_func.byref_arg_types
+ par_struct_ty, ctx_struct_ty, vi_ty, vo_ty = self._bin_func.byref_arg_types
if len(execution_ids) > 1:
self._bin_multirun = self._bin_func.get_multi_run()
par_struct_ty = par_struct_ty * len(execution_ids)
ctx_struct_ty = ctx_struct_ty * len(execution_ids)
+ vo_ty = vo_ty * len(execution_ids)
+ vi_ty = vi_ty * len(execution_ids)
par_initializer = (component._get_param_initializer(ex_id) for ex_id in execution_ids)
ctx_initializer = (component._get_context_initializer(ex_id) for ex_id in execution_ids)
self.__param_struct = par_struct_ty(*par_initializer)
self.__context_struct = ctx_struct_ty(*ctx_initializer)
+ self._ct_len = ctypes.c_int(len(execution_ids))
+
+ self._ct_vo = vo_ty()
+ self._vi_ty = vi_ty
def _get_compilation_param(self, name, initializer, arg, execution_id):
param = getattr(self._component._compilation_data, name)
@@ -158,11 +164,6 @@ class FuncExecution(CUDAExecution):
return self._get_compilation_param('context_struct', '_get_context_initializer', 1, self._execution_ids[0])
def execute(self, variable):
- _, _ , vi_ty, vo_ty = self._bin_func.byref_arg_types
- if len(self._execution_ids) > 1:
- vo_ty = vo_ty * len(self._execution_ids)
- vi_ty = vi_ty * len(self._execution_ids)
- ct_vo = vo_ty()
new_variable = np.asfarray(variable)
if len(self._execution_ids) > 1:
@@ -171,15 +172,14 @@ class FuncExecution(CUDAExecution):
ct_vi = np.ctypeslib.as_ctypes(new_variable)
self._bin_multirun.wrap_call(self._param_struct,
self._context_struct,
- ct_vi, ct_vo,
- ctypes.c_int(len(self._execution_ids)))
+ ct_vi, self._ct_vo, self._ct_len)
else:
- ct_vi = new_variable.ctypes.data_as(ctypes.POINTER(vi_ty))
+ ct_vi = new_variable.ctypes.data_as(ctypes.POINTER(self._vi_ty))
self._bin_func(ctypes.byref(self._param_struct),
ctypes.byref(self._context_struct),
- ct_vi, ctypes.byref(ct_vo))
+ ct_vi, ctypes.byref(self._ct_vo))
- return _convert_ctype_to_python(ct_vo)
+ return _convert_ctype_to_python(self._ct_vo)
class MechExecution(FuncExecution):
|
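A small, hypothetical sketch of the ctypes idea behind this change: array types are built by multiplying a base ctypes type, and the output buffer and length argument can be allocated once up front and reused across calls instead of being recreated per execution.

```python
import ctypes

n_runs = 4
vo_ty = ctypes.c_double * 3        # per-run output type (3 doubles)
multi_vo_ty = vo_ty * n_runs       # replicate it for every execution id
ct_vo = multi_vo_ty()              # allocate the reusable output buffer once
ct_len = ctypes.c_int(n_runs)      # constant argument, also built once

print(ctypes.sizeof(ct_vo))        # 96: 4 runs x 3 doubles x 8 bytes each
print(ct_len.value)                # 4
```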
Update corehq/util/es/interface.py
Review improvement | @@ -82,7 +82,7 @@ class ElasticsearchInterface:
self._verify_is_alias(index_alias)
doc_adapter = self._get_doc_adapter(index_alias, doc_type)
query = {} if body is None else body
- params = params if params else {}
+ params = {} if params is not None else params
return doc_adapter.search(query, params=params, **kwargs)
def iter_scroll(self, index_alias, doc_type, body=None,
|
VTFLib wrapper: Add MacOS support into 3rd place,
how many times do I need to add it? | @@ -64,6 +64,8 @@ class VTFLib:
cls.vtflib_cdll = WinDLL(os.path.join(full_path, vtf_lib_name))
elif platform_name == "Linux":
cls.vtflib_cdll = cdll.LoadLibrary(os.path.join(full_path, vtf_lib_name))
+ elif platform_name == 'Darwin':
+ cls.vtflib_cdll = cdll.LoadLibrary(os.path.join(full_path, vtf_lib_name))
else:
raise NotImplementedError("Platform {} isn't supported".format(platform_name))
|
Update README.rst
update link, small change | NVIDIA Neural Modules: NeMo
===========================
-NeMo is one of the solutions offered in NVIDIA `Conversational AI tools <https://developer.nvidia.com/conversational-ai#started>`_
+NeMo is a toolkit for defining and building new state of the art deep learning models for `Conversational AI <https://developer.nvidia.com/conversational-ai#started>`_ applications
Goal of the NeMo toolkit is to make it possible for researchers to easily and safely compose complex neural network architectures for conversational AI using reusable components. Built for speed, NeMo can scale out training to multiple GPUs and multiple nodes.
|
improve broadcast handling in batching.py
We can avoid a circular import just by `import jax`! | import numpy as onp
from typing import Any, Callable, Dict, Optional, Tuple, Union
+import jax
from .. import core
from ..core import Trace, Tracer, new_master
from ..abstract_arrays import ShapedArray, raise_to_shaped
@@ -23,7 +24,6 @@ from .. import linear_util as lu
from ..util import unzip2, partial, safe_map, wrap_name, split_list
from . import xla
from . import partial_eval as pe
-from .. import lax_reference
map = safe_map
@@ -294,13 +294,6 @@ defvectorized(xla.device_put_p)
### util
-# These utilities depend on primitives for things like broadcasting, reshaping,
-# and transposition on arrays. To avoid a circular import from depending on
-# lax.py, these functions use method dispatch on their arguments, which could be
-# DeviceArrays, numpy.ndarrays, or traced versions of those. This strategy
-# almost works, except for broadcast, for which raw numpy.ndarrays don't have a
-# method. To handle that case, the `broadcast` function uses a try/except.
-
class _Last(object): pass
last = _Last()
@@ -312,10 +305,7 @@ def broadcast(x, sz, axis):
shape = list(onp.shape(x))
shape.insert(axis, sz)
broadcast_dims = tuple(onp.delete(onp.arange(len(shape)), axis))
- if isinstance(x, onp.ndarray) or onp.isscalar(x):
- return lax_reference.broadcast_in_dim(x, shape, broadcast_dims)
- else:
- return x.broadcast_in_dim(shape, broadcast_dims)
+ return jax.lax.broadcast_in_dim(x, shape, broadcast_dims)
def moveaxis(x, src, dst):
if core.get_aval(x) is core.abstract_unit:
|
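A runnable sketch (with arbitrary data) of what `jax.lax.broadcast_in_dim` does with the shape and dimension bookkeeping computed in `broadcast`:

```python
import numpy as onp
import jax

x = onp.arange(3.0)                 # shape (3,)
sz, axis = 4, 0                     # insert a new batch dimension of size 4 at axis 0

shape = list(onp.shape(x))
shape.insert(axis, sz)              # [4, 3]
broadcast_dims = tuple(onp.delete(onp.arange(len(shape)), axis))  # (1,)

y = jax.lax.broadcast_in_dim(x, shape, broadcast_dims)
print(y.shape)                      # (4, 3): x is repeated along the new axis
```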
Clean up TODO
has been merged. That said, `cirq.Symbol` has been replaced with `sympy.Symbol` anyway and the latter doesn't suffer from the issue addressed by `+0`. | @@ -733,7 +733,7 @@ class CZPowGate(eigen_gate.EigenGate,
if protocols.is_parameterized(self):
return NotImplemented
global_phase = 1j**(2 * self._exponent * self._global_shift)
- z_phase = 1j**(self._exponent + 0) # TODO: Cleanup after #1389.
+ z_phase = 1j**self._exponent
c = -1j * z_phase * np.sin(np.pi * self._exponent / 2) / 2
return {
'II': global_phase * (1 - c),
@@ -856,7 +856,7 @@ class CNotPowGate(eigen_gate.EigenGate, gate_features.TwoQubitGate):
if protocols.is_parameterized(self):
return NotImplemented
global_phase = 1j**(2 * self._exponent * self._global_shift)
- cnot_phase = 1j**(self._exponent + 0) # TODO: Cleanup after #1389.
+ cnot_phase = 1j**self._exponent
c = -1j * cnot_phase * np.sin(np.pi * self._exponent / 2) / 2
return {
'II': global_phase * (1 - c),
@@ -960,7 +960,7 @@ class SwapPowGate(eigen_gate.EigenGate,
if protocols.is_parameterized(self):
return NotImplemented
global_phase = 1j**(2 * self._exponent * self._global_shift)
- swap_phase = 1j**(self._exponent + 0) # TODO: Cleanup after #1389.
+ swap_phase = 1j**self._exponent
c = -1j * swap_phase * np.sin(np.pi * self._exponent / 2) / 2
return {
'II': global_phase * (1 - c),
|
Branding: raise custom error when constructing remote objects
The default KeyError message from dict lookup is just the missing key.
In order to give more context in the log message, we raise our own. | @@ -43,6 +43,9 @@ class RemoteObject:
def __init__(self, dictionary: t.Dict[str, t.Any]) -> None:
"""Initialize by grabbing annotated attributes from `dictionary`."""
+ missing_keys = self.__annotations__.keys() - dictionary.keys()
+ if missing_keys:
+ raise KeyError(f"Fetched object lacks expected keys: {missing_keys}")
for annotation in self.__annotations__:
setattr(self, annotation, dictionary[annotation])
|
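A self-contained sketch of the lookup-error pattern the diff adds, using a hypothetical two-field class: the set difference between the annotated attribute names and the payload's keys lists everything the fetched object failed to provide.

```python
import typing as t

class RemoteObject:
    name: str
    url: str

    def __init__(self, dictionary: t.Dict[str, t.Any]) -> None:
        missing_keys = self.__annotations__.keys() - dictionary.keys()
        if missing_keys:
            raise KeyError(f"Fetched object lacks expected keys: {missing_keys}")
        for annotation in self.__annotations__:
            setattr(self, annotation, dictionary[annotation])

try:
    RemoteObject({"name": "branding"})
except KeyError as error:
    print(error)   # Fetched object lacks expected keys: {'url'}
```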
[dagit] Flip section arrow in left nav
### Summary & Motivation
Flip the arrow to point right instead of left.
### How I Tested These Changes
View Dagit left nav, expand and collapse sections. | @@ -222,7 +222,7 @@ const SectionHeader = styled.button<{$open: boolean; $showRepoLocation: boolean}
${IconWrapper}[aria-label="arrow_drop_down"] {
transition: transform 100ms linear;
- ${({$open}) => ($open ? null : `transform: rotate(90deg);`)}
+ ${({$open}) => ($open ? null : `transform: rotate(-90deg);`)}
}
:disabled ${IconWrapper} {
|
SceneReader : Don't hash filename when calling SceneInterface::hash()
We now expect the underlying SceneInterface to have accounted for the filename (or file contents) itself. | @@ -141,21 +141,21 @@ void SceneReader::hashBound( const ScenePath &path, const Gaffer::Context *conte
{
SceneNode::hashBound( path, context, parent, h );
- fileNamePlug()->hash( h );
- refreshCountPlug()->hash( h );
-
ConstSceneInterfacePtr s = scene( path );
if( !s )
{
return;
}
+ refreshCountPlug()->hash( h );
+
if( s->hasBound() )
{
s->hash( SceneInterface::BoundHash, context->getTime(), h );
}
else
{
+ fileNamePlug()->hash( h );
h.append( &path.front(), path.size() );
}
}
@@ -187,15 +187,13 @@ void SceneReader::hashTransform( const ScenePath &path, const Gaffer::Context *c
{
SceneNode::hashTransform( path, context, parent, h );
- fileNamePlug()->hash( h );
- refreshCountPlug()->hash( h );
-
ConstSceneInterfacePtr s = scene( path );
if( !s )
{
return;
}
+ refreshCountPlug()->hash( h );
s->hash( SceneInterface::TransformHash, context->getTime(), h );
}
@@ -228,9 +226,7 @@ void SceneReader::hashAttributes( const ScenePath &path, const Gaffer::Context *
SceneNode::hashAttributes( path, context, parent, h );
- fileNamePlug()->hash( h );
refreshCountPlug()->hash( h );
-
s->hash( SceneInterface::AttributesHash, context->getTime(), h );
}
@@ -281,9 +277,7 @@ void SceneReader::hashObject( const ScenePath &path, const Gaffer::Context *cont
SceneNode::hashObject( path, context, parent, h );
- fileNamePlug()->hash( h );
refreshCountPlug()->hash( h );
-
s->hash( SceneInterface::ObjectHash, context->getTime(), h );
}
@@ -309,7 +303,6 @@ void SceneReader::hashChildNames( const ScenePath &path, const Gaffer::Context *
SceneNode::hashChildNames( path, context, parent, h );
- fileNamePlug()->hash( h );
refreshCountPlug()->hash( h );
// append a hash of the tags plug, as restricting the tags can affect the hierarchy
|
Fix `build.yaml` workflow file name
This PR fixes a typo in
Authors:
- AJ Schmidt (https://github.com/ajschmidt8)
Approvers:
- Sevag H (https://github.com/sevagh) | @@ -28,7 +28,7 @@ concurrency:
jobs:
conda-python-build:
secrets: inherit
- uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-build.yaml@main
+ uses: rapidsai/shared-action-workflows/.github/workflows/conda-python-matrix-build.yaml@main
with:
build_type: ${{ inputs.build_type || 'branch' }}
branch: ${{ inputs.branch }}
|
Use pytest.hookimpl instead of pytest.mark.hookwrapper
pytest.mark.hookwrapper seems to be used nowhere in the _pytest package. | @@ -399,22 +399,22 @@ class LoggingPlugin(object):
log = log_handler.stream.getvalue().strip()
item.add_report_section(when, 'log', log)
- @pytest.mark.hookwrapper
+ @pytest.hookimpl(hookwrapper=True)
def pytest_runtest_setup(self, item):
with self._runtest_for(item, 'setup'):
yield
- @pytest.mark.hookwrapper
+ @pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(self, item):
with self._runtest_for(item, 'call'):
yield
- @pytest.mark.hookwrapper
+ @pytest.hookimpl(hookwrapper=True)
def pytest_runtest_teardown(self, item):
with self._runtest_for(item, 'teardown'):
yield
- @pytest.mark.hookwrapper
+ @pytest.hookimpl(hookwrapper=True)
def pytest_runtestloop(self, session):
"""Runs all collected test items."""
with catching_logs(self.log_cli_handler,
|
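A minimal conftest-style sketch of the `@pytest.hookimpl(hookwrapper=True)` form the diff switches to; the prints are only there to show where the wrapped hook implementations run relative to the wrapper.

```python
# conftest.py
import pytest

@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item):
    # Code before the yield runs ahead of the wrapped hook implementations,
    # code after it runs once they have all completed.
    print(f"starting {item.name}")
    outcome = yield
    print(f"finished {item.name}, passed={outcome.excinfo is None}")
```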
Turn off showProgressDetails
The file size and remaining time details are all buggy, so remove them.
Add ids for the UI Elements as this was missing. | })
uppy.use(Uppy.DragDrop, {
+ id: `${inputId}-DragDrop`,
target: `#${inputId}-drag-drop`,
});
uppy.use(Uppy.StatusBar, {
+ id: `${inputId}-StatusBar`,
target: `#${inputId}-progress`,
- showProgressDetails: true,
+ showProgressDetails: false,
hideCancelButton: true,
hidePauseResumeButton: true,
});
|
Remove dead code from adm lmdb module
These functions have been migrated over to the lmdb mod in validator,
where they are used and tested. | @@ -125,16 +125,6 @@ impl<'a> LmdbDatabaseReader<'a> {
Ok(val.ok().map(Vec::from))
}
- #[allow(dead_code)]
- pub fn cursor(&self) -> Result<LmdbDatabaseReaderCursor, DatabaseError> {
- let cursor = self
- .txn
- .cursor(&self.db.main)
- .map_err(|err| DatabaseError::ReaderError(format!("{}", err)))?;
- let access = self.txn.access();
- Ok(LmdbDatabaseReaderCursor { access, cursor })
- }
-
pub fn index_cursor(&self, index: &str) -> Result<LmdbDatabaseReaderCursor, DatabaseError> {
let index = self
.db
@@ -177,14 +167,6 @@ pub struct LmdbDatabaseReaderCursor<'a> {
}
impl<'a> LmdbDatabaseReaderCursor<'a> {
- #[allow(dead_code)]
- pub fn first(&mut self) -> Option<(Vec<u8>, Vec<u8>)> {
- self.cursor
- .first(&self.access)
- .ok()
- .map(|(key, value): (&[u8], &[u8])| (Vec::from(key), Vec::from(value)))
- }
-
pub fn last(&mut self) -> Option<(Vec<u8>, Vec<u8>)> {
self.cursor
.last(&self.access)
|
Prepare the 1.25.1rc1 release.
Work towards
[ci skip-rust-tests]
[ci skip-jvm-tests] | @@ -10,6 +10,19 @@ The ``1.25.x`` series brings two major changes to Pants:
Please see https://groups.google.com/forum/#!topic/pants-devel/3nmdSeyvwU0 for more information.
+1.25.1rc1 (6/16/2020)
+---------------------
+
+N.B.: No further releases are expected in the ``1.25.x`` ``stable`` series. This ``.1rc1``
+release is for those upgrading through stable versions who wish to retain pytest console
+coverage support in Pants "v1".
+
+Bugfixes
+~~~~~~~~
+
+* Restore pytest coverage console report. (#10018)
+ `PR #10018 <https://github.com/pantsbuild/pants/pull/10018>`_
+
1.25.1rc0 (03/20/2020)
----------------------
|
try to fix memcached service hc
HG--
branch : feature/microservices | "port": 11211,
"enableTagOverride": false,
"check": {
-{% if ansible_distribution in ['RedHat' or 'CentOS'] %}
+{% if ansible_distribution in ['RedHat'] %}
+ "script": "echo stats | nc localhost 11211 | grep uptime ||(exit 2)",
+{% elif ansible_distribution in ['CentOS'] %}
"script": "echo stats | nc localhost 11211 | grep uptime ||(exit 2)",
{% elif ansible_distribution in ['FreeBSD'] %}
"script": "echo stats | nc -N localhost 11211 | grep uptime ||(exit 2)",
|
igw: open iscsi target port
Open the port the iscsi target uses for iscsi traffic. | tags:
- firewall
+- name: open iscsi target ports
+ firewalld:
+ port: "3260/tcp"
+ zone: "{{ ceph_iscsi_firewall_zone }}"
+ source: "{{ public_network }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ notify: restart firewalld
+ when:
+ - iscsi_gw_group_name is defined
+ - iscsi_gw_group_name in group_names
+ - (firewalld_pkg_query.get('rc', 1) == 0 or is_atomic)
+ tags:
+ - firewall
+
- name: open iscsi api ports
firewalld:
port: "{{ api_port | default(5000) }}/tcp"
|
Handle interrupt signal
+ Proper number of arguments for handler, fixes
+ Do not interrupt once we are running to avoid leaving the system in an
indeterminate state, fixes | @@ -40,6 +40,7 @@ class DeprovisionHandler(object):
def __init__(self):
self.osutil = get_osutil()
self.protocol_util = get_protocol_util()
+ self.actions_running = False
signal.signal(signal.SIGINT, self.handle_interrupt_signal)
def del_root_password(self, warnings, actions):
@@ -134,11 +135,16 @@ class DeprovisionHandler(object):
if not confirm.lower().startswith('y'):
return
+ self.actions_running = True
for action in actions:
action.invoke()
- def handle_interrupt_signal(self, frame):
+ def handle_interrupt_signal(self, signum, frame):
+ if not self.actions_running:
print("Deprovision is interrupted.")
sys.exit(0)
+ print ('Deprovisioning may not be interrupted.')
+ return
+
|
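A short sketch of the two fixes described above, assuming a module-level flag: the handler accepts the `(signum, frame)` pair that the `signal` module delivers, and Ctrl-C is ignored once real work has started.

```python
import signal
import sys

actions_running = False

def handle_interrupt_signal(signum, frame):
    # signal.signal always calls the handler with (signum, frame); a
    # single-argument handler raises TypeError when SIGINT arrives.
    if not actions_running:
        print("Deprovision is interrupted.")
        sys.exit(0)
    print("Deprovisioning may not be interrupted.")

signal.signal(signal.SIGINT, handle_interrupt_signal)
```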
quick fix in a_pareto_curve.py
comment out the problematic data frame operation | @@ -136,15 +136,16 @@ class ParetoCurveForOneGenerationPlot(cea.plots.optimization.GenerationPlotBase)
def calc_final_dataframe(individual_data):
user_defined_mcda = individual_data.loc[individual_data["user_MCDA_rank"] < 2]
- if user_defined_mcda.shape[0] > 1:
- individual = str(user_defined_mcda["individual_name"].values)
- user_defined_mcda = user_defined_mcda.reset_index(drop=True)
- user_defined_mcda = user_defined_mcda.iloc[0].T
- user_defined_mcda["System option"] = individual
+ # FIXME: comment out as the action is unclear and the dataframe shape does not match the expected outputs
+ # if user_defined_mcda.shape[0] > 1:
+ # individual = str(user_defined_mcda["individual_name"].values)
+ # user_defined_mcda = user_defined_mcda.reset_index(drop=True)
+ # user_defined_mcda = user_defined_mcda.iloc[0].T
+ # user_defined_mcda["System option"] = individual
# Now extend all dataframes
final_dataframe = user_defined_mcda.copy()
final_dataframe.reset_index(drop=True, inplace=True)
- final_dataframe["Attribute"] = ["user defined MCDA"]
+ final_dataframe["Attribute"] = "user defined MCDA"
return final_dataframe
|
update: update ceph release pattern in complete upgrade play
since master is now deploying quincy, we must update this.
Otherwise, it will fail like following:
```
Error EPERM: require_osd_release cannot be lowered once it has been set
``` | name: ceph-facts
tasks_from: container_binary.yml
- - name: container | disallow pre-pacific OSDs and enable all new pacific-only functionality
- command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release pacific"
+ - name: container | disallow pre-quincy OSDs and enable all new quincy-only functionality
+ command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release quincy"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: True
when:
- containerized_deployment | bool
- groups.get(mon_group_name, []) | length > 0
- - name: non container | disallow pre-pacific OSDs and enable all new pacific-only functionality
- command: "ceph --cluster {{ cluster }} osd require-osd-release pacific"
+ - name: non container | disallow pre-quincy OSDs and enable all new quincy-only functionality
+ command: "ceph --cluster {{ cluster }} osd require-osd-release quincy"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: True
when:
|
Updated readme
Update guide for MySQL, shared library and auto-update | @@ -111,6 +111,19 @@ Only add/remove to My List from within the addon keeps the Kodi library in sync.
### My watched status is not being updated?!
The addon does not report watched status back to Netflix (yet). This is a top priority on our roadmap, but we haven't been able to figure this out just yet.
+### Can i share the exported content in the library with multiple devices?
+Yes it is possible share the same library with multiple devices that using netflix addon.
+In order to work it is necessary use Kodi with a MySQL server.
+You can follow the official Kodi MySQL setup instructions at [Kodi Wiki](https://kodi.wiki/view/MySQL).
+When done, in each device that use this addon, open the addon settings and under Library page:
+- Check "Enable custom library folder", and choose a shared "Custom library path". The path must be the exact same on all devices.
+- Enable "Use MySQL shared library database", then set the same connection parameters used in Kodi MySQL setup.
+
+### Auto-update of exported content
+WARNING: AN INTENSIVE USE of AUTO-UPDATE function due to many exported tv shows MAY CAUSE A TEMPORARY BAN of the ACCOUNT that varies starting from 24/48 hours. Use at your own risk.
+If it happens often, there is the possibility to exclude the auto-updates from the tv shows, by open context menu on a tv show and selecting "Exclude from auto update".
+- If you want to use the auto-update with a shared exported content (to multiple devices), you need to set up one of the devices as the main library update manager, by using the menu "Set this device as main auto-updates manager" from the chosen device under Library page.
+
## Something doesn't work
If something doesn't work for you, please:
|
Remove unused table header in search template
The unused header was creating an extra line that we could resolve with
css instead of empty html tags. | {% endif %}
<div class="table-responsive">
<table class="table table-striped">
- <thead>
- <tr>
- <th></th>
- <th></th>
- </tr>
- </thead>
<tbody>
{% for host in hosts %}
<tr>
|
Update version to 1.5.0
Changes
Use latest `dwave-system` (0.8.x), which uses the latest
`dwave-cloud-client` (0.6.x)
Use latest `dwave-hybrid` (0.4.x) | # limitations under the License.
#
# ================================================================================================
-__version__ = '1.4.0'
+__version__ = '1.5.0'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = '[email protected]'
__description__ = 'Software development kit for open source D-Wave tools'
|
feat: start extracting constants from donation page
To serve as reference on how to do it for the rest of the app | @@ -2,21 +2,10 @@ import { Link, makeStyles, Typography } from "@material-ui/core";
import classNames from "classnames";
import HtmlMeta from "components/HtmlMeta";
import Markdown from "components/Markdown";
-import {
- BENEFACTOR_CONTACT1,
- BENEFACTOR_CONTACT2,
- BENEFACTOR_EMAIL,
- DONATIONS_BANNER_TEXT,
- DONATIONS_BANNER_TITLE,
- DONATIONS_TEXT,
- DONATIONS_TEXT2,
- DONATIONS_TITLE,
- DONATIONS_TITLE2,
-} from "features/donations/constants";
import Landscape from "features/donations/resources/landscape.jpeg";
+import { Trans, useTranslation } from "react-i18next";
import CouchersLogo from "resources/CouchersLogo";
-import { DONATE } from "../../constants";
import DonationsBox from "./DonationsBox";
const useStyles = makeStyles((theme) => ({
@@ -128,15 +117,17 @@ const useStyles = makeStyles((theme) => ({
export default function Donations() {
const classes = useStyles();
+ const { t } = useTranslation("donations");
+
return (
<>
- <HtmlMeta title={DONATE} />
+ <HtmlMeta title={t("donate")} />
<div className={classes.donationsWrapper}>
<div className={classes.donationsLogoHeader}>
<CouchersLogo className={classes.donationsLogo} />
<div className={classes.donationsLogoText}>
- <Typography variant="h2">{DONATIONS_BANNER_TITLE}</Typography>
- <Typography>{DONATIONS_BANNER_TEXT}</Typography>
+ <Typography variant="h2">{t("donations_banner_title")}</Typography>
+ <Typography>{t("donations_banner_body")}</Typography>
</div>
</div>
<img className={classes.donationsImage} src={Landscape} alt="" />
@@ -158,11 +149,16 @@ export default function Donations() {
)}
variant="body2"
>
- {BENEFACTOR_CONTACT1}{" "}
- <Link className={classes.link} href={"mailto:" + BENEFACTOR_EMAIL}>
- {BENEFACTOR_EMAIL}
- </Link>{" "}
- {BENEFACTOR_CONTACT2}
+ <Trans t={t} i18nKey="benefactor_contact">
+ If you wish to contribute over $1000, please contact us at
+ <Link
+ className={classes.link}
+ href={`mailto:${t("benefactor_email")}`}
+ >
+ {{ email: t("benefactor_email") }}
+ </Link>
+ for us to arrange a lower fee transfer.
+ </Trans>
</Typography>
<div
@@ -171,15 +167,18 @@ export default function Donations() {
classes.donationsLayoutText
)}
>
- <Typography variant="h1">{DONATIONS_TITLE}</Typography>
- <Markdown className={classes.donationsText} source={DONATIONS_TEXT} />
+ <Typography variant="h1">{t("donations_title")}</Typography>
+ <Markdown
+ className={classes.donationsText}
+ source={t("donations_text")}
+ />
</div>
<Typography
className={classes.donationsLayoutSecondaryTitle}
variant="h2"
>
- {DONATIONS_TITLE2}
+ {t("donations_title2")}
</Typography>
<Markdown
@@ -187,7 +186,7 @@ export default function Donations() {
classes.donationsText,
classes.donationsLayoutSecondaryText
)}
- source={DONATIONS_TEXT2}
+ source={t("donations_text2")}
/>
</section>
</>
|
Update rogue_dns.txt
Have no idea how to process all these ```ns.*``` records.
ns3.gatherreceive.net
ns4.gatherreceive.net
63.251.106.22:53
+
+# Reference: https://www.virustotal.com/gui/ip-address/184.73.137.229/relations
+# Reference: https://www.virustotal.com/gui/ip-address/34.229.84.179/relations
+# Reference: https://www.virustotal.com/gui/ip-address/34.230.76.81/relations
+# Reference: https://www.virustotal.com/gui/ip-address/54.227.204.233/relations
+
+184.73.137.229:53
+34.229.84.179:53
+34.230.76.81:53
+54.227.204.233:53
|
Apply suggestions from code review
Thanks for catching all of these little mistakes! | @@ -20,12 +20,12 @@ keywords that use the Playwright API.
Starting with CumulusCI version 3.59.0, we are providing experimental
support for Playwright and the Browser library in CumulusCI.
-In CumulusCI version 3.60 we've reorganized our keywords so that
+In CumulusCI 3.60, we've reorganized our keywords so that
a test can import the API and performance keywords without importing
Selenium keywords. To use Playwright-based keywords, import the resource file
`SalesforcePlaywright.robot
-<Keywords.html#file-cumulusci/robotframework/SalesforcePlaywright.robot>`_
-which imports the non-selenium keywords along with the keywords in
+<Keywords.html#file-cumulusci/robotframework/SalesforcePlaywright.robot>`_,
+which imports the non-Selenium keywords along with the keywords in
the `SalesforcePlaywright library <Keywords.html#file-cumulusci.robotframework.SalesforcePlaywright>`_.
@@ -87,9 +87,9 @@ write Playwright-based tests with off-the-shelf `keywords provided by
the Browser library
<https://marketsquare.github.io/robotframework-browser/Browser.html>`_
-To initialize playwright support in a test suite, import the
-SalesforcePlaywright.robot resource file as shown in the following
-example. It will import the Browser library, and defines the keywords
+To initialize Playwright support in a test suite, import the
+``SalesforcePlaywright.robot`` resource file as shown in the following
+example. It imports the Browser library and defines the keywords
``Open Test Browser`` and ``Delete records and close browser``.
.. code-block:: robotframework
@@ -131,7 +131,7 @@ Things to Notice
This example test is unable to use any of the existing
Selenium-based keywords, except for two. We've created a
new library based on Playwright and the Browser library with two
-keywods that are similar to existing keywords:
+keywords that are similar to existing keywords:
`Open Test Browser <Keywords.html#SalesforcePlaywright.Open%20Test%20Browser>`_
and
`Delete Records and Close Browser
|
FFU: Fix Keystone FFU tasks
We need to set facts instead of registering values. | @@ -232,33 +232,43 @@ outputs:
tags: common
shell: "httpd -t -D DUMP_VHOSTS | grep -q keystone_wsgi"
ignore_errors: true
- register: httpd_enabled
+ register: keystone_httpd_enabled_result
+ when:
+ - step|int == 0
+ - release == 'ocata'
+ - name: Set fact keystone_httpd_enabled
+ set_fact:
+ keystone_httpd_enabled: "{{ keystone_httpd_enabled_result.rc == 0 }}"
+ when:
+ - step|int == 0
+ - release == 'ocata'
- name: Check if httpd is running
ignore_errors: True
- register: httpd_running
command: systemctl is-active --quiet httpd
+ register: httpd_running_result
+ when:
+ - step|int == 0
+ - release == 'ocata'
+ - httpd_running is undefined
+ - name: Set fact httpd_running if undefined
+ set_fact:
+ httpd_running: "{{ httpd_running_result.rc == 0 }}"
+ when:
+ - step|int == 0
+ - release == 'ocata'
+ - httpd_running is undefined
- name: Stop and disable keystone (under httpd)
service: name=httpd state=stopped enabled=no
when:
- - step|int == 2
+ - step|int == 1
- release == 'ocata'
- - httpd_enabled.rc == 0
- - httpd_running.rc == 0
+ - keystone_httpd_enabled|bool
+ - httpd_running|bool
- name: Keystone package update
- yum:
- name: '{{ package }}'
- state: latest
+ yum: name=openstack-keystone* state=latest
when:
- step|int == 6
- is_bootstrap_node|bool
- loop_control:
- loop_var: package
- with_items:
- - openstack-keystone
- - python2-keystoneclient
- - python-keystone
- - python2-keystonemiddleware
- - python2-keystoneauth1
- name: keystone db sync
command: keystone-manage db_sync
when:
|
Update directory_ldap.py
correcting Issue # | @@ -269,7 +269,7 @@ class LDAPDirectoryConnector(object):
if sn_value is not None:
user['lastname'] = sn_value
c_value = LDAPValueFormatter.get_attribute_value(record, six.text_type('c'))
- source_attributes['c'] = c_value if c_value else None
+ source_attributes['c'] = c_value.upper() if c_value else None
if c_value is not None:
user['country'] = c_value.upper()
|
Fix double x/y transform for use tags
Fix | @@ -20,8 +20,6 @@ def use(svg, node, font_size):
from . import SVG
svg.stream.push_state()
- svg.stream.transform(
- 1, 0, 0, 1, *svg.point(node.get('x'), node.get('y'), font_size))
for attribute in ('x', 'y', 'viewBox', 'mask'):
if attribute in node.attrib:
|
Fix bug with callback used for HDF5 attrs and filters
The `key` in the lambda function was bound to its value from the last iteration of the "for" loop
__license__ = "MIT"
__date__ = "27/01/2017"
-from silx.gui import qt
+import functools
import os.path
+import logging
+from silx.gui import qt
import silx.io
from .TextFormatter import TextFormatter
import silx.gui.hdf5
-import logging
_logger = logging.getLogger(__name__)
@@ -230,12 +231,14 @@ class Hdf5TableModel(qt.QAbstractTableModel):
self.__properties.append(_Property("compression", lambda x: "True (see filters)"))
for index in range(dcpl.get_nfilters()):
name = "filters[%d]" % index
- self.__properties.append(_Property(name, lambda x: self.__get_filter_info(x, index)))
+ callback = lambda index, x: self.__get_filter_info(x, index)
+ self.__properties.append(_Property(name, functools.partial(callback, index)))
if hasattr(obj, "attrs"):
for key in sorted(obj.attrs.keys()):
name = "attrs[%s]" % key
- self.__properties.append(_Property(name, lambda x: self.__formatter.toString(x.attrs[key])))
+ callback = lambda key, x: self.__formatter.toString(x.attrs[key])
+ self.__properties.append(_Property(name, functools.partial(callback, key)))
def __get_filter_info(self, dataset, filterIndex):
"""Get a tuple of readable info from dataset filters
|
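A tiny demonstration (with arbitrary numbers) of the late-binding bug the commit message describes and the `functools.partial` fix:

```python
import functools

# Buggy: each lambda closes over the *variable* i, so every callback sees
# its final value (2) when it is eventually called.
buggy = [lambda x: x + i for i in range(3)]
print([f(10) for f in buggy])    # [12, 12, 12]

# Fixed: partial freezes the current value of i for each iteration.
fixed = [functools.partial(lambda i, x: x + i, i) for i in range(3)]
print([f(10) for f in fixed])    # [10, 11, 12]
```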
fix(stock_zh_a_spot): fix stock_zh_a_spot interface
fix stock_zh_a_spot interface | @@ -55,7 +55,8 @@ def stock_zh_a_spot() -> pd.DataFrame:
zh_sina_stock_payload_copy.update({"page": page})
r = requests.get(zh_sina_a_stock_url, params=zh_sina_stock_payload_copy)
data_json = demjson.decode(r.text)
- big_df = big_df.append(pd.DataFrame(data_json), ignore_index=True)
+ big_df = pd.concat([big_df, pd.DataFrame(data_json)], ignore_index=True)
+
big_df = big_df.astype(
{
"trade": "float",
@@ -293,11 +294,11 @@ def stock_zh_a_cdr_daily(
data_df.index = pd.to_datetime(data_df["date"])
del data_df["date"]
data_df = data_df.astype("float")
- temp_df = data_df[start_date:end_date]
- temp_df["open"] = round(temp_df["open"], 2)
- temp_df["high"] = round(temp_df["high"], 2)
- temp_df["low"] = round(temp_df["low"], 2)
- temp_df["close"] = round(temp_df["close"], 2)
+ temp_df = data_df[start_date:end_date].copy()
+ temp_df["open"] = pd.to_numeric(temp_df["open"])
+ temp_df["high"] = pd.to_numeric(temp_df["high"])
+ temp_df["low"] = pd.to_numeric(temp_df["low"])
+ temp_df["close"] = pd.to_numeric(temp_df["close"])
temp_df.reset_index(inplace=True)
temp_df['date'] = temp_df['date'].dt.date
return temp_df
|
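A minimal illustration, with made-up data, of the two pandas changes in this diff: accumulating per-page frames via `pd.concat` instead of the removed `DataFrame.append`, and taking a `.copy()` of a slice before assigning to its columns.

```python
import pandas as pd

pages = [pd.DataFrame({"trade": [1.0 + page]}) for page in range(3)]

big_df = pd.DataFrame()
for page_df in pages:
    # DataFrame.append was deprecated and later removed; concat is the replacement.
    big_df = pd.concat([big_df, page_df], ignore_index=True)

# Copy a slice before mutating it to avoid SettingWithCopyWarning.
temp_df = big_df.iloc[:2].copy()
temp_df["trade"] = pd.to_numeric(temp_df["trade"])
print(big_df.shape, temp_df.shape)   # (3, 1) (2, 1)
```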
Documentation: android hide loading screen
Correct sample code for hiding the splash screen in android. | @@ -88,11 +88,11 @@ longer than necessary (with your app already being loaded) due to a
limitation with the way we check if the app has properly started.
In this case, the splash screen overlaps the app gui for a short time.
-To dismiss the loading screen explicitely in your code, use the `android`
+To dismiss the loading screen explicitly in your code, use the `android`
module::
- from android import hide_loading_screen
- hide_loading_screen()
+ from android import loadingscreen
+ loadingscreen.hide_loading_screen()
You can call it e.g. using ``kivy.clock.Clock.schedule_once`` to run it
in the first active frame of your app, or use the app build method.
|
in gen test, account for 0-indexing and outage being inclusive of end time step
first time step of outage was not being checked when asserting critical load equals sum of techs to load | @@ -59,7 +59,7 @@ class GeneratorSizingTests(ResourceTestCaseMixin, TestCase):
tech_to_load = list()
for tech in list_to_load:
if tech is not None:
- tech_to_load = [sum_t + t for sum_t, t in zip(tech_to_load, tech[outage_start:outage_end])]
+ tech_to_load = [sum_t + t for sum_t, t in zip(tech_to_load, tech[outage_start-1:outage_end])]
return tech_to_load
def test_generator_sizing_with_existing_pv(self):
@@ -123,5 +123,5 @@ class GeneratorSizingTests(ResourceTestCaseMixin, TestCase):
list_to_load = [generator_to_load, storage_to_load, pv_to_load]
tech_to_load = self.outage_tech_to_load(list_to_load, outage_start, outage_end)
- for x, y in zip(critical_load[outage_start:outage_end], tech_to_load):
+ for x, y in zip(critical_load[outage_start-1:outage_end], tech_to_load):
self.assertAlmostEquals(x, y, places=3)
|
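A quick numeric check of the indexing fix, assuming hourly values numbered from time step 1 and an outage window that includes its final step:

```python
# Time steps are 1-indexed (1, 2, ..., N) while Python lists are 0-indexed,
# and the outage is inclusive of outage_end, hence the start-1 offset.
load = [10, 20, 30, 40, 50, 60]     # values for steps 1..6
outage_start, outage_end = 3, 5     # steps 3, 4 and 5 are in the outage

print(load[outage_start:outage_end])       # [40, 50]      - misses step 3
print(load[outage_start - 1:outage_end])   # [30, 40, 50]  - all three steps
```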
CatalogueUI : Fix incorrect sorting of images
highlighted a case where the `ImagesPath` could end up being
pre-sorted before we call `setSortable( False )`. Disabling sorting in
the constructor fixes this.
Fixes | @@ -430,10 +430,10 @@ class _ImageListing( GafferUI.PlugValueWidget ) :
self.__pathListing = GafferUI.PathListingWidget(
_ImagesPath( self.__images(), [] ),
columns = columns,
- allowMultipleSelection = True
+ allowMultipleSelection = True,
+ sortable = False
)
self.__pathListing.setDragPointer( "" )
- self.__pathListing.setSortable( False )
self.__pathListing.setHeaderVisible( len(columns) > 1 )
self.__pathListing.selectionChangedSignal().connect(
Gaffer.WeakMethod( self.__pathListingSelectionChanged ), scoped = False
|
updating test name
missing an 's' | @@ -26,4 +26,4 @@ jobs:
- name: Check django logs
run: docker logs django
- name: test
- run: docker-compose exec -T celery python manage.py test reo.tests.test_custom_rate reo.tests.test_demand_ratchet -v 2 --failfast --no-input
+ run: docker-compose exec -T celery python manage.py test reo.tests.test_custom_rates reo.tests.test_demand_ratchet -v 2 --failfast --no-input
|
(doc) update logging info - wording change
added log rotation on doc | @@ -12,6 +12,6 @@ For users who wish to locate and submit log files, they are located in the `/log
## Log File Management
-A separate log file will now be generated daily. When a new log file is created, if there are more than 7 files, the oldest ones will be deleted in order to limit disk storage usage. The log rotation feature is added on [Hummingbot version 0.17.0](https://docs.hummingbot.io/release-notes/0.17.0/#log-file-management-data-storage)
+A separate log file will now be generated daily. When a new log file is created, if there are more than 7 files, the oldest ones will be deleted in order to limit disk storage usage. The log rotation feature was added in [Hummingbot version 0.17.0](https://docs.hummingbot.io/release-notes/0.17.0/#log-file-management-data-storage).
If you are looking for support in handling errors or have questions about behavior reported in logs, you can find ways of contacting the team or community in our [support section](/support).
|
Fix docstring for npermutations in PermutationExplainer
Closes
Authors:
- Philip Hyunsu Cho (https://github.com/hcho3)
Approvers:
- Dante Gama Dessavre (https://github.com/dantegd)
URL: | @@ -235,7 +235,13 @@ class PermutationExplainer(SHAPBase):
CuPy, cuDF DataFrame/Series, NumPy ndarray and Pandas
DataFrame/Series.
npermutations : int (default = 10)
- The l1 regularization to use for feature selection.
+ Number of times to cycle through all the features, re-evaluating
+ the model at each step. Each cycle evaluates the model function
+ 2 * (# features + 1) times on a data matrix of (# background
+ data samples) rows. An exception to this is when
+ PermutationExplainer can avoid evaluating the model because a
+ feature's value is the same in X and the background dataset
+ (which is common for example with sparse features).
as_list : bool (default = True)
Set to True to return a list of arrays for multi-dimensional
models (like predict_proba functions) to match the SHAP package
|
Update neo/rawio/axonarawio.py
Fix spelling mistake in comment | @@ -436,7 +436,7 @@ class AxonaRawIO(BaseRawIO):
# Adapted or modified by Steffen Buergers, Julia Sprenger
def _get_temporal_mask(self, t_start, t_stop, tetrode_id):
- # Conenience function for creating a temporal mask given
+ # Convenience function for creating a temporal mask given
# start time (t_start) and stop time (t_stop)
# Used by _get_spike_raw_waveforms and _get_spike_timestamps
|
Fixes the natlas-services getting deleted when an agent requests an updated services version.
This was caused because python doesn't implicitly copy objects, so the del as_list was removing it from the original object. Closes | @@ -148,7 +148,7 @@ def submit():
@isAgentAuthenticated
def natlasServices():
if current_app.current_services["id"] != "None":
- tmpdict = current_app.current_services
+ tmpdict = current_app.current_services.copy() # make an actual copy of the dict so that we can remove the list
del tmpdict['as_list'] # don't return the "as_list" version of the services, which is only used for making a pretty table.
return json.dumps(tmpdict), 200, {'content-type':'application/json'}
return json.dumps(current_app.current_services), 404, {'content-type':'application/json'}
|
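A small demonstration of the aliasing bug this fix addresses, using a made-up services dict: plain assignment only creates a second name for the same object, so `del` through it mutates the original, while `.copy()` leaves the original intact.

```python
current_services = {"id": "1", "as_list": ["http", "ssh"]}

alias = current_services               # not a copy - just another name
del alias["as_list"]                   # also removes the key from current_services
print("as_list" in current_services)   # False

current_services = {"id": "1", "as_list": ["http", "ssh"]}
snapshot = current_services.copy()     # shallow copy of the top-level dict
del snapshot["as_list"]
print("as_list" in current_services)   # True - original untouched
```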
Deseasonify: make `get_package_names` an iterator
This simplifies the function and is more in-line with how the function
is being used. | import logging
import pkgutil
from pathlib import Path
-from typing import List
+from typing import Iterator, List
__all__ = ("get_package_names", "get_extensions")
log = logging.getLogger(__name__)
-def get_package_names() -> List[str]:
- """Return names of all packages located in /bot/exts/."""
- seasons = [
- package.name
- for package in pkgutil.iter_modules(__path__)
- if package.ispkg
- ]
-
- return seasons
+def get_package_names() -> Iterator[str]:
+ """Iterate names of all packages located in /bot/exts/."""
+ for package in pkgutil.iter_modules(__path__):
+ if package.ispkg:
+ yield package.name
def get_extensions() -> List[str]:
|
Acquire validation results lock once per fork
Prior to this commit on every block on a fork the validation results
cache's lock would be acquired, which is expensive. This commit changes
it so it is acquired for the whole fork. | @@ -732,12 +732,11 @@ impl<BV: BlockValidator + 'static> ChainController<BV> {
})
});
- for blk in result.new_chain.iter().rev() {
let mut cache = self
.block_validation_results
.write()
.expect("Unable to acquire read lock, due to poisoning");
-
+ for blk in result.new_chain.iter().rev() {
match cache.find(|result| &blk.header_signature == &result.block_id) {
Some(validation_results) => {
let receipts: Vec<TransactionReceipt> = validation_results
|
Adding location
Credit to for confirming the location: | @@ -46,6 +46,7 @@ Police open fire on protesters outside of city hall with teargas, flashbands, an
**Links**
* https://twitter.com/greg_doucette/status/1269017349727928320
+* [Location on Google Maps](https://www.google.com/maps/place/29+W+South+St,+Orlando,+FL+32801,+USA/@28.5384293,-81.3797504,20z/data=!4m5!3m4!1s0x88e77b01dadbb26d:0x1a303f1fe28f7d51!8m2!3d28.5384616!4d-81.3801581)
### Police box in protesters before curfew for mass arrest | June 3rd
|
Break up all the anndata tests
* Break up all the anndata tests
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see | @@ -224,6 +224,8 @@ def test_data_format():
adata_manager.get_from_registry(REGISTRY_KEYS.PROTEIN_EXP_KEY),
)
+
+def test_data_format_c_contiguous():
# if obsm is dataframe, make it C_CONTIGUOUS if it isnt
adata = synthetic_iid()
pe = np.asfortranarray(adata.obsm["protein_expression"])
@@ -272,6 +274,8 @@ def test_setup_anndata():
adata.uns["protein_names"],
)
+
+def test_setup_anndata_view_error():
# test that error is thrown if its a view:
adata = synthetic_iid()
with pytest.raises(ValueError):
@@ -294,6 +298,8 @@ def test_setup_anndata():
new_protein_names,
)
+
+def test_setup_anndata_layer():
# test that layer is working properly
adata = synthetic_iid()
true_x = adata.X
@@ -304,6 +310,8 @@ def test_setup_anndata():
adata_manager.get_from_registry(REGISTRY_KEYS.X_KEY), true_x
)
+
+def test_setup_anndat_create_label_batch():
# test that it creates labels and batch if no layers_key is passed
adata = synthetic_iid()
adata_manager = generic_setup_adata_manager(
@@ -320,12 +328,16 @@ def test_setup_anndata():
np.zeros((adata.shape[0], 1)),
)
+
+def test_setup_anndata_nan():
# test error is thrown when categorical obs field contains nans
adata = synthetic_iid()
adata.obs["batch"][:10] = np.nan
with pytest.raises(ValueError):
generic_setup_adata_manager(adata, batch_key="batch")
+
+def test_setup_anndata_cat():
# test error is thrown when categorical joint obsm field contains nans
adata = synthetic_iid()
adata.obs["cat1"] = np.random.randint(0, 5, size=(adata.shape[0],))
@@ -431,6 +443,8 @@ def test_anntorchdataset_getitem():
np.testing.assert_array_equal(all_registered_tensors, list(bd[1].keys()))
assert bd[1][REGISTRY_KEYS.X_KEY].shape[0] == bd.adata_manager.summary_stats.n_vars
+
+def test_anntorchdataset_numpy():
# check that AnnTorchDataset returns numpy array
adata1 = synthetic_iid()
adata1_manager = generic_setup_adata_manager(adata1)
@@ -446,6 +460,8 @@ def test_anntorchdataset_getitem():
for value in bd[1].values():
assert type(value) == np.ndarray
+
+def test_anntorchdataset_getitem_numpy_sparse():
# check AnnTorchDataset returns numpy array if pro exp was sparse
adata = synthetic_iid()
adata.obsm["protein_expression"] = sparse.csr_matrix(
@@ -458,12 +474,14 @@ def test_anntorchdataset_getitem():
for value in bd[1].values():
assert type(value) == np.ndarray
+
+def test_anntorchdataset_getitem_pro_exp():
# check pro exp is being returned as numpy array even if its DF
adata = synthetic_iid()
adata.obsm["protein_expression"] = pd.DataFrame(
adata.obsm["protein_expression"], index=adata.obs_names
)
- generic_setup_adata_manager(
+ adata_manager = generic_setup_adata_manager(
adata, batch_key="batch", protein_expression_obsm_key="protein_expression"
)
bd = AnnTorchDataset(adata_manager)
|
Update tilda-takeover.yaml
This update is based on this issue | @@ -11,9 +11,13 @@ requests:
- method: GET
path:
- "{{BaseURL}}"
-
+ matchers-condition: and
matchers:
- type: word
words:
- - <title>Please renew your subscription</title>
- Please go to the site settings and put the domain name in the Domain tab.
+ - type: word
+ words:
+ - "<title>Please renew your subscription</title>"
+ negative: true
+
|
fix time stats nested loops
With nested loops, the inner `LoopSum` or `LoopConcatenateCombined` incorrectly
created a new dictionary for inner time stats for every outer iteration. This
patch fixes this problem by creating a new dictionary only if there was none. | @@ -3483,7 +3483,7 @@ class LoopSum(Array):
def evalf_withtimes(self, times, shape, length, *args):
serialized = self._serialized
- times[self] = subtimes = collections.defaultdict(_Stats)
+ subtimes = times.setdefault(self, collections.defaultdict(_Stats))
result = numpy.zeros(shape, self.dtype)
for index in range(length):
values = [numpy.array(index)]
@@ -3686,7 +3686,7 @@ class LoopConcatenateCombined(Evaluable):
def evalf_withtimes(self, times, shapes, length, *args):
serialized = self._serialized
- times[self] = subtimes = collections.defaultdict(_Stats)
+ subtimes = times.setdefault(self, collections.defaultdict(_Stats))
results = [parallel.shempty(tuple(map(int, shape)), dtype=func.dtype) for func, shape in zip(self._funcs, shapes)]
for index in range(length):
values = [numpy.array(index)]
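A minimal sketch, with hypothetical keys and counters, of why setdefault fixes the nested-loop case: it reuses the inner dict that already exists instead of replacing it on every outer iteration.

```python
import collections

times = {}
for outer in range(3):
    # Buggy variant: times["inner"] = collections.defaultdict(int) would start
    # from scratch on each outer iteration; setdefault creates it only once.
    subtimes = times.setdefault("inner", collections.defaultdict(int))
    for inner in range(4):
        subtimes["evaluations"] += 1

assert times["inner"]["evaluations"] == 12  # 3 * 4 accumulated, not just the last 4
```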
|
Call to_string instead of using format!
Format is a complex macro, so for objects that can have to_string called
on them, this is preferable. | @@ -342,7 +342,7 @@ impl From<ProtobufError> for Error {
IoError(err) => Error::EncodingError(format!("{}", err)),
WireError(err) => Error::EncodingError(format!("{:?}", err)),
Utf8(err) => Error::EncodingError(format!("{}", err)),
- MessageNotInitialized { message: err } => Error::EncodingError(format!("{}", err)),
+ MessageNotInitialized { message: err } => Error::EncodingError(err.to_string()),
}
}
}
|
Fix signature in docstring
Summary: Fixes callable signature
Test Plan: N/A
Reviewers: leoeer | @@ -39,7 +39,7 @@ class SystemStorageDefinition(
config_schema (Optional[ConfigSchema]): The schema for the storage's configuration schema.
Configuration data passed in this schema will be made available to the
``system_storage_creation_fn`` under ``init_context.system_storage_config``.
- system_storage_creation_fn: (Callable[InitSystemStorageContext, SystemStorageData])
+ system_storage_creation_fn: (Callable[[InitSystemStorageContext], SystemStorageData])
Called to construct the storage. This function should consume the init context and emit
a :py:class:`SystemStorageData`.
required_resource_keys(Set[str]): The resources that this storage needs at runtime to function.
|
Fix: TCP port number is zero in the error message
When failing to connect to a language server that is hosting a TCP
server, and when we choose a free TCP port, the error message shows
"Failed to connect on port 0", whereas that should be
"Failed to connect on port ${port}". | @@ -228,7 +228,7 @@ def create_transport(config: ClientConfig, cwd: Optional[str], window: sublime.W
if tcp_port:
sock = _connect_tcp(tcp_port)
if sock is None:
- raise RuntimeError("Failed to connect on port {}".format(config.tcp_port))
+ raise RuntimeError("Failed to connect on port {}".format(tcp_port))
reader = sock.makefile('rwb') # type: IO[bytes]
writer = reader
else:
|
use a different replay for some lotv tests:
- make sure there is really a zerg in that replay, otherwise the
test would test nothing at all
- remove outdated comment, that test does not fail. | @@ -435,19 +435,21 @@ class TestReplays(unittest.TestCase):
def test_lotv_creepTracker(self):
from sc2reader.engine.plugins import CreepTracker
- for replayfilename in ["test_replays/lotv/lotv1.SC2Replay"]:
+ for replayfilename in ["test_replays/4.0.0.59587/1.SC2Replay"]:
factory = sc2reader.factories.SC2Factory()
pluginEngine = sc2reader.engine.GameEngine(plugins=[CreepTracker()])
replay = factory.load_replay(replayfilename, engine=pluginEngine, load_map=True)
+ is_at_least_one_zerg_in_game = False
for player_id in replay.player:
if replay.player[player_id].play_race == "Zerg":
+ is_at_least_one_zerg_in_game = True
assert replay.player[player_id].max_creep_spread != 0
assert replay.player[player_id].creep_spread_by_minute
+ assert is_at_least_one_zerg_in_game
def test_lotv_map(self):
- # This test currently fails in decoders.py with 'TypeError: ord() expected a character, but string of length 0 found'
- for replayfilename in ["test_replays/lotv/lotv1.SC2Replay"]:
+ for replayfilename in ["test_replays/4.0.0.59587/1.SC2Replay"]:
factory = sc2reader.factories.SC2Factory()
replay = factory.load_replay(replayfilename, load_level=1, load_map=True)
|
Fix quantization with checkpoint wrapper
Summary: checkpoint wrapper deepcopy fix wasn't compatible with jitting. e.g | @@ -46,6 +46,9 @@ def unwrap_checkpoint(m: torch.nn.Module):
if hasattr(module, "precheckpoint_forward"):
module.forward = module.precheckpoint_forward
del module.precheckpoint_forward
+ if hasattr(module, "old_deepcopy_method"):
+ module.__deepcopy__ = module.old_deepcopy_method
+ del module.old_deepcopy_method
return m
|
Remove reference to deprecated readthedocs documentation in Contribute page
Removes the reference to the now-deprecated readthedocs documentation as noted in issue
The script mentioned (`dev_tools/docs/build-rtd-docs.sh`) is still present, maybe it should be removed as well? | @@ -303,14 +303,6 @@ def some_method(a: int, b: str) -> float:
"""
```
-The docs folder is used to automatically generate the documentation on our website at [quantumai.google/cirq](https://quantumai.google/cirq) from the `master` branch. You can also generate a local copy by running:
-
-```bash
-dev_tools/docs/build-rtd-docs.sh
-```
-
-The HTML output will go into the `dev_tools/rtd_docs/sphinx/_build` directory.
-
## Dependencies
### Production dependencies
|
Update coqa_official_evaluation_script.py
modifying scoring to only generate scores for sources actually seen during eval | @@ -185,6 +185,7 @@ class CoQAEvaluator():
for story_id, turn_id in self.gold_data:
key = (story_id, turn_id)
source = self.id_to_source[story_id]
+ if key in exact_scores and key in f1_scores:
sources[source]['em_total'] += exact_scores.get(key, 0)
sources[source]['f1_total'] += f1_scores.get(key, 0)
sources[source]['turn_count'] += 1
@@ -199,6 +200,7 @@ class CoQAEvaluator():
out_domain_turn_count = 0
for source in in_domain + out_domain:
+ if source in sources:
domain = domain_mappings[source]
scores[domain] = {}
scores[domain]['em'] = round(sources[source]['em_total'] / max(1, sources[source]['turn_count']) * 100, 1)
|
Update bookmark error handling
This moves sending the error response to within the except block, making it easier to parse what the code is doing. | @@ -74,10 +74,9 @@ class Bookmark(commands.Cog):
await member.send(embed=embed)
except discord.Forbidden:
error_embed = self.build_error_embed(f"{member.mention}, please enable your DMs to receive the bookmark.")
+ await channel.send(embed=error_embed)
else:
log.info(f"{member} bookmarked {target_message.jump_url} with title '{title}'")
- return
- await channel.send(embed=error_embed)
@commands.group(name="bookmark", aliases=("bm", "pin"), invoke_without_command=True)
@commands.guild_only()
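A small sketch of the try/except/else flow the diff relies on, with hypothetical stand-ins for the Discord calls: the else branch runs only when no exception was raised, so the success path and the failure path sit next to the call that can fail.

```python
def send_dm(allowed: bool) -> None:
    if not allowed:
        raise PermissionError("DMs disabled")  # stand-in for discord.Forbidden

def bookmark(allowed: bool) -> str:
    try:
        send_dm(allowed)
    except PermissionError:
        return "error embed sent to channel"
    else:
        return "bookmark logged"

assert bookmark(False) == "error embed sent to channel"
assert bookmark(True) == "bookmark logged"
```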
|
[dagit] Polling query on assets page
Summary:
Resolves
Make the Assets list query use polling, with a refreshable countdown.
Test Plan: View Assets on Dagit, verify query polling and refreshing.
Reviewers: bengotow, prha, sandyryza | @@ -18,6 +18,7 @@ import {useHistory, Link} from 'react-router-dom';
import styled from 'styled-components/macro';
import {PythonErrorInfo, PYTHON_ERROR_FRAGMENT} from '../app/PythonErrorInfo';
+import {QueryCountdown} from '../app/QueryCountdown';
import {useDocumentTitle} from '../hooks/useDocumentTitle';
import {Box} from '../ui/Box';
import {Group} from '../ui/Group';
@@ -52,10 +53,14 @@ const EXPERIMENTAL_TAGS_WARNING = (
</Box>
);
-export const AssetsCatalogTable: React.FunctionComponent<{
- prefixPath?: string[];
-}> = ({prefixPath}) => {
- const queryResult = useQuery<AssetsTableQuery>(ASSETS_TABLE_QUERY);
+const POLL_INTERVAL = 15000;
+
+export const AssetsCatalogTable: React.FC<{prefixPath?: string[]}> = ({prefixPath}) => {
+ const queryResult = useQuery<AssetsTableQuery>(ASSETS_TABLE_QUERY, {
+ notifyOnNetworkStatusChange: true,
+ pollInterval: POLL_INTERVAL,
+ });
+
const [q, setQ] = React.useState<string>('');
const [view, setView] = useAssetView();
@@ -70,7 +75,7 @@ export const AssetsCatalogTable: React.FunctionComponent<{
return (
<div style={{flexGrow: 1}}>
- <Loading queryResult={queryResult}>
+ <Loading allowStaleData queryResult={queryResult}>
{({assetsOrError}) => {
if (assetsOrError.__typename === 'PythonError') {
return (
@@ -88,6 +93,7 @@ export const AssetsCatalogTable: React.FunctionComponent<{
prefixPath.every((part: string, i: number) => part === asset.key.path[i]),
)
: assetsOrError.nodes;
+
const matching = isFlattened
? filterAssets(assets, q)
: assets.filter((asset) => !q || matches(asset.key.path.join('/'), q));
@@ -124,6 +130,8 @@ export const AssetsCatalogTable: React.FunctionComponent<{
const showSwitcher = prefixPath || assets.some((asset) => asset.key.path.length > 1);
return (
<Wrapper>
+ <Box flex={{justifyContent: 'space-between'}}>
+ <div>
{showSwitcher ? (
<Group spacing={8} direction="row">
<ButtonGroup>
@@ -151,6 +159,9 @@ export const AssetsCatalogTable: React.FunctionComponent<{
) : (
<AssetSearch assets={allAssets} />
)}
+ </div>
+ <QueryCountdown pollInterval={POLL_INTERVAL} queryResult={queryResult} />
+ </Box>
<AssetsTable
assets={matching}
currentPath={prefixPath || []}
|
feat(device): add deconz support for WXCJKG13LMLightController
related to | @@ -199,26 +199,66 @@ class WXCJKG13LMLightController(LightController):
return {
"button_1_single": Light.OFF,
"button_1_double": Light.SYNC,
+ # "button_1_triple": "", # Nothing
# "button_1_hold": "", # Nothing
# "button_1_release": "", # Nothing
"button_2_single": Light.ON,
"button_2_double": Light.SYNC,
+ # "button_2_triple": "", # Nothing
# "button_2_hold": "", # Nothing
# "button_2_release": "", # Nothing
"button_3_single": Light.CLICK_BRIGHTNESS_DOWN,
"button_3_double": Light.ON_MIN_BRIGHTNESS,
+ # "button_3_triple": "", # Nothing
"button_3_hold": Light.HOLD_BRIGHTNESS_DOWN,
"button_3_release": Light.RELEASE,
"button_4_single": Light.CLICK_BRIGHTNESS_UP,
"button_4_double": Light.ON_FULL_BRIGHTNESS,
+ # "button_4_triple": "", # Nothing
"button_4_hold": Light.HOLD_BRIGHTNESS_UP,
"button_4_release": Light.RELEASE,
"button_5_single": Light.CLICK_COLOR_DOWN,
"button_5_double": Light.ON_MIN_COLOR_TEMP,
+ # "button_5_triple": "", # Nothing
"button_5_hold": Light.HOLD_COLOR_DOWN,
"button_5_release": Light.RELEASE,
"button_6_single": Light.CLICK_COLOR_UP,
"button_6_double": Light.ON_FULL_COLOR_TEMP,
+ # "button_6_triple": "", # Nothing
"button_6_hold": Light.HOLD_COLOR_UP,
"button_6_release": Light.RELEASE,
}
+
+ def get_deconz_actions_mapping(self) -> TypeActionsMapping:
+ return {
+ 1002: Light.OFF,
+ 1004: Light.SYNC,
+ # 1005: "", # Nothing
+ # 1001: "", # Nothing
+ # 1003: "", # Nothing
+ 2002: Light.ON,
+ 2004: Light.SYNC,
+ # 2005: "", # Nothing
+ # 2001: "", # Nothing
+ # 2003: "", # Nothing
+ 3002: Light.CLICK_BRIGHTNESS_DOWN,
+ 3004: Light.ON_MIN_BRIGHTNESS,
+ # 3005: "", # Nothing
+ 3001: Light.HOLD_BRIGHTNESS_DOWN,
+ 3003: Light.RELEASE,
+ 4002: Light.CLICK_BRIGHTNESS_UP,
+ 4004: Light.ON_FULL_BRIGHTNESS,
+ # 4005: "", # Nothing
+ 4001: Light.HOLD_BRIGHTNESS_UP,
+ 4003: Light.RELEASE,
+ 5002: Light.CLICK_COLOR_DOWN,
+ 5004: Light.ON_MIN_COLOR_TEMP,
+ # 5005: "", # Nothing
+ 5001: Light.HOLD_COLOR_DOWN,
+ 5003: Light.RELEASE,
+ 6002: Light.CLICK_COLOR_UP,
+ 6004: Light.ON_FULL_COLOR_TEMP,
+ # 6005: "", # Nothing
+ 6001: Light.HOLD_COLOR_UP,
+ 6003: Light.RELEASE,
+ }
|
enh(harvest) log n-rows cleared ...
useful explanation when selectively harvesting sources,
to understand how many more visits discovered. | @@ -72,12 +72,15 @@ def visits_to_sqlite(vit: Iterable[Res[DbVisit]], *, overwrite_db: bool) -> List
meta.create_all()
cleared: Set[str] = set()
+ ncleared = 0
with engine.begin() as conn:
for chunk in chunked(vit_ok(), n=_CHUNK_BY):
srcs = set(v.src or '' for v in chunk)
new = srcs.difference(cleared)
+
for src in new:
conn.execute(table.delete().where(table.c.src == src))
+ ncleared += conn.execute("SELECT changes()").fetchone()[0]
cleared.add(src)
bound = [binder.to_row(x) for x in chunk]
@@ -90,7 +93,9 @@ def visits_to_sqlite(vit: Iterable[Res[DbVisit]], *, overwrite_db: bool) -> List
errs = '' if errors == 0 else f', {errors} ERRORS'
total = ok + errors
what = 'overwritten' if overwrite_db else 'updated'
- logger.info('%s database "%s". %d total (%d OK%s)', what, db_path, total, ok, errs)
+ logger.info(
+ '%s database "%s". %d total (%d OK%s, %d cleared, +%d more)',
+ what, db_path, total, ok, errs, ncleared, ok - ncleared)
res: List[Exception] = []
if total == 0:
res.append(RuntimeError('No visits were indexed, something is probably wrong!'))
|
Add check for rtree import
Fixed incorrect return type from generate_hypotheses | @@ -5,8 +5,11 @@ from operator import attrgetter
import numpy as np
import scipy as sp
-import rtree
from scipy.spatial import KDTree
+try:
+ import rtree
+except ImportError:
+ rtree = None
from .base import DataAssociator
@@ -48,7 +51,7 @@ class DetectionKDTreeMixIn(DataAssociator):
def generate_hypotheses(self, tracks, detections, timestamp, **kwargs):
# No need for tree here.
if not tracks:
- return set()
+ return {}
if not detections:
return {track: self.hypothesiser.hypothesise(
track, detections, timestamp, **kwargs)
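A small sketch of the optional-import pattern introduced above, with a hypothetical guard at the point of use:

```python
try:
    import rtree
except ImportError:
    rtree = None  # the dependency is optional; record its absence

def require_rtree():
    # Hypothetical helper: fail clearly if the optional dependency is missing.
    if rtree is None:
        raise RuntimeError("rtree is not installed; install it or use a KDTree-based variant")
    return rtree
```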
|
Update README.md with new start
Change amount of memory, CPUs, IP addresses. | @@ -25,34 +25,17 @@ two local virtual machines.
If you want to change CTF Placeholder, edit
picoCTF-web/web/_includes/header.html
-If you want to change the IP address and VM names (e.g. to have duplicates
-running on the same host VM), change the following lines:
+There are now quick ways to change the memory, number of CPUs and IP addresses and run multiple instances.
-ansible/inventories/devploy:dev_web ansible_host=192.168.2.4 hostname=pico-local-dev-web-db
+After you do the git clone, rename picoCTF to picoCTF_XXX (fill in XXX with something unique for each)
+Start by running a command like the below.
+J is the number of CPUs
+M is the amount of memory in GB
+SIP is shell IP address (default is 192.168.2.2)
+WIP is web IP address (default is 192.68.2.3)
-ansible/inventories/devploy:dev_shell ansible_host=192.168.2.5 hostname=pico-local-dev-shell
+J=2 M=6 SIP=192.168.2.53 WIP=192.168.2.52 vagrant up shell && SIP=192.168.2.53 WIP=192.168.2.52 vagrant up web
-ansible/inventories/devploy:dev_web ansible_host=192.168.2.4 hostname=pico-local-dev-web-db
-
-ansible/group_vars/local_development/vars.yml:web_address: "http://192.168.2.4"
-
-ansible/group_vars/local_development/vars.yml:shell_hostname: "192.168.2.5"
-
-Vagrantfile: config.vm.define "shell2", primary: true do |shell|
-
-Vagrantfile: config.vm.define "web2", primary: true do |web|
-
-Vagrantfile: vb.name = "picoCTF-shell-dev2"
-
-Vagrantfile: vb.name = "picoCTF-web-dev2"
-
-Vagrantfile: also update IP addresses to match
-
-If you want to add more memory or CPUs, you can do (directly underneath vb.name):
-
-Vagrantfile: vb.customize ["modifyvm", :id, "--memory", "4096"]
-
-Vagrantfile: vb.customize ["modifyvm", :id, "--cpus", "4"]
## Project Overview
|
enhancement: add 'cls' argument to .processors.find
add 'cls' argument to anyconfig.processors.find to allow comparison with
another class object in anyconfig.processors.find_by_type, which may be called
later from it. | @@ -149,13 +149,14 @@ def find_by_maybe_file(obj, prs):
return processor()
-def find(obj, prs, forced_type=None):
+def find(obj, prs, forced_type=None, cls=anyconfig.models.processor.Processor):
"""
:param obj:
a file path, file or file-like object, pathlib.Path object or
`~anyconfig.globals.IOInfo` (namedtuple) object
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:param forced_type: Forced processor type or processor object itself
+ :param cls: A class object to compare with `forced_type` later
:return: an instance of processor class to process `obj` data
:raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
@@ -172,7 +173,7 @@ def find(obj, prs, forced_type=None):
return processor
- processor = find_by_type(forced_type, prs)
+ processor = find_by_type(forced_type, prs, cls=cls)
if processor is None:
raise UnknownProcessorTypeError(forced_type)
|
test_queue_system_while_system: wait for execution
we shouldn't leak that other execution we queued, otherwise we'll
be getting errors in the mgmtworker later. Wait for it to finish
properly. | @@ -413,6 +413,7 @@ class ExecutionsTest(AgentlessTestCase):
# Make sure snapshot_2 started while the snapshot_3 is queued again
self._assert_execution_status(snapshot_3.id, Execution.QUEUED)
self.wait_for_execution_to_end(snapshot_2)
+ self.wait_for_execution_to_end(snapshot_3)
def test_queue_system_exec_from_queue_while_exec_is_running(self):
"""
|
Update HtmlFilter.py
Expanded the example script to give a fuller example. | -# start the service
+# Some Services do not like anything other than pure text
+# while other Services will produce text with markup tags included
+# To join (route) the output of a Service with markup tags to one that
+# doesn't support the markup tags, we need to filter it.
+# Enter the HtmlFilter service
+
+# The most common use for this service is from a chatbot service like
+# ProgramAB to a TTS Service like MarySpeech Service.
+
+# Start the service
htmlfilter = Runtime.start("htmlfilter","HtmlFilter")
+
+# Start one of the Text To Speach Service
+mouth = Runtime.start("mouth", "MarySpeech")
+
+# Start a chatbox service to generate an output
+alice2 = Runtime.createAndStart("alice2", "ProgramAB")
+
+# Load a session into the chatbox
+alice2.startSession("user", "alice2")
+
+# Add routing from the the chatbox service to the HtmlFilter service
+alice2.addTextListener(htmlfilter)
+
+# Add routing to the TTS service from the htmlfilter
+htmlfilter.addTextListener(mouth)
|
make iOS build accept CFLAGS and CPPFLAGS set via profile
These flags were being overwritten, but to accept, for example,
the -fembed-bitcode flag, it needs to be picked up from the environment. | @@ -321,10 +321,14 @@ class LibcurlConan(ConanFile):
if self.settings.os == "iOS":
iphoneos = tools.apple_sdk_name(self.settings)
ios_dev_target = str(self.settings.os.version).split(".")[0]
+
+ env_cppflags = tools.get_env("CPPFLAGS", "")
+ socket_flags = " -DHAVE_SOCKET -DHAVE_FCNTL_O_NONBLOCK"
if self.settings.arch in ["x86", "x86_64"]:
- autotools_vars['CPPFLAGS'] = "-D__IPHONE_OS_VERSION_MIN_REQUIRED={}0000".format(ios_dev_target)
+ autotools_vars['CPPFLAGS'] = "-D__IPHONE_OS_VERSION_MIN_REQUIRED={}0000 {} {}".format(
+ ios_dev_target, socket_flags , env_cppflags)
elif self.settings.arch in ["armv7", "armv7s", "armv8"]:
- autotools_vars['CPPFLAGS'] = ""
+ autotools_vars['CPPFLAGS'] = "{} {}".format(socket_flags, env_cppflags)
else:
raise ConanInvalidConfiguration("Unsuported iOS arch {}".format(self.settings.arch))
@@ -342,13 +346,14 @@ class LibcurlConan(ConanFile):
arch_flag = "-arch {}".format(configure_arch)
ios_min_version = tools.apple_deployment_target_flag(self.settings.os, self.settings.os.version)
extra_flag = "-Werror=partial-availability"
- extra_def = " -DHAVE_SOCKET -DHAVE_FCNTL_O_NONBLOCK"
+
# if we debug, maybe add a -gdwarf-2 , but why would we want that?
autotools_vars['CC'] = cc
autotools_vars['IPHONEOS_DEPLOYMENT_TARGET'] = ios_dev_target
+ env_cflags = tools.get_env("CFLAGS", "")
autotools_vars['CFLAGS'] = "{} {} {} {}".format(
- sysroot, arch_flag, ios_min_version, extra_flag
+ sysroot, arch_flag, ios_min_version, env_cflags
)
if self.options.with_openssl:
@@ -358,8 +363,6 @@ class LibcurlConan(ConanFile):
else:
autotools_vars['LDFLAGS'] = "{} {}".format(arch_flag, sysroot)
- autotools_vars['CPPFLAGS'] += extra_def
-
elif self.settings.os == "Android":
# nothing do to at the moment, this seems to just work
pass
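A minimal sketch, with made-up flag values, of the pattern the diff uses: read flags that are already set in the environment (for example by a profile) and append them instead of overwriting them.

```python
import os

os.environ["CFLAGS"] = "-fembed-bitcode"      # e.g. injected by a build profile

env_cflags = os.environ.get("CFLAGS", "")
cflags = "{} {} {}".format("-isysroot /fake/sdk", "-arch arm64", env_cflags)

assert "-fembed-bitcode" in cflags            # profile-provided flags are preserved
```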
|
Fix MasterPublicIP regex in cfncluster-release-check.py
It was wrong because it was matching just the first character of the ip. | @@ -102,7 +102,7 @@ def run_test(region, distro, scheduler, key_name):
'status', testname], stderr=stderr_f)
dump_array = dump.splitlines()
for line in dump_array:
- m = re.search('MasterPublicIP: (.+?)', line)
+ m = re.search('MasterPublicIP: (.+)$', line)
if m:
master_ip = m.group(1)
break
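A small sketch, with a hypothetical log line, of why the original pattern failed: a lazy group with nothing required after it matches as little as possible, a single character, while anchoring with $ forces the capture to run to the end of the line.

```python
import re

line = "MasterPublicIP: 203.0.113.42"

lazy = re.search(r"MasterPublicIP: (.+?)", line)
anchored = re.search(r"MasterPublicIP: (.+)$", line)

assert lazy.group(1) == "2"                  # lazy group stops after one character
assert anchored.group(1) == "203.0.113.42"   # $ pushes the match to the end of the line
```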
|
Includes app_config when starting get_config.py
The get_config.py scripts supports providing the app_config
file path as an argument, but mbed.py does not include it,
when starting get_config.py, which means that the configuration
being built will differ from the output of get_config.py, when
building with an app_config != mbed_app.json. | @@ -2728,6 +2728,7 @@ def compile_(toolchain=None, target=None, macro=False, profile=False,
+ list(chain.from_iterable(zip(repeat('--profile'), profile or [])))
+ list(chain.from_iterable(zip(repeat('--source'), source)))
+ (['-v'] if verbose else [])
+ + (['--app-config', app_config] if app_config else [])
+ (list(chain.from_iterable(zip(repeat('--prefix'), config_prefix))) if config_prefix else []),
env=env)
else:
|
Update development workflow documentation
Changed formatting of the "make" keyword to indicate that it's a command. | @@ -29,9 +29,9 @@ directory.
#### Build & Installation
-Elyra uses make to automate some of the development workflow tasks.
+Elyra uses `make` to automate some of the development workflow tasks.
-Issuing a make command with no task specified will provide a list of the currently supported tasks.
+Issuing a `make` command with no task specified will provide a list of the currently supported tasks.
```bash
$ make
|
Fix
fix for backing off to previous standard pixel renderer | @@ -184,7 +184,7 @@ class RendererBase(object):
if dst_order is None:
dst_order = self.viewer.rgb_order
if src_order is None:
- src_order = self.std_order
+ src_order = self.rgb_order
if src_order != dst_order:
arr = trcalc.reorder_image(dst_order, arr, src_order)
|
Tests: different prefix for mock rses created by factories
this allows for a drop-in replacement of pre-defined (ex: MOCK4)
rses in tests which rely on the RSE path being at a specific
location. Some such tests are in test_bin_rucio.py | @@ -75,11 +75,14 @@ class TemporaryRSEFactory:
else:
rse_id = rse_core.add_rse(rse_name, vo=self.vo, **(add_rse_kwargs or {}))
if scheme and protocol_impl:
+ prefix = '/test_%s/' % rse_id
+ if protocol_impl == 'rucio.rse.protocols.posix.Default':
+ prefix = '/tmp/rucio_rse/test_%s/' % rse_id
protocol_parameters = {
'scheme': scheme,
'hostname': '%s.cern.ch' % rse_id,
'port': 0,
- 'prefix': '/test_%s/' % rse_id,
+ 'prefix': prefix,
'impl': protocol_impl,
'domains': {
'wan': {
|
Update hclu.py
reformat code | @@ -90,7 +90,9 @@ class HighConfidenceLowUncertainty(Attack):
return (pred - args['conf']).reshape(-1)
def constraint_unc(x, args): # constraint for uncertainty
- return (args['max_uncertainty'] - (args['classifier'].predict_uncertainty(x.reshape(1, -1))).reshape(-1))[0]
+ return (
+ args['max_uncertainty'] - (args['classifier'].predict_uncertainty(x.reshape(1, -1))).reshape(
+ -1))[0]
bounds = []
# adding bounds, to not go away from original data
@@ -98,14 +100,17 @@ class HighConfidenceLowUncertainty(Attack):
bounds.append((self.min_val, self.max_val))
for i in range(np.shape(x)[0]): # go though data amd craft
# get properties for attack
- max_uncertainty = self.unc_increase * self.classifier.predict_uncertainty(x_adv[i].reshape(1, -1))
+ max_uncertainty = self.unc_increase * self.classifier.predict_uncertainty(
+ x_adv[i].reshape(1, -1))
class_zero = not self.classifier.predict(x_adv[i].reshape(1, -1))[0, 0] < 0.5
- init_args = {'classifier': self.classifier, 'class_zero': class_zero, 'max_uncertainty': max_uncertainty,'conf':self.conf}
+ init_args = {'classifier': self.classifier, 'class_zero': class_zero,
+ 'max_uncertainty': max_uncertainty, 'conf': self.conf}
constr_conf = {'type': 'ineq', 'fun': constraint_conf, 'args': (init_args,)}
constr_unc = {'type': 'ineq', 'fun': constraint_unc, 'args': (init_args,)}
args = {'args': init_args, 'orig': x[i].reshape(-1)}
# #finally, run optimization
- x_adv[i] = minimize(minfun, x_adv[i], args=args, bounds=bounds, constraints=[constr_conf, constr_unc])['x']
+ x_adv[i] = \
+ minimize(minfun, x_adv[i], args=args, bounds=bounds, constraints=[constr_conf, constr_unc])['x']
return x_adv
def set_params(self, **kwargs):
|
Add to Desktop menu item in tree view
fixes frappe/erpnext#10548 | @@ -339,7 +339,15 @@ frappe.views.TreeView = Class.extend({
if (has_perm) {
me.page.add_menu_item(menu_item["label"], menu_item["action"]);
}
- })
+ });
+
+ // last menu item
+ me.page.add_menu_item(__('Add to Desktop'), () => {
+ const label = me.doctype === 'Account' ?
+ __('Chart of Accounts') :
+ __(me.doctype);
+ frappe.add_to_desktop(label, me.doctype);
+ });
}
});
|
Improves various docstrings and comments.
Thanks to for suggesting most of these in their code review. | @@ -24,14 +24,6 @@ class RedisCache:
"""
A simplified interface for a Redis connection.
- This class must be created as a class attribute in a class. This is because it
- uses __set_name__ to create a namespace like MyCog.my_class_attribute which is
- used as a hash name when we store stuff in Redis, to prevent collisions.
-
- The class this object is instantiated in must also contains an attribute with an
- instance of Bot. This is because Bot contains our redis_pool, which is how this
- class communicates with the Redis server.
-
We implement several convenient methods that are fairly similar to have a dict
behaves, and should be familiar to Python users. The biggest difference is that
all the public methods in this class are coroutines, and must be awaited.
@@ -39,6 +31,10 @@ class RedisCache:
Because of limitations in Redis, this cache will only accept strings, integers and
floats both for keys and values.
+ Please note that this class MUST be created as a class attribute, and that that class
+ must also contain an attribute with an instance of our Bot. See `__get__` and `__set_name__`
+ for more information about how this works.
+
Simple example for how to use this:
class SomeCog(Cog):
@@ -78,12 +74,18 @@ class RedisCache:
_namespaces = []
def __init__(self) -> None:
- """Raise a NotImplementedError if `__set_name__` hasn't been run."""
+ """Initialize the RedisCache."""
self._namespace = None
self.bot = None
def _set_namespace(self, namespace: str) -> None:
"""Try to set the namespace, but do not permit collisions."""
+ # We need a unique namespace, to prevent collisions. This loop
+ # will try appending underscores to the end of the namespace until
+ # it finds one that is unique.
+ #
+ # For example, if `john` and `john_` are both taken, the namespace will
+ # be `john__` at the end of this loop.
while namespace in self._namespaces:
namespace += "_"
@@ -136,11 +138,26 @@ class RedisCache:
Set the namespace to Class.attribute_name.
Called automatically when this class is constructed inside a class as an attribute.
+
+ This class MUST be created as a class attribute in a class, otherwise it will raise
+ exceptions whenever a method is used. This is because it uses this method to create
+ a namespace like `MyCog.my_class_attribute` which is used as a hash name when we store
+ stuff in Redis, to prevent collisions.
"""
self._set_namespace(f"{owner.__name__}.{attribute_name}")
def __get__(self, instance: RedisCache, owner: Any) -> RedisCache:
- """Fetch the Bot instance, we need it for the redis pool."""
+ """
+ This is called if the RedisCache is a class attribute, and is accessed.
+
+ The class this object is instantiated in must contain an attribute with an
+ instance of Bot. This is because Bot contains our redis_session, which is
+ the mechanism by which we will communicate with the Redis server.
+
+ Any attempt to use RedisCache in a class that does not have a Bot instance
+ will fail. It is mostly intended to be used inside of a Cog, although theoretically
+ it should work in any class that has a Bot instance.
+ """
if self.bot:
return self
|
[varLib] Fix building variation of PairPosFormat2
I broke this with
Ouch! | @@ -278,13 +278,8 @@ def merge(merger, self, lst):
merger.valueFormat1 = self.ValueFormat1
merger.valueFormat2 = self.ValueFormat2
- if self.Format == 2:
- # Everything must match; we don't support smart merge yet.
- merger.mergeObjects(self, lst)
- del merger.valueFormat1, merger.valueFormat2
- return
+ if self.Format == 1:
- assert self.Format == 1
# Merge everything else; makes sure Format is the same.
merger.mergeObjects(self, lst,
exclude=('Coverage',
@@ -308,6 +303,14 @@ def merge(merger, self, lst):
merger.mergeLists(self.PairSet, padded)
+ elif self.Format == 2:
+
+ # Everything must match; we don't support smart merge yet.
+ merger.mergeObjects(self, lst)
+
+ else:
+ assert 0
+
del merger.valueFormat1, merger.valueFormat2
# Now examine the list of value records, and update to the union of format values,
|
[Doctest] Fix `Blenderbot` doctest
fix blenderbot doctest
add correct expected value | @@ -544,7 +544,7 @@ BLENDERBOT_GENERATION_EXAMPLE = r"""
>>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="pt")
>>> next_reply_ids = model.generate(**inputs)
>>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
- Bot: That's too bad. Have you tried encouraging them to change their eating habits?
+ Bot: I see. Well, it's good that they're trying to change their eating habits.
```
"""
|
Round model lagging frame drop percentage
alerts: round model frame drop percentage | @@ -277,7 +277,7 @@ def high_cpu_usage_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: boo
def modeld_lagging_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
- return NormalPermanentAlert("Driving model lagging", f"{sm['modelV2'].frameDropPerc}% frames dropped")
+ return NormalPermanentAlert("Driving model lagging", f"{sm['modelV2'].frameDropPerc:.1f}% frames dropped")
def wrong_car_mode_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
|
chore: new forum URL
[skip ci] | blank_issues_enabled: false
contact_links:
- name: Community Forum
- url: https://discuss.erpnext.com/
+ url: https://discuss.frappe.io/c/framework/5
about: For general QnA, discussions and community help.
|
Fix dependency in forseti.service
Wants= is only valid in the Unit section of a systemd unit. | @@ -48,6 +48,7 @@ SQL_PROXY_COMMAND+=" -instances=${SQL_INSTANCE_CONN_STRING}=tcp:${SQL_PORT}"
API_SERVICE="$(cat << EOF
[Unit]
Description=Forseti API Server
+Wants=cloudsqlproxy.service
[Service]
User=ubuntu
Restart=always
@@ -55,7 +56,6 @@ RestartSec=3
ExecStart=$FORSETI_COMMAND
[Install]
WantedBy=multi-user.target
-Wants=cloudsqlproxy.service
EOF
)"
echo "$API_SERVICE" > /tmp/forseti.service
|
Implement BatchIndex, TransactionIndex for InMemoryBlockstore
The InMemoryBlockstore is used in the BlockManager tests and will be used in
the ChainCommitState tests | @@ -126,6 +126,40 @@ impl BlockStore for InMemoryBlockStore {
}
}
+impl BatchIndex for InMemoryBlockStore {
+ fn contains(&self, id: &str) -> Result<bool, BlockStoreError> {
+ Ok(self
+ .iter()?
+ .flat_map(|block| block.batches)
+ .any(|batch| &batch.header_signature == id))
+ }
+
+ fn get_block_by_id(&self, id: &str) -> Result<Option<Block>, BlockStoreError> {
+ Ok(self
+ .iter()?
+ .find(|block| block.batch_ids.contains(&id.into())))
+ }
+}
+
+impl TransactionIndex for InMemoryBlockStore {
+ fn contains(&self, id: &str) -> Result<bool, BlockStoreError> {
+ Ok(self
+ .iter()?
+ .flat_map(|block| block.batches)
+ .flat_map(|batch| batch.transactions)
+ .any(|txn| &txn.header_signature == id))
+ }
+
+ fn get_block_by_id(&self, id: &str) -> Result<Option<Block>, BlockStoreError> {
+ Ok(self.iter()?.find(|block| {
+ block
+ .batches
+ .iter()
+ .any(|batch| batch.transaction_ids.contains(&id.into()))
+ }))
+ }
+}
+
struct InMemoryGetBlockIterator<'a> {
blockstore: &'a InMemoryBlockStore,
block_ids: Vec<String>,
|
Adjust command to backfill (less granular)
Rates began from
This adjusts the command to backfill by year.
If 2016, let's backfill from May.
If 2017, let's backfill from the beginning of the year. | @@ -153,12 +153,19 @@ class PopulateMonthlyBilling(Command):
option_list = (
Option('-s', '-service-id', dest='service_id',
help="Service id to populate monthly billing for"),
- Option('-m', '-month', dest="month", help="Use for integer value for month, e.g. 7 for July"),
Option('-y', '-year', dest="year", help="Use for integer value for year, e.g. 2017")
)
- def run(self, service_id, month, year):
- print('Starting populating monthly billing')
+ def run(self, service_id, year):
+ start, end = 1, 13
+ if year == '2016':
+ start = 6
+
+ print('Starting populating monthly billing for {}'.format(year))
+ for i in range(start, end):
+ self.populate(service_id, year, i)
+
+ def populate(self, service_id, year, month):
create_or_update_monthly_billing_sms(service_id, datetime(int(year), int(month), 1))
results = get_monthly_billing_sms(service_id, datetime(int(year), int(month), 1))
print("Finished populating data for {} for service id {}".format(month, service_id))
|
Updates setup.py to reflect new dependencies & templates.
Matplotlib dependency replaced with plotly.
Removed python-pptx optional dependency.
Altered staged files from templates/*.{tex,pptx} to
templates/*.html and templates/css/*.css files. | @@ -41,11 +41,10 @@ setup(name='pyGSTi',
author_email='[email protected]',
packages=['pygsti', 'pygsti.algorithms', 'pygsti.construction', 'pygsti.drivers', 'pygsti.io', 'pygsti.objects', 'pygsti.optimize', 'pygsti.report', 'pygsti.tools'],
package_dir={'': 'packages'},
- package_data={'pygsti.report': ['templates/*.tex', 'templates/*.pptx']},
- requires=['numpy','scipy','matplotlib','pyparsing'],
+ package_data={'pygsti.report': ['templates/*.html', 'templates/css/*.css']},
+ requires=['numpy','scipy','plotly','pyparsing'],
extras_require = {
'diamond norm computation': ['cvxpy', 'cvxopt'],
- 'powerpoint file generation': ['python-pptx'],
'nose testing' : ['nose'],
'image comparison' : ['Pillow'],
'accurate memory profiling' : ['psutil']
|
Update README.md
Added a step in the readme for pipx-installation, to make sure 'pipx ensurepath' is executed to access the installed packages. | @@ -62,6 +62,7 @@ If you aren't familiar with installing python applications, I recommend you inst
* Open `Terminal` (search for `Terminal` in Spotlight or look in `Applications/Utilities`)
* Install `homebrew` according to instructions at [https://brew.sh/](https://brew.sh/)
* Type the following into Terminal: `brew install pipx`
+* Ensure that pipx installed packages are accessible in your PATH by typing: `pipx ensurepath`
* Then type this: `pipx install osxphotos`
* Now you should be able to run `osxphotos` by typing: `osxphotos`
|
filestore-to-bluestore: do not use --destroy
Do not use `--destroy` when zapping a device.
Otherwise, it destroys VGs while they are still needed to redeploy the
OSDs. | ceph_volume:
action: "zap"
osd_fsid: "{{ item }}"
+ destroy: False
environment:
CEPH_VOLUME_DEBUG: 1
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
|
Disable dagstermill 3.5 tests
Summary:
We are tracking this issue here,
Until it's fixed, let's disable this
Test Plan: BK
Reviewers: nate | @@ -101,11 +101,12 @@ def publish_test_images():
return tests
-def python_modules_tox_tests(directory):
+def python_modules_tox_tests(directory, supported_pythons=None):
label = directory.replace("/", "-")
tests = []
# See: https://github.com/dagster-io/dagster/issues/1960
- for version in SupportedPythons + [SupportedPython.V3_8]:
+ supported_pythons = supported_pythons or SupportedPythons + [SupportedPython.V3_8]
+ for version in supported_pythons:
# pyspark doesn't support Python 3.8 yet
# See: https://github.com/dagster-io/dagster/issues/1960
@@ -659,7 +660,16 @@ def releasability_tests():
steps += python_modules_tox_tests("dagster")
steps += python_modules_tox_tests("dagster-graphql")
- steps += python_modules_tox_tests("dagstermill")
+ steps += python_modules_tox_tests(
+ "dagstermill",
+ # Disabled 3.5 https://github.com/dagster-io/dagster/issues/2034
+ supported_pythons=[
+ SupportedPython.V2_7,
+ SupportedPython.V3_6,
+ SupportedPython.V3_7,
+ SupportedPython.V3_8,
+ ],
+ )
steps += library_tests()
steps += releasability_tests()
|