Columns: message (string, lengths 13–484), diff (string, lengths 38–4.63k)
apidoc for emr_pyspark_step_launcher Summary: {F283445} Test Plan: - manual inspection Reviewers: nate, sashank, yuhan
{ "spark_config": get_spark_config(), "cluster_id": Field( - StringSource, description="Name of the job flow (cluster) on which to execute" + StringSource, description="Name of the job flow (cluster) on which to execute." + ), + "region_name": Field(StringSource, description="The AWS region that the cluster is in."), + "action_on_failure": Field( + str, + is_required=False, + default_value="CANCEL_AND_WAIT", + description="The EMR action to take when the cluster step fails: " + "https://docs.aws.amazon.com/emr/latest/APIReference/API_StepConfig.html", ), - "region_name": Field(StringSource), - "action_on_failure": Field(str, is_required=False, default_value="CANCEL_AND_WAIT"), "staging_bucket": Field( StringSource, is_required=True, @@ -89,6 +95,12 @@ def emr_pyspark_step_launcher(context): return EmrPySparkStepLauncher(**context.resource_config) +emr_pyspark_step_launcher.__doc__ = "\n".join( + "- **" + option + "**: " + (field.description or "") + for option, field in emr_pyspark_step_launcher.config_schema.config_type.fields.items() +) + + class EmrPySparkStepLauncher(StepLauncher): def __init__( self,
Update for JupyterLab version 2 and 3 Made change to datadir and window.Jupyter_VPython to remove leading dot before directory /static so that it works in JupyterLab 2 and 3.
@@ -83,8 +83,8 @@ export function setupWebsocket(msg,serviceUrl) { wscheckfontsloaded(msg,serviceUrl) } -var datadir = './static/lab/vpython_data/' -window.Jupyter_VPython = "./static/lab/vpython_data/" // prefix used by glow.min.js for textures +var datadir = '/static/lab/vpython_data/' +window.Jupyter_VPython = "/static/lab/vpython_data/" // prefix used by glow.min.js for textures function fontloading() { "use strict";
Increase warning threshold for total spawn rate and reword the warning content.
@@ -657,14 +657,15 @@ class MasterRunner(DistributedRunner): ) # Since https://github.com/locustio/locust/pull/1621, the master is responsible for dispatching and controlling - # the total spawn rate which is more CPU intensive for the master. The number 100 is a little arbitrary as the computational + # the total spawn rate which is more CPU intensive for the master. The number 200 is a little arbitrary as the computational # load on the master greatly depends on the number of workers and the number of user classes. For instance, # 5 user classes and 5 workers can easily do 200/s. However, 200/s with 50 workers and 20 user classes will likely make the # dispatch very slow because of the required computations. I (@mboutet) doubt that many Locust's users are # spawning that rapidly. If so, then they'll likely open issues on GitHub in which case I'll (@mboutet) take a look. - if spawn_rate > 100: + if spawn_rate > 200: logger.warning( - "Your selected total spawn rate is high (>100), and this is known to sometimes cause issues. Do you really need to ramp up that fast?" + "Your selected total spawn rate is quite high (>200), and this is known to sometimes cause performance issues on the master. " + "Do you really need to ramp up that fast? If so and if encountering performance issues on the master, free to open an issue." ) if self.state != STATE_RUNNING and self.state != STATE_SPAWNING:
Update update.py Fixed a misspelling
@@ -21,7 +21,7 @@ class Updater(object): The first line is processed by :meth:`chainer.dataset.Iterator.__next__` and the second and third lines are processed by :meth:`~chainer.Optimizer.update`. - but also users can implements the original updating process by overide :meth:`update`. + but also users can implements the original updating process by overiding :meth:`update`. """
Make error-reporting for stratisd not running more precise The ServiceUnknown error has something to do with actually trying to start the service. The NameHasNoOwner error is what you initially get when the service is missing.
@@ -83,10 +83,6 @@ def interpret_errors(errors): "Most likely there is an error in the source at line %d " "in file %s. The text of the line is \"%s\".") return fmt_str % (frame.lineno, frame.filename, frame.line) - if isinstance(error, dbus.exceptions.DBusException) and \ - error.get_dbus_name() == \ - 'org.freedesktop.DBus.Error.ServiceUnknown': - return "Most likely the Stratis daemon, stratisd, is not running." if isinstance(error, DbusClientUnknownSearchPropertiesError): return _STRATIS_CLI_BUG_MSG if isinstance(error, DbusClientMissingSearchPropertiesError): @@ -102,6 +98,10 @@ def interpret_errors(errors): error.get_dbus_name() == \ 'org.freedesktop.DBus.Error.AccessDenied': return "Most likely stratis has insufficient permissions for the action requested." + if isinstance(error, dbus.exceptions.DBusException) and \ + error.get_dbus_name() == \ + 'org.freedesktop.DBus.Error.NameHasNoOwner': + return "Most likely the Stratis daemon, stratisd, is not running." return None # pylint: disable=broad-except
update terraform to add rating service URL to frontend add env var RATING_SERVICE_ADDR to frontend deployment.
@@ -190,6 +190,7 @@ resource "null_resource" "deploy_services" { kubectl apply -f ../kubernetes-manifests/productcatalogservice.yaml kubectl apply -f ../kubernetes-manifests/recommendationservice.yaml kubectl apply -f ../kubernetes-manifests/shippingservice.yaml + kubectl set env -f ../kubernetes-manifests/frontend.yaml RATING_SERVICE_ADDR=${module.ratingservice.service_url} EOT }
Unpin pyproj Latest version has fixed the issue requiring the pin in the first place.
@@ -118,8 +118,7 @@ ENV LD_LIBRARY_PATH=/opt/conda/lib RUN apt-get -y install zlib1g-dev liblcms2-dev libwebp-dev libgeos-dev && \ pip install matplotlib && \ pip install pyshp && \ - # b/144569992 pyproj 2.4.1 is failing to install because of missing METADATA file. - pip install pyproj==2.4.0 && \ + pip install pyproj && \ conda install basemap && \ # sasl is apparently an ibis dependency apt-get -y install libsasl2-dev && \
Bug fix for the case where the password is like admin:adm:in
@@ -42,7 +42,8 @@ class ModuleProcessor: authorization = binascii.a2b_base64( authorization ).decode('utf-8') # binascii is returning bytes - username, password = authorization.split(":") + username = authorization.split(":")[0] + password = ":".join(authorization.split(":")[1:]) insert_honeypot_events_from_module_processor( ip, username,
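A note on the idiom (not part of the commit): Python's `str.split` takes a `maxsplit` argument, so the same "split only on the first colon" behaviour can be expressed in one call. A minimal sketch with a made-up credential string:

```python
# Hypothetical example; not from the project source.
authorization = "admin:adm:in"

# maxsplit=1 splits only on the first colon, so any colons in the
# password itself are preserved.
username, password = authorization.split(":", 1)

print(username)  # admin
print(password)  # adm:in
```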
Increase memory allocated to Fuchsia VMs to 3GB. The previous value of 2GB was the same as the rss_limit_mb, so in conditions with high memory usage the system would OOM before libFuzzer could have a chance to catch it.
@@ -136,7 +136,7 @@ class QemuProcess(object): # yapf: disable qemu_args = [ - '-m', '2048', + '-m', '3072', '-nographic', '-kernel', qemu_vars['kernel_path'], '-initrd', qemu_vars['initrd_path'],
Update CONTRIBUTING.md add myself
@@ -222,7 +222,7 @@ You can choose from three different environments to test your fixes/changes, bas ``./cloudformation/delete-databases.sh`` -## Recommended Visual Studio Code Recommended settings +## Recommended Visual Studio Code Recommended setting ```json {
Fix positional-only differences in `threading.local` Last one of these, I think.
@@ -114,9 +114,9 @@ TIMEOUT_MAX: float class ThreadError(Exception): ... class local: - def __getattribute__(self, name: str) -> Any: ... - def __setattr__(self, name: str, value: Any) -> None: ... - def __delattr__(self, name: str) -> None: ... + def __getattribute__(self, __name: str) -> Any: ... + def __setattr__(self, __name: str, __value: Any) -> None: ... + def __delattr__(self, __name: str) -> None: ... class Thread: name: str
skip Amazon Linux Twisted tests for now Tested-by: Build Bot
@@ -132,6 +132,8 @@ class _TxTestCase(TestCase, TestMixin): return TxCluster def setUp(self): + if re.match(r'.*am.?z.?n.*',distro.id().lower()): + raise SkipTest("TODO: fix Amazon Linux Twisted timeouts") TestMixin.setUp(self) self.setUpTrace(self) super(_TxTestCase, self).setUp()
applies the necessary changes * applies the necessary changes Closes * removes the extra space character * Update molpro.py * Update molpro.py
@@ -15,7 +15,7 @@ from .model import ProgramHarness class MolproHarness(ProgramHarness): - _defaults = { + _defaults: Dict[str, Any] = { "name": "Molpro", "scratch": True, "thread_safe": False, @@ -50,6 +50,7 @@ class MolproHarness(ProgramHarness): _post_hf_methods: Set[str] = {'MP2', 'CCSD', 'CCSD(T)'} _supported_methods: Set[str] = {*_scf_methods, *_post_hf_methods} + class Config(ProgramHarness.Config): pass
make conv_depthwise_dnnlowp_op_test faster Summary: Pull Request resolved: As title says.
@@ -23,7 +23,7 @@ class DNNLowPOpConvDepthWiseTest(hu.HypothesisTestCase): stride=st.integers(1, 2), size=st.integers(10, 16), # depthwise 3x3 fast path only works for a multiple of 8 - group=st.sampled_from([8, 32, 40]), + group=st.sampled_from([8, 24, 32]), batch_size=st.integers(1, 3), prepack_weight=st.booleans(), share_col_buffer=st.booleans(), @@ -166,10 +166,10 @@ class DNNLowPOpConvDepthWiseTest(hu.HypothesisTestCase): @given( stride=st.integers(1, 2), - size=st.integers(10, 16), + size=st.integers(4, 12), # depthwise 3x3x3 fast path only works for a multiple of 8 - group=st.sampled_from([8, 32, 40]), - batch_size=st.integers(1, 3), + group=st.sampled_from([8, 24, 32]), + batch_size=st.integers(1, 2), prepack_weight=st.booleans(), fuse_relu=st.booleans(), share_col_buffer=st.booleans(),
validate: fix credentials validation This task fails with an Ansible error instead of the expected message when `ceph_docker_registry_auth` is enabled and `ceph_docker_registry_username` is undefined. Closes:
msg: 'ceph_docker_registry_username and/or ceph_docker_registry_password variables need to be set' when: - ceph_docker_registry_auth | bool - - (ceph_docker_registry_username is not defined or ceph_docker_registry_password is not defined) - - (ceph_docker_registry_username | length == 0 or ceph_docker_registry_password | length == 0) + - (ceph_docker_registry_username is not defined or ceph_docker_registry_password is not defined) or + (ceph_docker_registry_username | length == 0 or ceph_docker_registry_password | length == 0)
WL: outputs can skip to current screen if there's only one May as well save those function calls, considering single-output is likely the most common setup.
@@ -86,6 +86,8 @@ class Output(HasListeners): @property def screen(self) -> Screen: assert self.core.qtile is not None + + if len(self.core.qtile.screens) > 1: x, y, w, h = self.get_geometry() for screen in self.core.qtile.screens: if screen.x == x and screen.y == y:
Bound arguments are ordered The `arguments` attribute of `BoundArguments` is specified as an "ordered, mutable mapping (collections.OrderedDict)", not just a mutable mapping:
import sys from typing import (AbstractSet, Any, Callable, Dict, Generator, List, Mapping, - MutableMapping, NamedTuple, Optional, Sequence, Tuple, - Union, + NamedTuple, Optional, Sequence, Tuple, Union, ) from types import CodeType, FrameType, ModuleType, TracebackType +from collections import OrderedDict # # Types and members @@ -159,7 +159,7 @@ class Parameter: annotation: Any = ...) -> Parameter: ... class BoundArguments: - arguments: MutableMapping[str, Any] + arguments: OrderedDict[str, Any] args: Tuple[Any, ...] kwargs: Dict[str, Any] signature: Signature
remove unused capture Summary: We don't use this in the lambda body anymore. Remove it to fix a warning. Pull Request resolved:
@@ -84,7 +84,7 @@ void binary_kernel_reduce(TensorIterator& iter, ops_t ops, init_t init) { ); iter.foreach_reduced_elt([&](TensorIterator &sub_iter) { auto reduction_body = [&](acc_t acc, int64_t begin, int64_t end) -> acc_t { - sub_iter.serial_for_each([&acc, &ops, &init](int ntensors, char** data, const int64_t* strides, int64_t size) { + sub_iter.serial_for_each([&acc, &ops](int ntensors, char** data, const int64_t* strides, int64_t size) { AT_ASSERT(ntensors == 2); char *in = data[1]; int64_t stride = strides[1];
color for markdown Use the HTML version of color rendering
-:math:`\mathrm{\color{orange} {QuSpin\ 0.3.3}}` here (updated on 2019.10.15) +<span style="color:orange">QuSpin 0.3.3</span> here (updated on 2019.10.15) ============================================================================ -:math:`\mathrm{\color{red} {Highlights}}`: OpenMP support here; Constrained Hilbert spaces support now here! +<span style="color:red">Highlights</span>: OpenMP support here; Constrained Hilbert spaces support now here! =================================================================== Check out :ref:`parallelization-label` and the example script :ref:`example12-label`. @@ -57,7 +57,7 @@ Improved Functionality ++++++++++++++++++++++ * support for python 3.7. -* :math:`\mathrm{\color{red} {discontinued\ support}}` for python 3.5 on all platforms and python 2.7 on windows. QuSpin for these versions will remain available to download up to and including QuSpin 0.3.0, but they are no longer being maintained. +* <span style="color:red">discontinued support</span> for python 3.5 on all platforms and python 2.7 on windows. QuSpin for these versions will remain available to download up to and including QuSpin 0.3.0, but they are no longer being maintained. * matplotlib is no longer a required package to install quspin. It is still required to run the examples, though. * parallelization: New parallel features added or improved + OpenMP support for osx. Requires a different build of QuSpin (see also :ref:`parallelization-label`). * new OpenMP features in operators module (see :ref:`parallelization-label` and example script :ref:`example12-label`).
chore: use Search repositories endpoint (GitHub API) Currently, updateapilist uses the List organization repositories endpoint. We can improve this call by using the Search repositories endpoint.
@@ -117,7 +117,6 @@ def client_for_repo(repo_slug) -> Optional[CloudClient]: return CloudClient(response.json()) -REPO_LIST_JSON = "https://api.github.com/orgs/googleapis/repos?per_page=100&page={page_number}" REPO_EXCLUSION = [ # core libraries "googleapis/python-api-core", @@ -150,15 +149,17 @@ def all_clients() -> List[CloudClient]: while first_request or 'next' in response.links: if first_request: - url = REPO_LIST_JSON.format(page_number=1) + url = "https://api.github.com/search/repositories?page=1" first_request = False else: url = response.links['next']['url'] headers = {'Authorization': f'token {token}'} - response = requests.get(url=url, headers= headers) - if len(response.json()) == 0: + params = {'per_page': 100, "q": "python- in:name org:googleapis"} + response = requests.get(url=url, params=params, headers=headers) + repositories = response.json().get("items", []) + if len(repositories) == 0: break - clients.extend(get_clients_batch_from_response_json(response.json())) + clients.extend(get_clients_batch_from_response_json(repositories)) # remove empty clients return [client for client in clients if client]
revert changes with minimum_review This solution might be too dangerous and have unforeseeable consequences.
@@ -762,7 +762,7 @@ class ExtractReview(pyblish.api.InstancePlugin): """ start_frame = int(start_frame) end_frame = int(end_frame) - collections = clique.assemble(files, minimum_items=1)[0] + collections = clique.assemble(files)[0] msg = "Multiple collections {} found.".format(collections) assert len(collections) == 1, msg col = collections[0] @@ -846,7 +846,7 @@ class ExtractReview(pyblish.api.InstancePlugin): dst_staging_dir = new_repre["stagingDir"] if temp_data["input_is_sequence"]: - collections = clique.assemble(repre["files"], minimum_items=1)[0] + collections = clique.assemble(repre["files"])[0] full_input_path = os.path.join( src_staging_dir, collections[0].format("{head}{padding}{tail}")
Fix BaseDataClass Summary: Pull Request resolved: There is some inheritance problem when `BaseDataClass` is a standard dataclass but the subclass is a pydantic dataclass. Since `BaseDataClass` doesn't have its own fields, it doesn't need to be a dataclass.
We should revisit this at some point. Config classes shouldn't subclass from this. """ import dataclasses -from dataclasses import dataclass from typing import cast -@dataclass class BaseDataClass: def _replace(self, **kwargs): return cast(type(self), dataclasses.replace(self, **kwargs))
Fix GlobalIndex with non-integer values PointMass and BearingElements with the "n_link" argument were getting non-integer values. Matrices would not be built, since these values are used as array indexes (which must be integers).
@@ -355,29 +355,33 @@ class Rotor(object): global_dof_mapping = {} for k, v in dof_mapping.items(): dof_letter, dof_number = k.split("_") - global_dof_mapping[dof_letter + "_" + str(int(dof_number) + elm.n)] = v + global_dof_mapping[ + dof_letter + "_" + str(int(dof_number) + elm.n) + ] = int(v) dof_tuple = namedtuple("GlobalIndex", global_dof_mapping) if elm.n <= n_last + 1: for k, v in global_dof_mapping.items(): - global_dof_mapping[k] = self.number_dof * elm.n + v + global_dof_mapping[k] = int(self.number_dof * elm.n + v) else: for k, v in global_dof_mapping.items(): - global_dof_mapping[k] = ( + global_dof_mapping[k] = int( 2 * n_last + self.number_dof / 2 * elm.n + self.number_dof + v ) if hasattr(elm, "n_link") and elm.n_link is not None: if elm.n_link <= n_last + 1: - global_dof_mapping[f"x_{elm.n_link}"] = self.number_dof * elm.n_link - global_dof_mapping[f"y_{elm.n_link}"] = ( + global_dof_mapping[f"x_{elm.n_link}"] = int( + self.number_dof * elm.n_link + ) + global_dof_mapping[f"y_{elm.n_link}"] = int( self.number_dof * elm.n_link + 1 ) else: - global_dof_mapping[f"x_{elm.n_link}"] = ( + global_dof_mapping[f"x_{elm.n_link}"] = int( 2 * n_last + 2 * elm.n_link + self.number_dof ) - global_dof_mapping[f"y_{elm.n_link}"] = ( + global_dof_mapping[f"y_{elm.n_link}"] = int( 2 * n_last + 2 * elm.n_link + self.number_dof + 1 )
[glyf] Remove reprflag() If compatibility is an issue, we should rewrite it in terms of "bin(flag)".
@@ -1732,20 +1732,6 @@ class GlyphCoordinates(object): __nonzero__ = __bool__ -def reprflag(flag): - bin = "" - if isinstance(flag, str): - flag = byteord(flag) - while flag: - if flag & 0x01: - bin = "1" + bin - else: - bin = "0" + bin - flag = flag >> 1 - bin = (14 - len(bin)) * "0" + bin - return bin - - if __name__ == "__main__": import doctest, sys sys.exit(doctest.testmod().failed)
Improved handling of Quantities as array_annotations further Had forgotten _merge_array_annotations in SpikeTrain
@@ -690,8 +690,12 @@ class SpikeTrain(DataObject): keys = self.array_annotations.keys() for key in keys: try: - self_ann = copy.copy(self.array_annotations[key]) - other_ann = copy.copy(other.array_annotations[key]) + self_ann = copy.deepcopy(self.array_annotations[key]) + other_ann = copy.deepcopy(other.array_annotations[key]) + if isinstance(self_ann, pq.Quantity): + other_ann.rescale(self_ann.units) + arr_ann = np.concatenate([self_ann, other_ann]) * self_ann.units + else: arr_ann = np.concatenate([self_ann, other_ann]) merged_array_annotations[key] = arr_ann[sorting] # Annotation only available in 'self', must be skipped
Add the autobahn results to the build artifacts This allows for the results to be viewed via GitLab.
@@ -35,3 +35,6 @@ autobahn: - python3 setup.py install - cd compliance/autobahn && nohup python3 server.py & - cd compliance/autobahn && wstest -m fuzzingclient && python summarise.py + artifacts: + paths: + - compliance/autobahn/reports/servers/
core/ProgressBar: add Docstrings Add docstrings for: is_running is_determinate start() stop()
@@ -42,19 +42,39 @@ class ProgressBar(Widget): @property def is_running(self): + """ + Use ``start()`` and ``stop()`` to change the running state. + + Returns: + True if this progress bar is running + False otherwise + """ return self._is_running @property def is_determinate(self): + """ + Determinate progress bars have a numeric ``max`` value (not None). + + Returns: + True if this progress bar is determinate (``max`` is not None) + False if ``max`` is None + """ return self.max is not None def start(self): + """ + Starting this progress bar puts it into running mode. + """ self.enabled = True if not self.is_running: self._impl.start() self._is_running = True def stop(self): + """ + Stop this progress bar (if not already stopped). + """ self._enabled = bool(self.max) if self.is_running: self._impl.stop()
DOC: Fix description of dtype default in linspace Clarify that the inferred type will never be integer. Fixes
@@ -52,8 +52,10 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, If True, return (`samples`, `step`), where `step` is the spacing between samples. dtype : dtype, optional - The type of the output array. If `dtype` is not given, infer the data - type from the other input arguments. + The type of the output array. If `dtype` is not given, the data type + is inferred from `start` and `stop`. The inferred dtype will + never be an integer; `float` is chosen even if the arguments would + produce an array of integers. .. versionadded:: 1.9.0
Display local and remote digests as hexstring not raw bytes This will improve output in logging.
@@ -126,7 +126,7 @@ class Listener(Thread): my_digest = mac.digest() except IndexError: raise ValueError('Did not receive any or enough data from %s', addr[0]) - self.logger.debug("Computed my digest to be %s; remote is %s", my_digest, their_digest) + self.logger.debug("Computed my digest to be %s; remote is %s", my_digest.hex(), their_digest.hex()) if not hmac.compare_digest(their_digest, my_digest): raise Exception("Mismatched MAC for network logging data from %s\nMismatched key? Old version of SimpleMonitor?\n" % addr[0]) result = pickle.loads(pickled)
Update is iterable detection to use iter() instead of checking isinstance Iterable Resolves
from __future__ import absolute_import, division, print_function, unicode_literals import os -from collections import Iterable from six import binary_type, string_types, text_type @@ -25,7 +24,11 @@ def is_multivalued(value): return False # general rule: multivalued if iterable - return isinstance(value, Iterable) + try: + iter(value) + return True + except TypeError: + return False def combine_kwargs(**kwargs):
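For context (illustrative only, not from the project): a class that implements the legacy `__getitem__` sequence protocol is iterable via `iter()` even though it is not recognized by `collections.abc.Iterable`, which is why the `iter()`-based check is more permissive:

```python
from collections.abc import Iterable

class Legacy:
    # Iterable only through the old __getitem__ protocol.
    def __getitem__(self, index):
        if index < 3:
            return index
        raise IndexError(index)

obj = Legacy()
print(isinstance(obj, Iterable))  # False: no __iter__, so the ABC check misses it
print(list(iter(obj)))            # [0, 1, 2]: iter() still accepts it
```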
Update receivers.py #PERFORMANCE Consider using literal syntax to create the data structure This is because here, the name dict must be looked up in the global scope in case it has been rebound. Same goes for the other two types list() and tuple().
@@ -99,7 +99,7 @@ def _post_processor(instance, event: Optional[Event], operation: Operation): if "_state" in changes: del changes["_state"] else: - changes = json.dumps(event.changes if event else dict(), cls=LogJsonEncoder) + changes = json.dumps(event.changes if event else {}, cls=LogJsonEncoder) except Exception: logger.warning(f"Failed to log {event}", exc_info=True) return
right-sidebar: Fix clicking between Users label and search icon. This brings the right sidebar UI to match the similar widget in the left sidebar. Since there's no other plausible effect for a click in this whitespace, this small tweak should make using Zulip a bit more convenient. Fixes
@@ -473,7 +473,7 @@ exports.initialize = function () { exports.set_user_list_filter_handlers(); $('#clear_search_people_button').on('click', exports.clear_search); - $('#user_filter_icon, #userlist-title').click(exports.toggle_filter_displayed); + $('#userlist-header').click(exports.toggle_filter_displayed); // Let the server know we're here, but pass "false" for // want_redraw, since we just got all this info in page_params.
Allow updating AuthKey.key This will preserve references to the AuthKey more cleanly.
@@ -20,7 +20,18 @@ class AuthKey: """ self.key = data - with BinaryReader(sha1(self.key).digest()) as reader: + @property + def key(self): + return self._key + + @key.setter + def key(self, value): + if not value: + self._key = self.aux_hash = self.key_id = None + return + + self._key = value + with BinaryReader(sha1(self._key).digest()) as reader: self.aux_hash = reader.read_long(signed=False) reader.read(4) self.key_id = reader.read_long(signed=False) @@ -39,5 +50,8 @@ class AuthKey: # Calculates the message key from the given data return int.from_bytes(sha1(data).digest()[4:20], 'little', signed=True) + def __bool__(self): + return bool(self._key) + def __eq__(self, other): - return isinstance(other, type(self)) and other.key == self.key + return isinstance(other, type(self)) and other.key == self._key
Update .travis.yml Skip pylint on nightly build
@@ -16,7 +16,7 @@ install: - pip install . script: # https://github.com/PyCQA/pylint/issues/1113 - - if [[ $TRAVIS_PYTHON_VERSION != 3.6 ]]; then pylint --rcfile .pylintrc -E rtv/; fi + - if [[ $TRAVIS_PYTHON_VERSION != 3.6 && $TRAVIS_PYTHON_VERSION != nightly]]; then pylint --rcfile .pylintrc -E rtv/; fi - coverage run -m py.test -v after_success: - coveralls
Update swmr.rst Fixing broken link - issue
@@ -37,7 +37,7 @@ creating the file. The HDF Group has documented the SWMR features in details on the website: -`Single-Writer/Multiple-Reader (SWMR) Documentation <http://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesSwmrDocs.html>`_. +`Single-Writer/Multiple-Reader (SWMR) Documentation <https://support.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesSwmrDocs.html>`_. This is highly recommended reading for anyone intending to use the SWMR feature even through h5py. For production systems in particular pay attention to the file system requirements regarding POSIX I/O semantics.
[IMPR] use str.format method Usability: use str.format instead of concatenating indexed variables
@@ -16,7 +16,7 @@ Command-line arguments: Example: "python pwb.py surnames_redirects -start:B" """ # -# (C) Pywikibot team, 2017 +# (C) Pywikibot team, 2017-2018 # # Distributed under the terms of the MIT license. # @@ -64,26 +64,22 @@ class SurnamesBot(ExistingPageBot, FollowRedirectPageBot): if self.getOption('surnames_last'): name_parts = name.split(', ') if len(name_parts) == 2 and len(name.split(' ')) <= 3: - possible_names.append(name_parts[1] + ' ' + name_parts[0]) + possible_names.append('{1} {0}'.format(*name_parts)) else: words = name.split() if len(words) == 2 and name == name.title(): - possible_names.append(words[1] + ', ' + words[0]) + possible_names.append('{1}, {0}'.format(*words)) elif len(words) == 3: # title may have at most one non-titlecased word if len(SequenceMatcher(None, name, name.title()).get_matching_blocks()) <= 3: - possible_names.append(words[1] + ' ' + - words[2] + ', ' + - words[0]) - possible_names.append(words[2] + ', ' + - words[0] + ' ' + - words[1]) + possible_names.append('{1} {2}, {0}'.format(*words)) + possible_names.append('{2}, {0} {1}'.format(*words)) for possible_name in possible_names: # append disambiguation inside parenthesis if there is one if len(split_title) == 2: - possible_name += ' (' + split_title[1] + possible_name += ' ({1}'.format(*split_title) new_page = pywikibot.Page(site, possible_name) if new_page.exists():
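As a quick illustration of the idiom adopted here (example data is made up): positional indexes in the format string combine with argument unpacking to reorder the pieces without manual concatenation:

```python
# Hypothetical name, purely illustrative.
words = "John Ronald Tolkien".split()

print('{1} {2}, {0}'.format(*words))  # Ronald Tolkien, John
print('{2}, {0} {1}'.format(*words))  # Tolkien, John Ronald
```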
utils/travis-script.sh: update error patterns for testsuite logs Now that the testsuite is based on e3.testsuite, error patterns are different. TN:
@@ -25,5 +25,5 @@ gprbuild -v --disable-ocaml \ | tee TESTSUITE_OUT -# Exit with an error if there is a FAILED line in TESTSUITE_OUT -! grep "FAILED " TESTSUITE_OUT > /dev/null +# Exit with an error if there is a FAIL or ERROR line in TESTSUITE_OUT +! grep "^INFO \+\(FAIL\|ERROR\) " TESTSUITE_OUT > /dev/null
Fix scheduler updating. Scheduler was incorrectly stepped in the batch loop instead of in the epoch loop, causing the LR to be updated after each batch instead of after each epoch.
@@ -263,7 +263,7 @@ def cmd_train(context): loss.backward() optimizer.step() - scheduler.step() + num_steps += 1 # Only write sample at the first step @@ -284,6 +284,7 @@ def cmd_train(context): writer.add_image('Train/Ground Truth', grid_img, epoch) train_loss_total_avg = train_loss_total / num_steps + scheduler.step() tqdm.write(f"Epoch {epoch} training loss: {train_loss_total_avg:.4f}.") if context["loss"]["name"] == 'focal_dice':
update celeryctl.py update syntax for future celery v6
@@ -16,15 +16,15 @@ class CeleryConfig: redis_scheme = redis_scheme + "s" - BROKER_URL = "{}://{}:{}/{}".format( + broker_url = "{}://{}:{}/{}".format( redis_scheme, yeti_config.redis.host, yeti_config.redis.port, yeti_config.redis.database, ) - CELERY_TASK_SERIALIZER = "json" - CELERY_ACCEPT_CONTENT = ["json"] - CELERY_IMPORTS = ( + task_serializer = "json" + accept_content = ["json"] + imports = ( "core.config.celeryimports", "core.analytics_tasks", "core.exports.export", @@ -32,9 +32,9 @@ class CeleryConfig: "core.investigation", "plugins", ) - CELERY_TIMEZONE = "UTC" - CELERYD_POOL_RESTARTS = True - CELERY_ROUTES = { + timezone = "UTC" + worker_pool_restarts = True + task_routes = { "core.analytics_tasks.single": {"queue": "oneshot"}, "core.feed.update_feed": {"queue": "feeds"}, "core.exports.export.execute_export": {"queue": "exports"},
This makes the sls requisite_in system work Basically, this now works: ``` include: - stuff ls -l: cmd.run: - require_in: - sls: stuff ```
@@ -207,6 +207,20 @@ def find_name(name, state, high): return ext_id +def find_sls_ids(sls, high): + ''' + Scan for all ids in the given sls and return them in a dict; {name: state} + ''' + ret = [] + for nid, item in six.iteritems(high): + if item['__sls__'] == sls: + for st_ in item: + if st_.startswith('__'): + continue + ret.append((nid, st_)) + return ret + + def format_log(ret): ''' Format the state into a log message @@ -1520,28 +1534,36 @@ class State(object): if isinstance(items, list): # Formed as a list of requisite additions + hinges = [] for ind in items: if not isinstance(ind, dict): # Malformed req_in continue if len(ind) < 1: continue - _state = next(iter(ind)) - name = ind[_state] - if '.' in _state: + pstate = next(iter(ind)) + pname = ind[pstate] + if pstate == 'sls': + # Expand hinges here + hinges = find_sls_ids(pname, high) + else: + hinges.append((pname, pstate)) + if '.' in pstate: errors.append(( 'Invalid requisite in {0}: {1} for ' '{2}, in SLS \'{3}\'. Requisites must ' 'not contain dots, did you mean \'{4}\'?' .format( rkey, - _state, - name, + pstate, + pname, body['__sls__'], - _state[:_state.find('.')] + pstate[:pstate.find('.')] ) )) - _state = _state.split(".")[0] + pstate = pstate.split(".")[0] + for tup in hinges: + name, _state = tup if key == 'prereq_in': # Add prerequired to origin if id_ not in extend:
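To make the new helper's output concrete, here is a simplified, standalone rendering of it (six.iteritems replaced with dict.items for the sketch) applied to a made-up high-data fragment; the data shape follows what the function iterates over, {id: {'__sls__': ..., '<state>': [...]}}:

```python
# Hypothetical high-data fragment, not from a real state tree.
high = {
    'install_stuff': {'__sls__': 'stuff', 'pkg': [{'name': 'stuff'}]},
    'other_thing':   {'__sls__': 'other', 'cmd': [{'name': 'true'}]},
}

def find_sls_ids(sls, high):
    # Collect (id, state) pairs for every id declared in the given sls.
    ret = []
    for nid, item in high.items():
        if item['__sls__'] == sls:
            for st_ in item:
                if st_.startswith('__'):
                    continue
                ret.append((nid, st_))
    return ret

print(find_sls_ids('stuff', high))  # [('install_stuff', 'pkg')]
```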
Fix: Updated kmscondition string to check in logs Updated the string used to validate Noobaa with KMS integration, in the logs. Fixes:
@@ -3,7 +3,7 @@ import logging import pytest from ocs_ci.framework.pytest_customization.marks import tier1, skipif_no_kms -from ocs_ci.framework.testlib import MCGTest +from ocs_ci.framework.testlib import MCGTest, version from ocs_ci.ocs import constants, defaults from ocs_ci.ocs.resources import pod @@ -27,4 +27,8 @@ class TestNoobaaKMS(MCGTest): namespace=defaults.ROOK_CLUSTER_NAMESPACE, )[0] operator_logs = pod.get_pod_logs(pod_name=operator_pod["metadata"]["name"]) + if version.get_semantic_ocs_version_from_config() < version.VERSION_4_10: assert "found root secret in external KMS successfully" in operator_logs + else: + assert "setKMSConditionStatus Init" in operator_logs + assert "setKMSConditionStatus Sync" in operator_logs
impl Topology.get_groups This patch implements `Topology.get_groups` by returning an empty topology, similar to `TransformChainsTopology.getitem`.
@@ -168,7 +168,7 @@ class Topology(types.Singleton): The union of the given groups. ''' - raise NotImplementedError + return self.empty_like() def take(self, __indices: Union[numpy.ndarray, Sequence[int]]) -> 'Topology': '''Return the selected elements as a disconnected topology.
Fixed: ResourceWarning error when reading TeX-code from file When reading TeX code from a file (File - Open menu in TexText dialog) the following error was thrown: ResourceWarning: unclosed file <_io.TextIOWrapper name'...' This patch fixes this error. Closes
@@ -648,7 +648,8 @@ class AskTextGTKSource(AskText): """ try: - text = open(path).read() + with open(path) as file_handle: + text = file_handle.read() except IOError: print("Couldn't load file: %s", path) return False
dash: add new cases in view of manually filtered data This is a quick kludge to make viewing blocked data easier. * call new_cases_and_deaths.add_new_cases after loading manually filtered data
@@ -24,6 +24,7 @@ from plotly import express as px from libs import pipeline from libs.datasets import combined_datasets from libs.datasets import dataset_utils +from libs.datasets import new_cases_and_deaths from libs.datasets import timeseries from libs.datasets.taglib import TagField from libs.qa import timeseries_stats @@ -117,6 +118,7 @@ class RepoWrapper: dataset = timeseries.MultiRegionDataset.from_wide_dates_csv( dataset_utils.MANUAL_FILTER_REMOVED_WIDE_DATES_CSV_PATH ).add_static_csv_file(dataset_utils.MANUAL_FILTER_REMOVED_STATIC_CSV_PATH) + dataset = new_cases_and_deaths.add_new_cases(dataset) elif dataset_name is DashboardFile.COMBINED_RAW: dataset = timeseries.MultiRegionDataset.from_compressed_pickle( dataset_utils.COMBINED_RAW_PICKLE_GZ_PATH
Add/Update unit test coverage for io.py SIM: cr
@@ -30,36 +30,20 @@ from ..resources.strings import prompts, strings LOG = logging.getLogger(__name__) -color_on = False - - -def start_color(): - global color_on - if not color_on and term_is_colorable(): - colorama.init() - color_on = True - return color_on - def term_is_colorable(): - return sys.stdout.isatty() # Live terminal + return sys.stdout.isatty() def bold(string): s = _convert_to_string(string) - if start_color(): + if term_is_colorable(): + colorama.init() return colorama.Style.BRIGHT + s + colorama.Style.NORMAL else: return s -def reset_all_color(): - if start_color(): - return colorama.Style.RESET_ALL - else: - return '' - - def _remap_color(color): if color.upper() == 'ORANGE': return 'YELLOW' @@ -70,7 +54,8 @@ def _remap_color(color): def color(color, string): s = _convert_to_string(string) - if start_color(): + if term_is_colorable(): + colorama.init() color = _remap_color(color) color_code = getattr(colorama.Fore, color.upper()) return color_code + s + colorama.Fore.RESET @@ -80,7 +65,8 @@ def color(color, string): def on_color(color, string): s = _convert_to_string(string) - if start_color(): + if term_is_colorable(): + colorama.init() color = _remap_color(color) color_code = getattr(colorama.Back, color.upper()) return color_code + s + colorama.Back.RESET @@ -163,10 +149,13 @@ def get_input(output, default=None): # Trim spaces output = next(_convert_to_strings([output])) - result = input(output + ': ').strip() - if not result: - result = default - return result + result = _get_input(output) + + return result or default + + +def _get_input(output): + return input(output + ': ').strip() def echo_with_pager(output):
Fix py_snowflake job for Nightly Release py_snowflake job is now being skipped in the nightly - adding GitHub event `schedule` to the if check
@@ -66,13 +66,15 @@ jobs: py_snowflake: runs-on: ubuntu-latest - # Won't run if PR from fork or by dependabot, since that PR wouldn't have secrets access + # Runs triggered by PRs from forks or by dependabot won't run this job, since that PR wouldn't have secrets access # See: https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions + # Runs triggered by Release/RC are workflow_dispatch events ; Nightly is a schedule event if: | github.repository == 'streamlit/streamlit' && ( (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository && github.actor != 'dependabot[bot]') || (github.event_name == 'push') || - (github.event_name == 'workflow_dispatch') + (github.event_name == 'workflow_dispatch') || + (github.event_name == 'schedule') ) steps:
Changing Image Algorithm to Bilinear For now, the Streamlit team has agreed to use bilinear since that's what other browsers are using and the difference in quality is not very noticeable.
@@ -216,7 +216,7 @@ def _normalize_to_bytes(data, width, output_format): if width > 0 and actual_width > width: new_height = int(1.0 * actual_height * width / actual_width) - image = image.resize((width, new_height)) + image = image.resize((width, new_height), resample=Image.BILINEAR) data = _PIL_to_bytes(image, format=format, quality=90) mimetype = "image/" + format.lower()
Log python function name in interchange log lines This is for consistency with parsl.log which got function names in PR
@@ -592,7 +592,7 @@ def start_file_logger(filename, name='interchange', level=logging.DEBUG, format_ None. """ if format_string is None: - format_string = "%(asctime)s.%(msecs)03d %(name)s:%(lineno)d %(processName)s(%(process)d) %(threadName)s [%(levelname)s] %(message)s" + format_string = "%(asctime)s.%(msecs)03d %(name)s:%(lineno)d %(processName)s(%(process)d) %(threadName)s %(funcName)s [%(levelname)s] %(message)s" global logger logger = logging.getLogger(name)
Updated message when the number is less than 0
@@ -8,7 +8,7 @@ use warnings; my ($prime) = @ARGV; if ( $prime <= 0 ) { - print("please input a non-negative integer"); + print("Usage: please input a non-negative integer"); } else { if ( $prime % 2 == 0 ) {
Adding iframe attributes to attribute whitelist To properly show YouTube embeds, we need to allow some attributes on `iframe` tags. I've added all attributes that are normal for such an embed, including `width` and `height` (which we may not need if we want to make the frame resize to its parent).
@@ -373,6 +373,7 @@ WIKI_MARKDOWN_HTML_ATTRIBUTES = { 'img': ['class', 'id', 'src', 'alt', 'width', 'height'], 'section': ['class', 'id'], 'article': ['class', 'id'], + 'iframe': ['width', 'height', 'src', 'frameborder', 'allow', 'allowfullscreen'], } WIKI_MARKDOWN_HTML_WHITELIST = [
multiline KeyError msg python bug workaround Summary: make multiline KeyError msg readable by working around a python bug discussion: Pull Request resolved:
@@ -569,6 +569,11 @@ class _DataLoaderIter(object): self.rcvd_idx += 1 self._put_indices() if isinstance(batch, _utils.ExceptionWrapper): + # make multiline KeyError msg readable by working around + # a python bug https://bugs.python.org/issue2651 + if batch.exc_type == KeyError and "\n" in batch.exc_msg: + raise Exception("KeyError:" + batch.exc_msg) + else: raise batch.exc_type(batch.exc_msg) return batch
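To see the underlying Python quirk being worked around (a standalone sketch, not project code): `KeyError.__str__` prints the repr of its argument, so embedded newlines are shown escaped, whereas a plain `Exception` keeps them readable:

```python
# Demonstrates https://bugs.python.org/issue2651 in isolation.
msg = "first line\nsecond line"

try:
    raise KeyError(msg)
except KeyError as err:
    print(err)         # 'first line\nsecond line'  (repr, newline escaped)

print(Exception(msg))  # first line
                       # second line  (newline preserved)
```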
Return MRP credentials as string after pairing This fixes
@@ -47,7 +47,7 @@ class MrpPairingHandler(PairingHandler): raise exceptions.DeviceAuthenticationError('no pin given') self.service.credentials = \ - await self.pairing_procedure.finish_pairing(self.pin_code) + str(await self.pairing_procedure.finish_pairing(self.pin_code)) @property def device_provides_pin(self):
If.Expr: switch to ComputingExpr TN:
@@ -266,7 +266,7 @@ class If(AbstractExpression): Abstract expression for a conditional expression. """ - class Expr(ResolvedExpression): + class Expr(ComputingExpr): """ Resolved expression for a conditional expression. """ @@ -296,9 +296,6 @@ class If(AbstractExpression): def _render_pre(self): return render('properties/if_ada', expr=self) - def _render_expr(self): - return self.result_var.name.camel_with_underscores - @property def subexprs(self): return {'0-cond': self.cond,
Don't include TF tests on verbose option TF info includes an implicit test - if TF isn't available the command fails. We don't want that to come along implicitly with the verbose option.
@@ -127,7 +127,7 @@ def _print_info(check): _print_platform_info() _print_psutil_info(check) _print_tensorboard_info(check) - if check.args.tensorflow or check.args.verbose: + if check.args.tensorflow: _print_tensorflow_info(check) _print_cuda_info() _print_nvidia_tools_info()
Fix directory separator under Windows. This patch fixes
import json import locale import os +import platform import re import shutil import sys @@ -62,6 +63,8 @@ class Base(unittest.TestCase): @property def sep(self): + if platform.system().lower() == "windows": + return "\\" return os.sep def assert_called_once(self, mock_method):
Update publish-website.yml Add upgrade of pip to find tensorflow
@@ -11,6 +11,7 @@ jobs: - uses: actions/setup-python@v2 with: python-version: 3.x + - run: python -m pip install --upgrade pip - run: pip install mkdocs-material - run: pip install . --user - run: python docs/autogenerate_documentation.py
Fix typo in AssetMaterialization docstring Test Plan: N/A Reviewers: #docs, sashank
@@ -538,7 +538,7 @@ class AssetMaterialization( Args: asset_key (str|List[str]|AssetKey): A key to identify the materialized asset across pipeline runs - description (Optional[str]): A longer human-radable description of the materialized value. + description (Optional[str]): A longer human-readable description of the materialized value. metadata_entries (Optional[List[EventMetadataEntry]]): Arbitrary metadata about the materialized value. partition (Optional[str]): The name of the partition that was materialized.
Add unittest for slo_etag Related-Change-Id:
@@ -307,6 +307,32 @@ class TestSloMiddleware(SloTestCase): self.assertIsNone(parsed[0]['size_bytes']) self.assertEqual([(0, 40)], parsed[0]['range'].ranges) + def test_container_listing(self): + listing_json = json.dumps([{ + "bytes": 104857600, + "content_type": "application/x-troff-me", + "hash": "8de7b0b1551660da51d8d96a53b85531; " + "slo_etag=dc9947c2b53a3f55fe20c1394268e216", + "last_modified": "2018-07-12T03:14:39.532020", + "name": "test.me" + }]) + self.app.register( + 'GET', '/v1/a/c', + swob.HTTPOk, + {'Content-Type': 'application/json', + 'Content-Length': len(listing_json)}, + listing_json) + req = Request.blank('/v1/a/c', method='GET') + status, headers, body = self.call_slo(req) + self.assertEqual(json.loads(body), [{ + "slo_etag": '"dc9947c2b53a3f55fe20c1394268e216"', + "hash": "8de7b0b1551660da51d8d96a53b85531", + "name": "test.me", + "bytes": 104857600, + "last_modified": "2018-07-12T03:14:39.532020", + "content_type": "application/x-troff-me", + }]) + class TestSloPutManifest(SloTestCase):
LandBOSSE second integration Follow the commenting convention of the rest of the subsystems to name the LandBOSSE BOS integration.
@@ -150,7 +150,7 @@ class LandBasedTurbine(Group): self.add_subsystem('plantfinancese', PlantFinance(verbosity=self.options['VerbosityCosts']), promotes=['machine_rating', 'lcoe']) - # Because of the promotes=['*'] should not need the LandBOSSE prefix. + # BOS Calculation self.add_subsystem('landbosse', LandBOSSE(), promotes=['*']) # Set up connections
Add some new tests on BALANCE opcode Test that BALANCE gas cost works when cold or warm. Test that precompiles are already in the access_list.
@@ -1227,7 +1227,7 @@ def test_chainid(vm_class, chain_id, expected_result): opcode_values.BALANCE, ), None, - 3 + 700, # balance now costs more + 3 + 2600, # balance now costs more ), ( BerlinVM, @@ -1339,6 +1339,44 @@ def test_balance(vm_class, code, expect_exception, expect_gas_used): ), 3 + 700, ), + # querying the same address twice results in a + # cold cost and a warm cost + ( + BerlinVM, + assemble( + opcode_values.PUSH20, + CANONICAL_ADDRESS_B, + opcode_values.BALANCE, + opcode_values.PUSH20, + CANONICAL_ADDRESS_B, + opcode_values.BALANCE, + ), + 3 + 2600 + 3 + 100, + ), + # querying two different addresses results in two + # cold costs + ( + BerlinVM, + assemble( + opcode_values.PUSH20, + CANONICAL_ADDRESS_A, + opcode_values.BALANCE, + opcode_values.PUSH20, + CANONICAL_ADDRESS_B, + opcode_values.BALANCE, + ), + 3 + 2600 + 3 + 2600, + ), + # precompiles are exempt from cold cost + ( + BerlinVM, + assemble( + opcode_values.PUSH20, + force_bytes_to_address(b'\x05'), + opcode_values.BALANCE, + ), + 3 + 100, + ), ) ) def test_gas_costs(vm_class, code, expect_gas_used):
Disable anchor link checker This is failing our builds now. This is due to the AWS Doc site using javascript to render the content now so even though the anchor links are correct, you won't be able to see them in the static HTML.
@@ -223,3 +223,7 @@ htmlhelp_basename = 'Chalicedoc' primary_domain = 'py' linkcheck_retries = 5 +# The AWS Doc site now uses javascript to dynamically render the content, +# so the anchors aren't going to exist in the static html content. This +# will fail the anchor checker so we have to disable this. +linkcheck_anchors = False
Update smtp-email-setup.rst Changed troubleshooting note
@@ -5,7 +5,9 @@ SMTP Email Setup To run in production, Mattermost requires SMTP email to be enabled for email notifications and password reset for systems using email-based authentication. -If you have any problems installing, see the `troubleshooting guide <https://www.mattermost.org/troubleshoot/>`__. To submit an improvement or correction, click Edit at the top of this page. +.. note:: + If you have any problems setting up, see + the `troubleshooting guide <https://www.mattermost.org/troubleshoot/>`_. To submit an improvement or correction, click **Edit** at the top of this page. How to Enable Email ~~~~~~~~~~~~~~~~~~~
Jenkins test fix This change increases the timeout for the Jenkins test that checks the voltha logs and adds a retry on failure (2 retries).
@@ -506,13 +506,10 @@ class BuildMdTests(TestCase): # some # key messages in the logs print "Verify docker voltha logs are produced ..." - expected_output = ['coordinator._renew_session', 'main.heartbeat'] - cmd = command_defs['docker_voltha_logs'] - docker_voltha_logs = run_long_running_command_with_timeout(cmd, - 0.5, 5) - intersected_logs = [l for l in expected_output if - l in docker_voltha_logs] - self.assertEqual(len(intersected_logs), len(expected_output)) + self.wait_till('Basic voltha logs are absent', + self._is_basic_voltha_logs_produced, + interval=1, + timeout=30) finally: print "Stopping all containers ..." @@ -646,6 +643,15 @@ class BuildMdTests(TestCase): print "Not all consul services are ready ..." return res + def _is_basic_voltha_logs_produced(self): + expected_output = ['coordinator._renew_session', 'main.heartbeat'] + cmd = command_defs['docker_voltha_logs'] + docker_voltha_logs = run_long_running_command_with_timeout(cmd, + 10, 5) + intersected_logs = [l for l in expected_output if + l in docker_voltha_logs] + return len(intersected_logs) == len(expected_output) + def _run_consul(self): # run consul cmd = command_defs['docker_compose_start_consul']
Use https protocol for git in CircleCI GitHub no longer supports the `git` protocol:
@@ -28,7 +28,7 @@ jobs: echo ${CI_PULL_REQUEST//*pull\//} | tee merge.txt if [[ $(cat merge.txt) != "" ]]; then echo "Merging $(cat merge.txt)"; - git remote add upstream git://github.com/scipy/scipy.git; + git remote add upstream https://github.com/scipy/scipy.git; git pull --ff-only upstream "refs/pull/$(cat merge.txt)/merge"; git fetch upstream master; fi
Fix DnD of toolbox items This broke when the method `_button_drag_data_get` was changed to a function: the `self` parameter was not removed. Fixes
@@ -201,7 +201,6 @@ class Toolbox(UIComponent): if Gtk.get_major_version() == 3: def _button_drag_data_get( - self, button: Gtk.Button, context: Gdk.DragContext, data: Gtk.SelectionData,
Sort Controllers in Ansible Inventory file Controller Group will now be sorted.
@@ -222,6 +222,13 @@ elif grep -q $uuid <<< {$blockstorage_uuids}; then echo " UserKnownHostsFile=/dev/null" | tee -a ${ssh_config_file} done +# Sort Controllers +controller_hn=( $( + for item in "${controller_hn[@]}" + do + echo "$item" + done | sort) ) + echo "" echo "---------------------------" echo "Creating ansible inventory file:"
add: The scheduler can now be shut down in a threaded task and put on hold (untested). New scheduler functionality: .shut_down(), .on_hold = True
from multiprocessing import Process, cpu_count import multiprocessing +import threading import traceback import warnings @@ -89,6 +90,11 @@ class Scheduler: if parameters: self.session.parameters.update(parameters) + # Controlling runtime (used by scheduler.disabled) + self._flag_enabled = threading.Event() + self._flag_shutdown = threading.Event() + self._flag_enabled.set() # Not on hold by default + def _register_instance(self): self.session.scheduler = self @@ -106,6 +112,8 @@ class Scheduler: self._setup() while not bool(self.shut_condition): + if self._flag_shutdown.is_set(): + break self._hibernate() self._run_cycle() @@ -143,7 +151,7 @@ class Scheduler: if task.on_startup or task.on_shutdown: # Startup or shutdown tasks are not run in main sequence pass - elif self.is_task_runnable(task): + elif self._flag_enabled.is_set() and self.is_task_runnable(task): # Run the actual task self.run_task(task) # Reset force_run as a run has forced @@ -387,6 +395,11 @@ class Scheduler: else: self.terminate_all(reason="shutdown") + def _wait_task_shutdown(self): + "Wait till all, especially threading tasks, are finished" + while self.n_alive > 0: + time.sleep(0.005) + def _shut_down(self, traceback=None, exception=None): """Shut down the scheduler This method is meant to controllably close the @@ -412,6 +425,7 @@ class Scheduler: self.logger.info(f"Shutting down tasks...") self._shut_down_tasks(traceback, exception) + self._wait_task_shutdown() self.logger.info(f"Shutdown completed. Good bye.") if isinstance(exception, SchedulerRestart): @@ -463,6 +477,21 @@ class Scheduler: self.logger.debug(f"Next run cycle at {delay} seconds.") return delay +# System control + @property + def on_hold(self): + return not self._flag_enabled.is_set() + + @on_hold.setter + def on_hold(self, value): + if value: + self._flag_enabled.clear() + else: + self._flag_enabled.set() + + def shut_down(self): + self.on_hold = False # In case was set to wait + self._flag_shutdown.set() # Logging @property
Fix test failures on Windows Either we were assuming gzip was included (not the case on Windows CI), or we were not closing the file (so cleanup failed to remove the file).
@@ -10,7 +10,7 @@ class TestWriteDirectChunk(TestCase): def test_write_direct_chunk(self): filename = self.mktemp().encode() - filehandle = h5py.File(filename, "w") + with h5py.File(filename, "w") as filehandle: dataset = filehandle.create_dataset("data", (100, 100, 100), maxshape=(None, 100, 100), @@ -24,21 +24,21 @@ class TestWriteDirectChunk(TestCase): dataset.id.write_direct_chunk((index, 0, 0), a.tostring(), filter_mask=1) array[index] = a - filehandle.close() # checking - filehandle = h5py.File(filename, "r") + with h5py.File(filename, "r") as filehandle: for i in range(10): read_data = filehandle["data"][i] numpy.testing.assert_array_equal(array[i], read_data) @ut.skipUnless(h5py.version.hdf5_version_tuple >= (1, 10, 2), 'Direct Chunk Reading requires HDF5 >= 1.10.2') [email protected]('gzip' not in h5py.filters.encode, "DEFLATE is not installed") class TestReadDirectChunk(TestCase): def test_read_compressed_offsets(self): filename = self.mktemp().encode() - filehandle = h5py.File(filename, "w") + with h5py.File(filename, "w") as filehandle: frame = numpy.arange(16).reshape(4, 4) frame_dataset = filehandle.create_dataset("frame", @@ -88,7 +88,7 @@ class TestReadDirectChunk(TestCase): def test_read_write_chunk(self): filename = self.mktemp().encode() - filehandle = h5py.File(filename, "w") + with h5py.File(filename, "w") as filehandle: # create a reference frame = numpy.arange(16).reshape(4, 4) @@ -108,9 +108,8 @@ class TestReadDirectChunk(TestCase): # copy the data dataset.id.write_direct_chunk((0, 0), compressed_frame, filter_mask=filter_mask) - filehandle.close() # checking - filehandle = h5py.File(filename, "r") + with h5py.File(filename, "r") as filehandle: dataset = filehandle["created"][...] numpy.testing.assert_array_equal(dataset, frame)
fix: stabilize leaflet map Disable flyToBounds; call invalidateSize after loading to avoid gray tiles
@@ -58,7 +58,7 @@ frappe.ui.form.ControlGeolocation = class ControlGeolocation extends frappe.ui.f })); this.add_non_group_layers(data_layers, this.editableLayers); try { - this.map.flyToBounds(this.editableLayers.getBounds(), { + this.map.fitBounds(this.editableLayers.getBounds(), { padding: [50,50] }); } @@ -66,10 +66,10 @@ frappe.ui.form.ControlGeolocation = class ControlGeolocation extends frappe.ui.f // suppress error if layer has a point. } this.editableLayers.addTo(this.map); - this.map._onResize(); - } else if ((value===undefined) || (value == JSON.stringify(new L.FeatureGroup().toGeoJSON()))) { - this.locate_control.start(); + } else { + this.map.setView(frappe.utils.map_defaults.center, frappe.utils.map_defaults.zoom); } + this.map.invalidateSize(); } bind_leaflet_map() { @@ -97,8 +97,7 @@ frappe.ui.form.ControlGeolocation = class ControlGeolocation extends frappe.ui.f }); L.Icon.Default.imagePath = '/assets/frappe/images/leaflet/'; - this.map = L.map(this.map_id).setView(frappe.utils.map_defaults.center, - frappe.utils.map_defaults.zoom); + this.map = L.map(this.map_id); L.tileLayer(frappe.utils.map_defaults.tiles, frappe.utils.map_defaults.options).addTo(this.map);
[ci/lint] Fix linkcheck flakiness As seen in
@@ -163,6 +163,7 @@ linkcheck_ignore = [ "https://scikit-optimize.github.io/stable/modules/", "https://www.oracle.com/java/technologies/javase-jdk15-downloads.html", # forbidden for client r"https://huggingface.co/*", # seems to be flaky + r"https://www.meetup.com/*", # seems to be flaky ] # -- Options for HTML output ----------------------------------------------
expose namespace config for scheduled workflow This config allows specifying which namespace the persistent agent listens to
@@ -16,4 +16,6 @@ COPY --from=builder /bin/controller /bin/controller COPY --from=builder /go/src/github.com/kubeflow/pipelines/third_party/license.txt /bin/license.txt RUN chmod +x /bin/controller -CMD /bin/controller -alsologtostderr=true +ENV NAMESPACE "" + +CMD /bin/controller --alsologtostderr=true --namespace=${NAMESPACE}
added 'mod_spatialite.7.dylib' fix for Darwin ARM
@@ -2062,7 +2062,7 @@ class Archive(object): except sqlite3.OperationalError: continue elif platform.system() == 'Darwin': - for option in ['mod_spatialite.so']: # , 'mod_spatialite.dylib']: + for option in ['mod_spatialite.so', 'mod_spatialite.7.dylib']: # , 'mod_spatialite.dylib']: try: dbapi_conn.load_extension(option) except sqlite3.OperationalError:
Fix for issue adding support for mag_rtn_1min, mag_sc_1min, as well as mag_rtn_4_sa_per_cyc and mag_sc_4_sa_per_cyc datatypes
@@ -36,6 +36,14 @@ def load(trange=['2018-11-5', '2018-11-6'], file_resolution = 24*3600. if instrument == 'fields': + # 4_per_cycle and 1min are daily, not 6h like the full resolution 'mag_(rtn|sc)' + if datatype == 'mag_rtn_1min' or datatype == 'mag_sc_1min': + pathformat = instrument + '/' + level + '/' + datatype + '/%Y/psp_fld_' + level + '_' + datatype + '_%Y%m%d_v??.cdf' + elif datatype == 'mag_rtn_4_per_cycle' or datatype == 'mag_rtn_4_sa_per_cyc': + pathformat = instrument + '/' + level + '/mag_rtn_4_per_cycle/%Y/psp_fld_' + level + '_mag_rtn_4_sa_per_cyc_%Y%m%d_v??.cdf' + elif datatype == 'mag_sc_4_per_cycle' or datatype == 'mag_sc_4_sa_per_cyc': + pathformat = instrument + '/' + level + '/mag_sc_4_per_cycle/%Y/psp_fld_' + level + '_mag_sc_4_sa_per_cyc_%Y%m%d_v??.cdf' + else: pathformat = instrument + '/' + level + '/' + datatype + '/%Y/psp_fld_' + level + '_' + datatype + '_%Y%m%d%H_v??.cdf' file_resolution = 6*3600. elif instrument == 'spc':
update CircleCI config Summary: Gym will be installed by tox before running unittests. No need to install Gym outside of virtual env. Pull Request resolved:
@@ -314,7 +314,7 @@ jobs: steps: - checkout_merge - pip_install: - install_gym: true + install_gym: false is_ubuntu_gpu: true - run_unittest: tox_env: circleci_ranking_unittest @@ -328,7 +328,7 @@ jobs: steps: - checkout_merge - pip_install: - install_gym: true + install_gym: false is_ubuntu_gpu: true - run_unittest: tox_env: circleci_training_unittest @@ -342,7 +342,7 @@ jobs: steps: - checkout_merge - pip_install: - install_gym: true + install_gym: false is_ubuntu_gpu: true - run_unittest: tox_env: circleci_prediction_unittest @@ -356,7 +356,7 @@ jobs: steps: - checkout_merge - pip_install: - install_gym: true + install_gym: false is_ubuntu_gpu: true - run_unittest: tox_env: circleci_world_model_unittest
Fix a bug that FairseqSimulSTAgent is not an agent Summary: Pull Request resolved:
@@ -10,12 +10,11 @@ from fairseq.file_io import PathManager try: from simuleval import READ_ACTION, WRITE_ACTION, DEFAULT_EOS + from simuleval.agents import SpeechAgent from simuleval.states import ListEntry, SpeechStates except ImportError: print("Please install simuleval 'pip install simuleval'") -from torch import nn - SHIFT_SIZE = 10 WINDOW_SIZE = 25 SAMPLE_RATE = 16000 @@ -113,12 +112,12 @@ class TensorListEntry(ListEntry): } -class FairseqSimulSTAgent(nn.Module): +class FairseqSimulSTAgent(SpeechAgent): speech_segment_size = 40 # in ms, 4 pooling ratio * 10 ms step size def __init__(self, args): - super().__init__() + super().__init__(args) self.eos = DEFAULT_EOS @@ -218,6 +217,9 @@ class FairseqSimulSTAgent(nn.Module): task_args = state["cfg"]["task"] task_args.data = args.data_bin + if args.config is not None: + task_args.config_yaml = args.config + task = self.set_up_task(task_args) # build model for ensemble
Show an error message to the user if they are trying to swap names/codes between two locations, or to reassign the name/code of one location to another. Doing such an update in a single request results in an IntegrityError at the database due to the unique constraint, since the updates are applied sequentially.
@@ -334,6 +334,8 @@ class LocationTypesView(BaseDomainView): loc_types = payload['loc_types'] pks = [] + payload_loc_type_name_by_pk = {} + payload_loc_type_code_by_pk = {} for loc_type in loc_types: for prop in ['name', 'parent_type', 'administrative', 'shares_cases', 'view_descendants', 'pk']: @@ -342,14 +344,33 @@ class LocationTypesView(BaseDomainView): pk = loc_type['pk'] if not _is_fake_pk(pk): pks.append(loc_type['pk']) - - names = [lt['name'] for lt in loc_types] + payload_loc_type_name_by_pk[loc_type['pk']] = loc_type['name'] + if loc_type['code']: + payload_loc_type_code_by_pk[loc_type['pk']] = loc_type['code'] + names = payload_loc_type_name_by_pk.values() names_are_unique = len(names) == len(set(names)) - codes = [lt['code'] for lt in loc_types if lt['code']] + codes = payload_loc_type_code_by_pk.values() codes_are_unique = len(codes) == len(set(codes)) if not names_are_unique or not codes_are_unique: raise LocationConsistencyError("'name' and 'code' are supposed to be unique") + current_location_types = LocationType.objects.by_domain(request.domain) + for location_type in current_location_types: + if location_type.pk in payload_loc_type_name_by_pk: + # to check if the name/code was swapped with another location by confirming if + # either name/code has changed but the current name is still present in the names/codes passed + if ( + (location_type.name != payload_loc_type_name_by_pk.get(location_type.pk) and + location_type.name in names) or + (location_type.code != payload_loc_type_code_by_pk[location_type.pk] and + location_type.code in codes) + ): + messages.error(request, LocationConsistencyError(_( + "Looks like you are assigning a location name/code to a different location " + "in the same request. Please do this in two separate updates by using a " + "temporary name to free up the name/code to be re-assigned.")) + ) + return self.get(request, *args, **kwargs) hierarchy = self.get_hierarchy(loc_types) if not self.remove_old_location_types(pks):
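A framework-free sketch of the swap check introduced above: a location is flagged when its name or code changes in the payload while that same name or code still appears elsewhere in the request. `LocType` is a stand-in for the Django model and the data is illustrative.

```python
# Minimal reproduction of the name/code re-assignment check.
from collections import namedtuple

LocType = namedtuple('LocType', ['pk', 'name', 'code'])

existing = [LocType(1, 'state', 'st'), LocType(2, 'district', 'dst')]
# Request tries to swap the two names in a single update:
payload_name_by_pk = {1: 'district', 2: 'state'}
payload_code_by_pk = {1: 'st', 2: 'dst'}

names = payload_name_by_pk.values()
codes = payload_code_by_pk.values()

for lt in existing:
    if lt.pk not in payload_name_by_pk:
        continue
    name_reassigned = lt.name != payload_name_by_pk[lt.pk] and lt.name in names
    code_reassigned = lt.code != payload_code_by_pk.get(lt.pk) and lt.code in codes
    if name_reassigned or code_reassigned:
        print(f"Rejected: name/code of pk={lt.pk} is being re-assigned in the same request")
```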
Wireframe : Reserve space for `edgesVisited` This gives around a 2x speedup.
@@ -115,6 +115,7 @@ struct MakeWireframe using Edge = std::pair<int, int>; using EdgeSet = unordered_set<Edge, boost::hash<Edge>>; EdgeSet edgesVisited; + edgesVisited.reserve( mesh->variableSize( PrimitiveVariable::FaceVarying ) ); int vertexIdsIndex = 0; for( int numVertices : mesh->verticesPerFace()->readable() )
Retry termination task in case of failure Review-Url:
@@ -462,6 +462,35 @@ def clear_lease_request(key, request_id): machine_lease.put() [email protected] +def clear_termination_task(key, task_id): + """Clears the termination task associated with the given lease request. + + Args: + key: ndb.Key for a MachineLease entity. + task_id: ID for a termination task. + """ + machine_lease = key.get() + if not machine_lease: + logging.error('MachineLease does not exist\nKey: %s', key) + return + + if not machine_lease.termination_task: + return + + if task_id != machine_lease.termination_task: + logging.error( + 'Task ID mismatch\nKey: %s\nExpected: %s\nActual: %s', + key, + task_id, + machine_lease.task_id, + ) + return + + machine_lease.termination_task = None + machine_lease.put() + + @ndb.transactional def associate_termination_task(key, hostname, task_id): """Associates a termination task with the given lease request. @@ -741,6 +770,17 @@ def handle_termination_task(machine_lease): task_result_summary = task_pack.unpack_result_summary_key( machine_lease.termination_task).get() + if task_result_summary.state in task_result.State.STATES_EXCEPTIONAL: + logging.info( + 'Termination failed:\nKey: %s\nHostname: %s\nTask ID: %s\nState: %s', + machine_lease.key, + machine_lease.hostname, + machine_lease.termination_task, + task_result.State.to_string(task_result_summary.state), + ) + clear_termination_task(machine_lease.key, machine_lease.termination_task) + return + if task_result_summary.state == task_result.State.COMPLETED: # There is a race condition where the bot reports the termination task as # completed but hasn't exited yet. The last thing it does before exiting
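A simplified, self-contained sketch of the retry behaviour added above: if the termination task ended in an exceptional state, the stored task id is cleared so the next pass of the handler schedules a fresh termination task. The state names and dict-based "lease" are illustrative only, not the Machine Provider datastore model.

```python
# Illustrative retry-on-failure pattern for the termination task.
EXCEPTIONAL = {'EXPIRED', 'TIMED_OUT', 'BOT_DIED', 'CANCELED'}  # placeholder set

def handle_termination_task(lease, task_state):
    if lease.get('termination_task') is None:
        return 'schedule-new-termination-task'
    if task_state in EXCEPTIONAL:
        # termination failed; forget the task so it is retried on the next pass
        lease['termination_task'] = None
        return 'retry-later'
    if task_state == 'COMPLETED':
        return 'release-machine'
    return 'wait'

lease = {'hostname': 'bot-1', 'termination_task': 'task-123'}
print(handle_termination_task(lease, 'EXPIRED'))  # retry-later
print(handle_termination_task(lease, None))       # schedule-new-termination-task
```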
Update supported_instruments.rst Added sport_ivm
@@ -92,6 +92,12 @@ ROCSAT-1 IVM .. automodule:: pysat.instruments.rocsat1_ivm :members: __doc__ +Sport IVM +--------- + +.. automodule:: pysat.instruments.sport_ivm + :members: __doc__ + SuperDARN ---------
Drop the long VideoFrame.pict_type names. Sticking with FFmpeg names for the moment.
@@ -19,33 +19,16 @@ cdef VideoFrame alloc_video_frame(): return VideoFrame.__new__(VideoFrame, _cinit_bypass_sentinel) -cdef EnumType _PictureType = define_enum('PictureType', ( - - ('UNKNOWN', lib.AV_PICTURE_TYPE_NONE), +cdef EnumType PictureType = define_enum('PictureType', ( ('NONE', lib.AV_PICTURE_TYPE_NONE), - - ('INTRA', lib.AV_PICTURE_TYPE_I), ('I', lib.AV_PICTURE_TYPE_I), - - ('PREDICTED', lib.AV_PICTURE_TYPE_P), ('P', lib.AV_PICTURE_TYPE_P), - - ('BIDIRECTIONAL', lib.AV_PICTURE_TYPE_B), ('B', lib.AV_PICTURE_TYPE_B), - - ('SGMC_VOP', lib.AV_PICTURE_TYPE_S), # I'm guessing at a good name here. ('S', lib.AV_PICTURE_TYPE_S), - - ('SWITCHING_INTRA', lib.AV_PICTURE_TYPE_SI), ('SI', lib.AV_PICTURE_TYPE_SI), - - ('SWITCHING_PREDICTED', lib.AV_PICTURE_TYPE_SP), ('SP', lib.AV_PICTURE_TYPE_SP), - - ('BI', lib.AV_PICTURE_TYPE_BI), # I don't have a name here. - + ('BI', lib.AV_PICTURE_TYPE_BI), )) -PictureType = _PictureType cdef class VideoFrame(Frame): @@ -286,11 +269,11 @@ cdef class VideoFrame(Frame): @property def pict_type(self): - return _PictureType.get(self.ptr.pict_type, create=True) + return PictureType.get(self.ptr.pict_type, create=True) @pict_type.setter def pict_type(self, value): - self.ptr.pict_type = _PictureType[value].value + self.ptr.pict_type = PictureType[value].value def to_rgb(self, **kwargs): """Get an RGB version of this frame.
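A hedged usage sketch of the surviving short names: counting decoded frames per picture type via `frame.pict_type`. It assumes a PyAV build where `VideoFrame.pict_type` returns a `PictureType` member with a `.name` attribute; 'input.mp4' is a placeholder path.

```python
# Tally frames by FFmpeg-style picture type name ('I', 'P', 'B', ...).
import collections
import av

counts = collections.Counter()
with av.open('input.mp4') as container:
    for frame in container.decode(video=0):
        counts[frame.pict_type.name] += 1

print(dict(counts))  # e.g. {'I': 12, 'P': 220, 'B': 410}
```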
Replacing black with a more generic color (blue) The change will make the link look more like a link and also ensure that it's visible in both dark and light modes
@@ -1497,7 +1497,7 @@ class DownloaderGUI: for (i, (key, label, callback)) in enumerate(info): Label(infoframe, text=label).grid(column=0, row=i, sticky="e") entry = Entry( - infoframe, font="courier", relief="groove", disabledforeground="black" + infoframe, font="courier", relief="groove", disabledforeground="#007aff" ) self._info[key] = (entry, callback) entry.bind("<Return>", self._info_save)
Fixes Updated docstring under Schedule.draw() * Fixes : A small change in the docstring. Under the `PulseQobj` class, the method `to_dict()` had a statement saying it returns `QasmQobj` instead of `PulseQobj`. * Fixes Updated docstring of Schedule.draw()
@@ -650,7 +650,7 @@ class Schedule(ScheduleComponent): table: Draw event table for supported commands. label: Label individual instructions. framechange: Add framechange indicators. - channels: A list of channel names to plot. + channels: A list of Channels to plot. show_framechange_channels: Plot channels with only framechanges. Additional Information:
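A hedged sketch of what the clarified docstring means in practice: `channels` takes Channel objects rather than channel-name strings. It assumes the Qiskit Pulse API of roughly this era (Play/Gaussian instructions, matplotlib installed); exact drawing keywords vary between Qiskit versions.

```python
# Plot only the channels of interest by passing Channel objects, not names.
from qiskit import pulse
from qiskit.pulse import DriveChannel, MeasureChannel

sched = pulse.Schedule(name='example')
sched += pulse.Play(pulse.Gaussian(duration=160, amp=0.1, sigma=40), DriveChannel(0))

# Only the drive and measure channels for qubit 0 are drawn:
sched.draw(channels=[DriveChannel(0), MeasureChannel(0)])
```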
Fix deep / broken URL for service account setup. Closes
@@ -30,6 +30,11 @@ NOW = datetime.datetime.utcnow # To be replaced by tests. MULTIPLE_SPACES_RE = r"\s+" MULTIPLE_SPACES = re.compile(MULTIPLE_SPACES_RE) +SERVICE_ACCOUNT_URL = ( + "https://googleapis.dev/python/google-api-core/latest/" + "auth.html#setting-up-a-service-account" +) + def ensure_signed_credentials(credentials): """Raise AttributeError if the credentials are unsigned. @@ -42,16 +47,11 @@ def ensure_signed_credentials(credentials): of :class:`google.auth.credentials.Signing`. """ if not isinstance(credentials, google.auth.credentials.Signing): - auth_uri = ( - "https://google-cloud-python.readthedocs.io/en/latest/" - "core/auth.html?highlight=authentication#setting-up-" - "a-service-account" - ) raise AttributeError( "you need a private key to sign credentials." - "the credentials you are currently using %s " - "just contains a token. see %s for more " - "details." % (type(credentials), auth_uri) + "the credentials you are currently using {} " + "just contains a token. see {} for more " + "details.".format(type(credentials), SERVICE_ACCOUNT_URL) )
remove explicit keyring backend selection Fixes
@@ -308,14 +308,7 @@ class DictConfig(ObjectConfig): @staticmethod def get_value_from_keyring(secure_value_key, user_name): - import keyrings.cryptfile.cryptfile - keyrings.cryptfile.cryptfile.CryptFileKeyring.keyring_key = "none" - import keyring - if (isinstance(keyring.get_keyring(), keyring.backends.fail.Keyring) or - isinstance(keyring.get_keyring(), keyring.backends.chainer.ChainerBackend)): - keyring.set_keyring(keyrings.cryptfile.cryptfile.CryptFileKeyring()) - logging.getLogger("keyring").info( "Using keyring '" + keyring.get_keyring().name + "' to retrieve: " + secure_value_key) return keyring.get_password(service_name=secure_value_key, username=user_name)
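A small sketch of relying on keyring's default backend resolution, which is what the remaining code above does: the library picks the best available backend for the platform, and `get_password` works the same either way. Service and user names are placeholders, and a usable backend must exist on the machine.

```python
# Let keyring choose the backend, then round-trip a secret through it.
import logging
import keyring

logging.basicConfig(level=logging.INFO)
logging.getLogger("keyring").info("Using keyring '%s'", keyring.get_keyring().name)

keyring.set_password("my-app/db-password", "deploy-user", "s3cr3t")
print(keyring.get_password(service_name="my-app/db-password", username="deploy-user"))
```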
Always have 'value' in the filter field dict. Tests could pass parameters without values, which threw a template variable exception in tests.
@@ -248,6 +248,8 @@ class CAPFiltering(FilteringFilterBackend): if f in options: fields[f]['options'] = options[f] + else: + fields[f]['value'] = None if f in filter_values: fields[f]['value'] = ' '.join(filter_values[f])
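A minimal sketch of the defaulting pattern above: every filter field gets a 'value' key up front, so templates can read `fields[f]['value']` without blowing up when a parameter arrives with no value. The data is illustrative.

```python
# Default 'value' to None before optionally overwriting it from the request.
fields = {'status': {}, 'owner': {}}
filter_values = {'status': ['open']}   # 'owner' arrives with no value

for f in fields:
    fields[f]['value'] = None          # always present, even with no submitted value
    if f in filter_values:
        fields[f]['value'] = ' '.join(filter_values[f])

print(fields)  # {'status': {'value': 'open'}, 'owner': {'value': None}}
```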
lint exception. Need to revisit 36 vs 39 lint rules; this kind of thing should not happen!
@@ -35,7 +35,7 @@ class _CombinedFormatter( pass -# pylint: disable=too-many-locals, too-many-arguments, too-many-branches +# pylint: disable=too-many-locals, too-many-arguments, too-many-branches, too-many-statements def generate_components( components_source, project_shortname,
[Dashboard] Fix a bug in the object store percentage bar Fixing 2 issues: when the used memory == 0%, it doesn't display anything (we should rather display the [0/total] bar), and it currently doesn't display the %.
@@ -110,7 +110,9 @@ const NodeRow = ({ node, expanded, onExpandButtonClick }: NodeRowProps) => { total={objectStoreTotalMemory} > {memoryConverter(raylet.objectStoreUsedMemory)}/ - {memoryConverter(objectStoreTotalMemory)} + {memoryConverter(objectStoreTotalMemory)}( + {(raylet.objectStoreUsedMemory / objectStoreTotalMemory).toFixed(2)} + %) </PercentageBar> )} </TableCell>
Xfail the two tests due to mis-design. Currently check_drift on components doesn't consider changing deployments as drift, and so those tests fail.
@@ -85,9 +85,11 @@ class TestInterDeploymentDependenciesInfrastructure(AgentlessTestCase): client=self.client) self._assert_dependencies_count(0) + @pytest.mark.xfail # RD-5336 def test_dependencies_are_updated(self): self._test_dependencies_are_updated(skip_uninstall=False) + @pytest.mark.xfail # RD-5336 def test_dependencies_are_updated_but_keeps_old_dependencies(self): self._test_dependencies_are_updated(skip_uninstall=True)
Reflect asanyarray behaviour in block: array kwargs set to copy=False, subok=True
@@ -408,11 +408,11 @@ def _block(arrays, depth=0): list_ndim = list_ndims[0] arr_ndim = max(arr.ndim for arr in arrs) ndim = max(list_ndim, arr_ndim) - arrs = [_nx.array(a, ndmin=ndim) for a in arrs] + arrs = [array(a, ndmin=ndim, copy=False, subok=True) for a in arrs] return _nx.concatenate(arrs, axis=depth+ndim-list_ndim), list_ndim else: # We've 'bottomed out' - return _nx.array(arrays, ndmin=depth), depth + return array(arrays, ndmin=depth, copy=False, subok=True), depth def block(arrays):
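A quick sketch of what `copy=False, subok=True` buys: like `np.asanyarray`, the call passes ndarray subclasses through (and avoids a copy when none is needed) instead of coercing them to a base ndarray. `np.matrix` is used here only as a convenient subclass to demonstrate the difference.

```python
# Compare the default coercion with the asanyarray-like keyword arguments.
import numpy as np

m = np.matrix([[1, 2], [3, 4]])

coerced = np.array(m, ndmin=2)                             # default: base ndarray copy
preserved = np.array(m, ndmin=2, copy=False, subok=True)   # asanyarray-like behaviour

print(type(coerced).__name__)    # ndarray
print(type(preserved).__name__)  # matrix
print(preserved is m)            # True here: no copy was needed
```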
Add a missing parameter to the wrapper for the trackpy local max peak finder The `trackpy.locate()` call wrapped by starfish is missing the `max_iterations` parameter, which allows additional refinement of a detected spot's center of mass. See:
@@ -56,6 +56,8 @@ class TrackpyLocalMaxPeakFinder(DetectSpotsAlgorithmBase): name of the function used to calculate the intensity for each identified spot area preprocess : boolean Set to False to turn off bandpass preprocessing. + max_iterations : integer + Max number of loops to refine the center of mass, default 10 is_volume : bool if True, run the algorithm on 3d volumes of the provided stack verbose : bool @@ -70,8 +72,8 @@ class TrackpyLocalMaxPeakFinder(DetectSpotsAlgorithmBase): def __init__( self, spot_diameter, min_mass, max_size, separation, percentile=0, noise_size: Tuple[int, int, int] = (1, 1, 1), smoothing_size=None, threshold=None, - preprocess: bool = False, measurement_type: str = 'max', is_volume: bool = False, - verbose=False) -> None: + preprocess: bool = False, max_iterations: int = 10, measurement_type: str = 'max', + is_volume: bool = False, verbose=False) -> None: self.diameter = spot_diameter self.minmass = min_mass @@ -83,6 +85,7 @@ class TrackpyLocalMaxPeakFinder(DetectSpotsAlgorithmBase): self.threshold = threshold self.measurement_function = self._get_measurement_function(measurement_type) self.preprocess = preprocess + self.max_iterations = max_iterations self.is_volume = is_volume self.verbose = verbose @@ -114,7 +117,8 @@ class TrackpyLocalMaxPeakFinder(DetectSpotsAlgorithmBase): smoothing_size=self.smoothing_size, threshold=self.threshold, percentile=self.percentile, - preprocess=self.preprocess + preprocess=self.preprocess, + max_iterations=self.max_iterations, ) # when zero spots are detected, 'ep' is missing from the trackpy locate results. @@ -186,11 +190,13 @@ class TrackpyLocalMaxPeakFinder(DetectSpotsAlgorithmBase): @click.option( "--smoothing-size", default=None, type=int, help="odd integer. Size of boxcar (moving average) filter in pixels. Default is the " - "Diameter" - ) + "Diameter") @click.option( "--preprocess", is_flag=True, help="if passed, gaussian and boxcar filtering are applied") + @click.option( + "--max-iterations", default=10, type=int, + help="Max number of loops to refine the center of mass. Default is 10") @click.option( "--show", default=False, is_flag=True, help="display results visually") @click.option( @@ -202,9 +208,10 @@ class TrackpyLocalMaxPeakFinder(DetectSpotsAlgorithmBase): help="indicates that the image stack should be filtered in 3d") @click.pass_context def _cli(ctx, spot_diameter, min_max, max_size, separation, noise_size, smoothing_size, - preprocess, show, percentile, is_volume): + preprocess, max_iterations, show, percentile, is_volume): instance = TrackpyLocalMaxPeakFinder(spot_diameter, min_max, max_size, separation, noise_size, smoothing_size, - preprocess, show, percentile, is_volume) + preprocess, max_iterations, show, + percentile, is_volume) ctx.obj["component"]._cli_run(ctx, instance)
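A hedged sketch of what the forwarded parameter controls, calling `trackpy.locate` directly: `max_iterations` bounds the centroid-refinement loop for each candidate spot. The synthetic image and parameter values are illustrative only, not recommendations for real data.

```python
# Locate one synthetic bright blob with extra center-of-mass refinement iterations.
import numpy as np
import trackpy as tp

rng = np.random.default_rng(0)
image = rng.poisson(5, size=(128, 128)).astype(float)
image[60:65, 60:65] += 50   # one bright blob to detect

spots = tp.locate(image, diameter=7, minmass=100, max_iterations=20)
print(spots[['y', 'x', 'mass']].head())
```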
Update activity_magic.py Replacing fcntl with portalocker to support Windows machines in addition to Unix-based OSes.
@@ -162,11 +162,11 @@ class Activity(object): print(barvalues) def handle_submit(self, sender): - import fcntl - with open(self.results_filename, "a+") as g: - fcntl.flock(g, fcntl.LOCK_EX) + import portalocker,os + with portalocker.Lock(self.results_filename, "a+") as g: g.write("%s::%s::%s::%s\n" % (self.id, getpass.getuser(), datetime.datetime.today(), sender.description)) - fcntl.flock(g, fcntl.LOCK_UN) + g.flush() + os.fsync(g.fileno()) self.output.clear_output() with self.output: print("Received: " + sender.description)
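A minimal cross-platform append sketch using portalocker, mirroring the change above: the context manager holds an exclusive lock for the duration of the write, and flush + fsync make the record durable before the lock is released. The filename and record format are placeholders.

```python
# Lock, append, flush, fsync - works on both Windows and Unix-like systems.
import os
import portalocker

def append_result(path, record):
    with portalocker.Lock(path, "a+") as handle:
        handle.write(record + "\n")
        handle.flush()
        os.fsync(handle.fileno())

append_result("results.log", "alice::2024-01-01::answer-b")
```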
Update losses.py Fix incorrect comments.
@@ -1030,7 +1030,7 @@ def ctc_loss_kd(log_probs, targets, input_lens, blank_index, device): def ce_kd(inp, target): - """Simple version of distillation fro cross-entropy loss. + """Simple version of distillation for cross-entropy loss. Arguments ---------
Fix: return and log an error instead of just returning a string. The `roles` kwarg was moved to the end of the list of positional arguments to preserve the API.
@@ -172,7 +172,9 @@ def version(user=None, password=None, host=None, port=None, database='admin', au ''' conn = _connect(user, password, host, port, authdb=authdb) if not conn: - return 'Failed to connect to mongo database' + err_msg = "Failed to connect to MongoDB database {0}:{1}".format(host, port) + log.error(err_msg) + return (False, err_msg) try: mdb = pymongo.database.Database(conn, database) @@ -253,8 +255,8 @@ def user_exists(name, user=None, password=None, host=None, port=None, return False -def user_create(name, passwd, roles=None, user=None, password=None, host=None, port=None, - database='admin', authdb=None): +def user_create(name, passwd, user=None, password=None, host=None, port=None, + database='admin', authdb=None, roles=None): ''' Create a Mongodb user
Manually construct reply_to for send_message result Closes
@@ -880,7 +880,8 @@ class MessageMethods: media=result.media, entities=result.entities, reply_markup=request.reply_markup, - ttl_period=result.ttl_period + ttl_period=result.ttl_period, + reply_to=types.MessageReplyHeader(request.reply_to_msg_id) ) message._finish_init(self, {}, entity) return message
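A hedged sketch of the user-visible effect: after sending with `reply_to`, the returned Message now carries a reply header, so `reply_to_msg_id` is populated immediately instead of being None. It assumes Telethon's high-level client API; credentials and message ids are placeholders.

```python
# Send a reply and read the reply header off the returned Message.
from telethon import TelegramClient

api_id, api_hash = 12345, 'placeholder-hash'

async def main():
    async with TelegramClient('session', api_id, api_hash) as client:
        sent = await client.send_message('me', 'replying', reply_to=100)
        print(sent.reply_to_msg_id)  # 100, rather than None as before this fix

# import asyncio; asyncio.run(main())
```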
add size_type and difference_type to vector and update insert calls to return an iterator (true as of C++11)
@@ -2,13 +2,20 @@ cdef extern from "<vector>" namespace "std" nogil: cdef cppclass vector[T,ALLOCATOR=*]: ctypedef T value_type ctypedef ALLOCATOR allocator_type + + # these should really be allocator_type.size_type and + # allocator_type.difference_type to be true to the C++ definition + # but cython doesn't support defered access on template arguments + ctypedef size_t size_type + ctypedef ptrdiff_t difference_type + cppclass iterator: T& operator*() iterator operator++() iterator operator--() - iterator operator+(size_t) - iterator operator-(size_t) - size_t operator-(iterator) + iterator operator+(size_type) + iterator operator-(size_type) + difference_type operator-(iterator) bint operator==(iterator) bint operator!=(iterator) bint operator<(iterator) @@ -19,8 +26,8 @@ cdef extern from "<vector>" namespace "std" nogil: T& operator*() iterator operator++() iterator operator--() - iterator operator+(size_t) - iterator operator-(size_t) + iterator operator+(size_type) + iterator operator-(size_type) bint operator==(reverse_iterator) bint operator!=(reverse_iterator) bint operator<(reverse_iterator) @@ -33,10 +40,10 @@ cdef extern from "<vector>" namespace "std" nogil: pass vector() except + vector(vector&) except + - vector(size_t) except + - vector(size_t, T&) except + + vector(size_type) except + + vector(size_type, T&) except + #vector[input_iterator](input_iterator, input_iterator) - T& operator[](size_t) + T& operator[](size_type) #vector& operator=(vector&) bint operator==(vector&, vector&) bint operator!=(vector&, vector&) @@ -44,13 +51,13 @@ cdef extern from "<vector>" namespace "std" nogil: bint operator>(vector&, vector&) bint operator<=(vector&, vector&) bint operator>=(vector&, vector&) - void assign(size_t, const T&) + void assign(size_type, const T&) void assign[input_iterator](input_iterator, input_iterator) except + - T& at(size_t) except + + T& at(size_type) except + T& back() iterator begin() const_iterator const_begin "begin"() - size_t capacity() + size_type capacity() void clear() bint empty() iterator end() @@ -59,19 +66,19 @@ cdef extern from "<vector>" namespace "std" nogil: iterator erase(iterator, iterator) T& front() iterator insert(iterator, const T&) except + - void insert(iterator, size_t, const T&) except + - void insert[Iter](iterator, Iter, Iter) except + - size_t max_size() + iterator insert(iterator, size_type, const T&) except + + iterator insert[Iter](iterator, Iter, Iter) except + + size_type max_size() void pop_back() void push_back(T&) except + reverse_iterator rbegin() - const_reverse_iterator const_rbegin "rbegin"() + const_reverse_iterator const_rbegin "crbegin"() reverse_iterator rend() - const_reverse_iterator const_rend "rend"() - void reserve(size_t) - void resize(size_t) except + - void resize(size_t, T&) except + - size_t size() + const_reverse_iterator const_rend "crend"() + void reserve(size_type) + void resize(size_type) except + + void resize(size_type, T&) except + + size_type size() void swap(vector&) # C++11 methods
Update quickstart.rst macOS brew cask command: replace `brew cask install` with `brew install --cask`
@@ -97,7 +97,7 @@ On macOS:: brew install autoconf automake libtool openssl pkg-config brew tap homebrew/cask-versions - brew cask install homebrew/cask-versions/adoptopenjdk8 + brew install --cask homebrew/cask-versions/adoptopenjdk8 Installing Android SDK ~~~~~~~~~~~~~~~~~~~~~~
Add Age of Empires II API. Modify URL. Place entry in alphabetical order.
@@ -354,6 +354,7 @@ API | Description | Auth | HTTPS | CORS | ### Games & Comics API | Description | Auth | HTTPS | CORS | |---|---|---|---|---| +| [Age of Empires II](https://age-of-empires-2-api.herokuapp.com) | Get information about Age of Empires II resources | No | Yes | Unknown | | [AmiiboAPI](http://www.amiiboapi.com/) | Amiibo Information | No | No | Yes | | [Battle.net](https://dev.battle.net/) | Blizzard Entertainment | `apiKey` | Yes | Unknown | | [Battlefield 4](https://bf4stats.com/api) | Battlefield 4 Information | No | Yes | Unknown |
Now using digest auth by default. Fixes issue
@@ -182,7 +182,7 @@ def download(remote_path='', remote_file='', local_path='', local_file='', heade session = requests.Session() if username is not None: - session.auth = (username, password) + session.auth = requests.auth.HTTPDigestAuth(username, password) if headers.get('User-Agent') is None: try:
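A small sketch of the change above: attaching `HTTPDigestAuth` to the session instead of a `(user, password)` tuple, which requests would otherwise treat as HTTP Basic auth. The URL and credentials are placeholders.

```python
# Use digest authentication on a requests session for the download.
import requests
from requests.auth import HTTPDigestAuth

session = requests.Session()
session.auth = HTTPDigestAuth('username', 'password')   # was: ('username', 'password')

response = session.get('https://example.com/protected/file.tar.gz')
print(response.status_code)
```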
Minor fix for exploratory report
@@ -140,8 +140,12 @@ public class BillingServiceImpl implements BillingService { public BillingReport getExploratoryBillingData(String project, String endpoint, String exploratoryName, List<String> compNames) { List<String> resourceNames = new ArrayList<>(compNames); resourceNames.add(exploratoryName); - List<BillingReportLine> billingData = billingDAO.findBillingData(project, endpoint, resourceNames); - final double sum = billingData.stream().mapToDouble(BillingReportLine::getCost).sum(); + List<BillingReportLine> billingReportLines = billingDAO.findBillingData(project, endpoint, resourceNames); + final double sum = billingReportLines.stream().mapToDouble(BillingReportLine::getCost).sum(); + List<BillingReportLine> billingData = billingReportLines + .stream() + .peek(bd -> bd.setCost(BigDecimal.valueOf(bd.getCost()).setScale(2, BigDecimal.ROUND_HALF_UP).doubleValue())) + .collect(Collectors.toList());; final String currency = billingData.stream().map(BillingReportLine::getCurrency).distinct().count() == 1 ? billingData.get(0).getCurrency() : null; return BillingReport.builder() .name(exploratoryName)
DOC: changelog update Updated changelog with `custom.attach` improvement summary.
@@ -24,6 +24,7 @@ This project adheres to [Semantic Versioning](http://semver.org/). - Removed time.season_date_range - DeprecationWarning for strict_time_flag only triggered if sloppy data is found - Remove convenience methods imported from pandas + - Changed the default `custom.attatch` input to allow keyword arguement use when additional function input is required - Documentation - Added info on how to register new instruments - Fixed description of tag and sat_id behaviour in testing instruments