message: string (13–484 characters)
diff: string (38–4.63k characters)
Update CONTRIBUTING.md Problem: Our contributing doc has outdated instructions for updating tezos via 'niv.' Solution: Update these instructions.
@@ -19,8 +19,10 @@ otherwise improve our project, pull requests are most welcome. - Tezos revision is located in the [`sources.json`](../nix/nix/sources.json) file. You can either update it manually to newer revision or use `niv` tool. - In order to do that run `niv update tezos` (this will update revision to latest). - `niv update tezos -a rev=...` will update wources to some commit. + In order to do that run + `niv update tezos -a ref=<tag from tezos repo> -a rev=<commit in this tag>` + from the [`nix/`](../nix) directory. This command will update sources to some commit + from the given tag. - Used tezos protocols can be changed by [`proto`](../script/proto) script. This script requires `jq` and `moreutils` to be installed. Currently used protocols are displayed in [`protocols.json`](../protocols.json).
ENH: added Table.head() and Table.tail() methods for displaying Table subsets Shows nice html in Jupyter notebooks
@@ -313,6 +313,20 @@ class Table(DictArray): self._repr_policy = dict(head=head, tail=tail, random=random) + def head(self, nrows=5): + """displays top nrows""" + repr_policy = self._repr_policy + self._repr_policy = dict(head=nrows, tail=None, random=None) + display(self) + self._repr_policy = repr_policy + + def tail(self, nrows=5): + """displays bottom nrows""" + repr_policy = self._repr_policy + self._repr_policy = dict(head=None, tail=nrows, random=None) + display(self) + self._repr_policy = repr_policy + @property def header(self): """returns header value"""
Add ASAP to Graph classification Readme This PR links ASAPool to the graph classification Readme.
@@ -14,6 +14,7 @@ Hyperparameter selection is performed for the number of hidden units and the num * **[GlobalAttention](https://github.com/rusty1s/pytorch_geometric/blob/master/benchmark/kernel/global_attention.py)** * **[Set2Set](https://github.com/rusty1s/pytorch_geometric/blob/master/benchmark/kernel/set2set.py)** * **[SortPool](https://github.com/rusty1s/pytorch_geometric/blob/master/benchmark/kernel/sort_pool.py)** +* **[ASAPool](https://github.com/rusty1s/pytorch_geometric/blob/master/benchmark/kernel/asap.py)** Run (or modify) the whole test suite via
llvm, functions/UDF: Store ctypes callback wrapper with Function instance rather than llvm function Fixes memory leak of both PNL and LLVM objects.
@@ -596,9 +596,9 @@ class UserDefinedFunction(Function_Base): value = self.custom_function(np.asfarray(variable), **llvm_params) _assign_to_carr(arg_out.contents, np.atleast_2d(value)) - builder.function.__wrapper_f = wrapper_ct(_wrapper) + self.__wrapper_f = wrapper_ct(_wrapper) # To get the right callback pointer, we need to cast to void* - wrapper_address = ctypes.cast(builder.function.__wrapper_f, ctypes.c_void_p) + wrapper_address = ctypes.cast(self.__wrapper_f, ctypes.c_void_p) # Direct pointer constants don't work wrapper_ptr = builder.inttoptr(pnlvm.ir.IntType(64)(wrapper_address.value), builder.function.type) builder.call(wrapper_ptr, [params, state, arg_in, arg_out])
ansible: blacklist everything except our own namespaces Farewell, pointless roundtrips, we hardly knew ye.
@@ -103,11 +103,8 @@ class StrategyModule(ansible.plugins.strategy.linear.StrategyModule): def run(self, iterator, play_context, result=0): self.router = mitogen.master.Router() - self.router.responder.blacklist('OpenSSL') - self.router.responder.blacklist('urllib3') - self.router.responder.blacklist('requests') - self.router.responder.blacklist('systemd') - self.router.responder.blacklist('selinux') + self.router.responder.whitelist_prefix('ansible') + self.router.responder.whitelist_prefix('ansible_mitogen') self.listener = mitogen.unix.Listener(self.router) os.environ['LISTENER_SOCKET_PATH'] = self.listener.path
Added a DeprecationWarning for Python 3.4 - Replaced use of six with sys.version_info for Python version determination (stdlib use over ext lib).
@@ -26,7 +26,7 @@ utils --- Miscelaneous utilities """ import gzip -import six +import sys import warnings from .version import _check_module_dependencies, __version__ @@ -36,12 +36,24 @@ def py2_deprecation_warning(): warnings.simplefilter('once') py2_warning = ('Python2 support is deprecated and will be removed in ' 'a future release. Consider switching to Python3.') - if six.PY2: + if sys.version_info.major == 2: warnings.warn(message=py2_warning, category=DeprecationWarning, stacklevel=4, ) + +def _py34_deprecation_warning(): + warnings.simplefilter('once') + py34_warning = ('Python 3.4 support is deprecated and will be removed in ' + 'a future release. Consider switching to Python 3.6 or 3.7.' + ) + if sys.version_info.major == 3 and sys.version_info.minor == 4: + warnings.warn(message=py34_warning, + category=DeprecationWarning, + stacklevel=3, + ) + _check_module_dependencies()
remove reference to removed property fixes
@@ -396,7 +396,6 @@ class ExportGLTF2_Base: if self.export_animations: col.prop(self, 'export_frame_range') col.prop(self, 'export_frame_step') - col.prop(self, 'export_move_keyframes') col.prop(self, 'export_force_sampling') col.prop(self, 'export_skins') if self.export_skins:
[runtime env] Add FAQ for runtime_env Adds some frequently asked user questions to the docs.
@@ -402,6 +402,40 @@ Example: {"pip": ["torch", "ray[serve]"], "env_vars": {"A": "a", "B": "new", "C", "c"}} +.. _runtime-env-faq: + +Frequently Asked Questions +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Are environments installed on every node? +""""""""""""""""""""""""""""""""""""""""" + +If a runtime environment is specified in ``ray.init(runtime_env=...)``, then the environment will be installed on every node. See :ref:`Per-Job <rte-per-job>` for more details. + +When is the environment installed? +"""""""""""""""""""""""""""""""""" + +When specified per-job, the environment is installed when you call ``ray.init()``. +When specified per-task or per-actor, the environment is installed when the task is invoked or the actor is instantiated (i.e. when you call ``my_task.remote()`` or ``my_actor.remote()``.) +See :ref:`Per-Job <rte-per-job>` :ref:`Per-Task/Actor, within a job <rte-per-task-actor>` for more details. + +Where are the environments cached? +"""""""""""""""""""""""""""""""""" + +Any local files downloaded by the environments are cached at ``/tmp/ray/session_latest/runtime_resources``. + + + +How long does it take to install or to load from cache? +""""""""""""""""""""""""""""""""""""""""""""""""""""""" + +The install time usually mostly consists of the time it takes to run ``pip install`` or ``conda create`` / ``conda activate``, or to upload/download a ``working_dir``, depending on which ``runtime_env`` options you're using. +This could take seconds or minutes. + +On the other hand, loading a runtime environment from the cache should be nearly as fast as the ordinary Ray worker startup time, which is on the order of a few seconds. A new Ray worker is started for every Ray actor or task that requires a new runtime environment. +(Note that loading a cached ``conda`` environment could still be slow, since the ``conda activate`` command sometimes takes a few seconds.) + + .. _remote-uris: @@ -485,6 +519,8 @@ Currently, three types of remote URIs are supported for hosting ``working_dir`` - ``runtime_env = {"working_dir": "gs://example_bucket/example_file.zip"}`` + + Hosting a Dependency on a Remote Git Provider: Step-by-Step Guide -----------------------------------------------------------------
Scons: Minor cleanup * Need to provide the define everywhere; crashes have occurred when it was removed for newer Python, where things would compile with MinGW64 but not work.
@@ -828,8 +828,8 @@ else: env.Append(LINKFLAGS=["-s"]) -# MinGW for 64 bits needs this due to CPython bugs. -if win_target and target_arch == "x86_64" and env.gcc_mode: +# MinGW64 for 64 bits needs this due to CPython bugs. +if env.mingw_mode and target_arch == "x86_64": env.Append(CPPDEFINES=["MS_WIN64"]) # Set load libpython from binary directory default
Add a note about Google Colab setup User must run `jax.tools.colab_tpu.setup_tpu()`
@@ -466,6 +466,11 @@ the following in your cloud TPU VM: pip install --upgrade pip pip install "jax[tpu]>=0.2.16" -f https://storage.googleapis.com/jax-releases/libtpu_releases.html ``` +Colab TPU runtimes come with JAX pre-installed, but before importing JAX you must run the following code to initialize the TPU: +```python +import jax.tools.colab_tpu +jax.tools.colab_tpu.setup_tpu() +``` ### Building JAX from source See [Building JAX from
llvm, composition: Extract controller output flattening hack to a new function This will be reused to synch compiled state
@@ -3866,16 +3866,19 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): proj_params = (tuple(p._get_param_initializer(execution_id)) for p in self.projections) return (tuple(mech_params), tuple(proj_params)) - def _get_data_initializer(self, execution_id=None): - output = [(os.parameters.value.get(execution_id) for os in m.output_states) for m in self._all_nodes] - if self.model_based_optimizer is not None: + def _get_flattened_controller_output(self, execution_id): controller_data = [os.parameters.value.get(execution_id) for os in self.model_based_optimizer.output_states] # This is an ugly hack to remove 2d arrays try: controller_data = [[c[0][0]] for c in controller_data] except: pass - output.append(controller_data) + return controller_data + + def _get_data_initializer(self, execution_id=None): + output = [(os.parameters.value.get(execution_id) for os in m.output_states) for m in self._all_nodes] + if self.model_based_optimizer is not None: + output.append(self._get_flattened_controller_output(execution_id)) data = [output] for node in self.nodes: nested_data = node._get_data_initializer(execution_id=execution_id) if hasattr(node,
Pin scenario001-multinode-containers to earlier ceph docker container Closes-Bug:
@@ -117,7 +117,7 @@ parameter_defaults: CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ==' CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw==' CephPoolDefaultSize: 1 - DockerCephDaemonImage: ceph/daemon:tag-build-master-jewel-centos-7 + DockerCephDaemonImage: ceph/daemon:tag-build-ceph-dfg-jewel-centos-7 NovaEnableRbdBackend: true CinderEnableRbdBackend: true CinderBackupBackend: ceph
Support Copy Op Summary: Pull Request resolved:
@@ -216,7 +216,8 @@ NetDef TvmTransformer::applyTvmTransform( "MatMul", "BatchGather", "DotProduct", "Transpose", "Mul", "Tanh", - "Logit", "Cast"}; + "Logit", "Cast", + "Copy"}; try { // If the op position is black listed, return false
new test_partition_of_unity() in tests/test_poly_spaces.py tests lagrange, serendipity bases
@@ -128,6 +128,39 @@ class Test(TestCommon): return Test(conf=conf, options=options, gels=gels) + def test_partition_of_unity(self): + from sfepy.linalg import combine + from sfepy.discrete import Integral, PolySpace + + ok = True + orders = {'2_3' : 5, '2_4' : 5, '3_4' : 5, '3_8' : 5} + bases = ( + [ii for ii in combine( + [['2_4', '3_8'], ['lagrange', 'serendipity']] + )] + + [ii for ii in combine([['2_3', '3_4'], ['lagrange']])] + ) + + for geom, poly_space_base in bases: + max_order = orders[geom] + for order in range(max_order + 1): + if (poly_space_base == 'serendipity') and not (0 < order < 4): + continue + self.report('geometry: %s, base: %s, order: %d' + % (geom, poly_space_base, order)) + + integral = Integral('i', order=2 * order) + coors, _ = integral.get_qp(geom) + + ps = PolySpace.any_from_args('ps', self.gels[geom], order, + base=poly_space_base) + vals = ps.eval_base(coors) + _ok = nm.allclose(vals.sum(axis=-1), 1, atol=1e-14, rtol=0.0) + self.report('partition of unity:', _ok) + ok = ok and _ok + + return ok + def test_continuity(self): ok = True orders = {'2_3' : 3, '2_4' : 3, '3_4' : 4, '3_8' : 3}
Close tempfile before attempting to remove it I'm not a big fan of writing to files without the 'with open(...) as f' context manager notation, but this will do just fine.
@@ -752,6 +752,7 @@ def demo(): >>> parser_std.train([gold_sent],'temp.arcstd.model', verbose=False) Number of training examples : 1 Number of valid (projective) examples : 1 + >>> input_file.close() >>> remove(input_file.name) B. Check the ARC-EAGER training @@ -767,6 +768,7 @@ def demo(): Number of training examples : 1 Number of valid (projective) examples : 1 + >>> input_file.close() >>> remove(input_file.name) ###################### Check The Parsing Function ########################
fix for removed get_installed_distributions function This is a very dumb solution and we are cluttering the imports a lot. Much better would be to refactor pipchecker and raise the minimum pip version.
@@ -24,7 +24,35 @@ try: except ImportError: from pip._internal.download import PipSession # type:ignore from pip._internal.req.req_file import parse_requirements + try: from pip._internal.utils.misc import get_installed_distributions + except ImportError: + from typing import cast + + def get_installed_distributions( + local_only=True, + include_editables=True, + editables_only=False, + user_only=False, + paths=None, + ): + """Return a list of installed Distribution objects. + Left for compatibility until direct pkg_resources uses are refactored out. + """ + from pip._internal.metadata import get_default_environment, get_environment + from pip._internal.metadata.pkg_resources import Distribution as _Dist + + if paths is None: + env = get_default_environment() + else: + env = get_environment(paths) + dists = env.iter_installed_distributions( + local_only=local_only, + include_editables=include_editables, + editables_only=editables_only, + user_only=user_only, + ) + return [cast(_Dist, dist)._dist for dist in dists] except ImportError: # pip < 10 try:
changed source DBs It might be better to name testing DBs with a _tests suffix and control it with a single argument (or env var).
@@ -38,9 +38,9 @@ class ModuleUnitTest(BaseTest): PERSIST = False # True to not purge temporary folder nor test DB TEST_OPENPYPE_MONGO = "mongodb://localhost:27017" - TEST_DB_NAME = "test_db" + TEST_DB_NAME = "avalon_tests" TEST_PROJECT_NAME = "test_project" - TEST_OPENPYPE_NAME = "test_openpype" + TEST_OPENPYPE_NAME = "openpype_tests" TEST_FILES = []
[tests] Fix cache_tests.py LoginStatus enum does not have an entry above 1 currently; use LoginStatus.AS_USER to test for username.
# import unittest -import scripts.maintenance.cache as cache from pywikibot.site import BaseSite +from pywikibot.login import LoginStatus + +import scripts.maintenance.cache as cache + from tests import join_cache_path from tests.aspects import TestCase @@ -24,7 +27,7 @@ class RequestCacheTests(TestCase): self.assertIsInstance(entry.site, BaseSite) self.assertIsInstance(entry.site._loginstatus, int) self.assertNotIsInstance(entry.site._username, list) - if entry.site._loginstatus >= 1: + if entry.site._loginstatus >= LoginStatus.AS_USER: self.assertIsNotNone(entry.site._username) self.assertIsInstance(entry._params, dict) self.assertIsNotNone(entry._params)
Clear training image on close. Previously the training image would be resent since it was saved there; now, after unlisten, it nukes that last raw image.
@@ -169,6 +169,7 @@ class CameraInterface(wx.Frame): self.camera_lock.acquire() self.kernel.unlisten("camera_frame_raw", self.on_camera_frame_raw) self.kernel.unlisten("camera_frame", self.on_camera_frame) + self.kernel.signal("camera_frame_raw", None) self.close_camera() self.kernel.mark_window_closed("CameraInterface") self.kernel = None @@ -202,6 +203,8 @@ class CameraInterface(wx.Frame): self.update_in_gui_thread() def on_camera_frame_raw(self, frame): + if frame is None: + return try: import cv2 import numpy as np
PR Title: Remove stable build commands PR Body: PUBLIC: internal merge of PR Merge into Closes ORIGINAL_AUTHOR=Sergio Guadarrama COPYBARA_INTEGRATE_REVIEW=https://github.com/tensorflow/agents/pull/3 from tensorflow:sguada-patch-2
@@ -51,40 +51,16 @@ e.g.: <a id='Installation'></a> ## Installation -### Stable Builds - -To install the latest version, run the following: - -```shell -# Installing with the `--upgrade` flag ensures you'll get the latest version. -pip install --user --upgrade tf-agents # depends on TensorFlow -``` - -TF-Agents depends on a recent stable release of -[TensorFlow](https://www.tensorflow.org/install) (pip package `tensorflow`). - -Note: Since TensorFlow is *not* included as a dependency of the TF-Agents -package (in `setup.py`), you must explicitly install the TensorFlow -package (`tensorflow` or `tensorflow-gpu`). This allows us to maintain one -package instead of separate packages for CPU and GPU-enabled TensorFlow. - -To force a Python 3-specific install, replace `pip` with `pip3` in the above -commands. For additional installation help, guidance installing prerequisites, -and (optionally) setting up virtual environments, see the [TensorFlow -installation guide](https://www.tensorflow.org/install). - -### Nightly Builds - -There are also nightly builds of TF-Agents under the pip package +To install the latest version, use nightly builds of TF-Agents under the pip package `tf-agents-nightly`, which requires you install on one of `tf-nightly` and -`tf-nightly-gpu`. Nightly builds include newer features, but may be less stable -than the versioned releases. +`tf-nightly-gpu` and also `tensorflow-probability-nightly`. +Nightly builds include newer features, but may be less stable than the versioned releases. To install the nightly build version, run the following: ```shell # Installing with the `--upgrade` flag ensures you'll get the latest version. -pip install --user --upgrade tf-agents-nightly # depends on TensorFlow +pip install --user --upgrade tf-agents-nightly # depends on tf-nightly ``` <a id='Contributing'></a>
Updated `test_split_has_correct_data_points()` Updated `test_split_has_correct_data_points()` in class `TestLightcurve` to check if `split()` works with new attributes. Removed similar test from class `TestNewPeraSupport`
@@ -849,15 +849,26 @@ class TestLightcurve(object): def test_split_has_correct_data_points(self): test_time = np.array([1, 2, 3, 6, 7, 8]) test_counts = np.random.rand(len(test_time)) + test_bg_counts = np.random.rand(len(test_time)) + test_bg_ratio = np.random.rand(len(test_time)) + test_frac_exp = np.random.rand(len(test_time)) + with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UserWarning) - lc_test = Lightcurve(test_time, test_counts) + lc_test = Lightcurve(test_time, test_counts, bg_counts=test_bg_counts, + bg_ratio=test_bg_ratio, frac_exp=test_frac_exp) slc = lc_test.split(1.5) assert np.allclose(slc[0].time, [1, 2, 3]) assert np.allclose(slc[1].time, [6, 7, 8]) assert np.allclose(slc[0].counts, test_counts[:3]) assert np.allclose(slc[1].counts, test_counts[3:]) + assert np.allclose(slc[0].bg_counts, test_bg_counts[:3]) + assert np.allclose(slc[1].bg_counts, test_bg_counts[3:]) + assert np.allclose(slc[0].bg_ratio, test_bg_ratio[:3]) + assert np.allclose(slc[1].bg_ratio, test_bg_ratio[3:]) + assert np.allclose(slc[0].frac_exp, test_frac_exp[:3]) + assert np.allclose(slc[1].frac_exp, test_frac_exp[3:]) def test_split_with_three_segments(self): test_time = np.array([1, 2, 3, 6, 7, 8, 10, 11, 12]) @@ -1489,19 +1500,3 @@ class TestNewPeraSupport(): assert np.allclose(lc_new.frac_exp, np.array([3, 4, 1, 2])) assert lc_new.mjdref == mjdref - def test_split_has_correct_data_points(self): - test_time = np.array([1, 2, 3, 6, 7, 8]) - test_counts = np.random.rand(len(test_time)) - test_bg_counts = np.random.rand(len(test_time)) - - with warnings.catch_warnings(): - warnings.simplefilter("ignore", category=UserWarning) - lc_test = Lightcurve(test_time, test_counts, bg_counts=test_bg_counts) - slc = lc_test.split(1.5) - - assert np.allclose(slc[0].time, [1, 2, 3]) - assert np.allclose(slc[1].time, [6, 7, 8]) - assert np.allclose(slc[0].counts, test_counts[:3]) - assert np.allclose(slc[1].counts, test_counts[3:]) - assert np.allclose(slc[0].bg_counts, test_bg_counts[:3]) - assert np.allclose(slc[1].bg_counts, test_bg_counts[3:])
Fix loss of keyboard control after spawn We were stepping on this Urwid bug: The guys from pazz/alot found a fix, which I cribbed: Fix
@@ -10,6 +10,7 @@ import sys import tempfile import traceback import typing # noqa +import contextlib import urwid @@ -102,6 +103,16 @@ class ConsoleMaster(master.Master): return callback(*args) self.loop.set_alarm_in(seconds, cb) + @contextlib.contextmanager + def uistopped(self): + self.loop.stop() + try: + yield + finally: + self.loop.start() + self.loop.screen_size = None + self.loop.draw_screen() + def spawn_editor(self, data): text = not isinstance(data, bytes) fd, name = tempfile.mkstemp('', "mproxy", text=text) @@ -111,7 +122,7 @@ class ConsoleMaster(master.Master): c = os.environ.get("EDITOR") or "vi" cmd = shlex.split(c) cmd.append(name) - self.ui.stop() + with self.uistopped(): try: subprocess.call(cmd) except: @@ -121,7 +132,6 @@ class ConsoleMaster(master.Master): else: with open(name, "r" if text else "rb") as f: data = f.read() - self.ui.start() os.unlink(name) return data @@ -153,14 +163,13 @@ class ConsoleMaster(master.Master): c = "less" cmd = shlex.split(c) cmd.append(name) - self.ui.stop() + with self.uistopped(): try: subprocess.call(cmd, shell=shell) except: signals.status_message.send( message="Can't start external viewer: %s" % " ".join(c) ) - self.ui.start() os.unlink(name) def set_palette(self, opts, updated):
fix issue when training for 1 epoch (mostly to pass tests)
@@ -105,5 +105,8 @@ class SaveBestState(Callback): f" Module best state updated." ) + def on_train_start(self, trainer, pl_module): + self.best_module_state = deepcopy(pl_module.module.state_dict()) + def on_train_end(self, trainer, pl_module): pl_module.module.load_state_dict(self.best_module_state)
docs: remove explicit reference to Pi 2 and 3 Closes
@@ -5,7 +5,7 @@ Raspberry Pi ************ Mopidy runs on all versions of `Raspberry Pi <https://www.raspberrypi.org/>`_. -However, note that the Raspberry Pi 2 and 3 are significantly more powerful than +However, note that the later models are significantly more powerful than the Raspberry Pi 1 and Raspberry Pi Zero; Mopidy will run noticably faster on the later models.
add g++-12 dependency for Ubuntu 22.04 Add g++-12 dependency for 22.04. `scons -u -j8` gave clang++ not finding iostream and others; the solution at the bottom of the page worked. Installed g++-12 and it built fine after that.
@@ -86,6 +86,7 @@ function install_ubuntu_lts_latest_requirements() { install_ubuntu_common_requirements $SUDO apt-get install -y --no-install-recommends \ + g++-12 \ qtbase5-dev \ qtchooser \ qt5-qmake \
Typo Fix Just a quick word-ordering fix!
@@ -69,7 +69,7 @@ If you go to the project directory and execute the command `git status`, you'll Add those changes to the branch you just created using the git add command: ``` -$ git add <YOU ADDED FILE> +$ git add <FILE YOU ADDED> ``` Now commit those changes using the git commit command:
Bug fix in writing node demands The error in the thermal calculation persists...
@@ -1168,7 +1168,7 @@ def calc_max_edge_flowrate(thermal_network, set_diameter, start_t, stop_t, use_m mass_flows_separated = zip(*mass_flows) thermal_network.edge_mass_flow_df = pd.DataFrame(np.column_stack(mass_flows_separated[0])).transpose() - thermal_network.node_mass_flow_df = pd.DataFrame(np.column_stack(mass_flows_separated[1])).transpose() + thermal_network.node_mass_flow_df = pd.DataFrame(np.vstack(mass_flows_separated[1])).transpose() thermal_network.edge_mass_flow_df.to_csv( thermal_network.locator.get_edge_mass_flow_csv_file(thermal_network.network_type,
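A quick way to see why this change matters: for a list of 1-D arrays, `np.column_stack` and `np.vstack` produce transposed layouts of each other. The sketch below uses made-up arrays, not the thermal-network data.

```python
import numpy as np

# Made-up per-timestep arrays standing in for the node mass flows.
rows = [np.array([1.0, 2.0, 3.0]), np.array([4.0, 5.0, 6.0])]

print(np.column_stack(rows).shape)  # (3, 2): each array becomes a column
print(np.vstack(rows).shape)        # (2, 3): each array becomes a row
```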
remove unused function from gv remove unused function from gv
@@ -489,10 +489,3 @@ class GlobalVariables(object): def log(self, msg, **kwargs): print msg % kwargs - - def is_heating_season(self, timestep): - - if self.seasonhours[0] + 1 <= timestep < self.seasonhours[1]: - return False - else: - return True
Update semagglutidequantity.json added categories for grouping and a review date for a year's time
"low_is_good": true, "tags": [ "core", + "cost", + "diabetes", "safety" ], "numerator_type": "custom", "bnf_code LIKE '0601023AW%' --Semaglutide" ], "date_reviewed": [ - "2019-11-21" + "2020-05-03" ], "next_review": [ - "2019-11-21" + "2020-11-21" ], "authored_by": [ "[email protected]"
UIEditor : Allow switching between Standard and Auxiliary node gadgets Does it really make sense to call this "Show Name"? Maybe it should be a simple submenu with "Standard" and "Auxiliary" options, and then it would automatically work with other types in the future?
@@ -179,6 +179,24 @@ class UIEditor( GafferUI.NodeSetEditor ) : } ) + nodeGadgetTypes = Gaffer.Metadata.value( node, "uiEditor:nodeGadgetTypes" ) + if nodeGadgetTypes : + nodeGadgetTypes = set( nodeGadgetTypes ) + if nodeGadgetTypes == { "GafferUI::AuxiliaryNodeGadget", "GafferUI::StandardNodeGadget" } : + nodeGadgetType = Gaffer.Metadata.value( node, "nodeGadget:type" ) or "GafferUI::StandardNodeGadget" + menuDefinition.append( + "/Show Name", + { + "command" : functools.partial( cls.__setNameVisible, node ), + "checkBox" : nodeGadgetType == "GafferUI::StandardNodeGadget", + "active" : not Gaffer.MetadataAlgo.readOnly( node ), + } + ) + else : + # We want to present the options above as a simple "Show Name" checkbox, and haven't yet + # decided how to present other combinations of allowable gadgets. + IECore.msg( IECore.msg.Warning, "UIEditor", 'Unknown combination of "uiEditor:nodeGadgetTypes"' ) + @classmethod def appendNodeEditorToolMenuDefinitions( cls, nodeEditor, node, menuDefinition ) : @@ -316,6 +334,14 @@ class UIEditor( GafferUI.NodeSetEditor ) : with Gaffer.UndoScope( node.ancestor( Gaffer.ScriptNode ) ) : Gaffer.Metadata.registerValue( node, "nodeGadget:color", color ) + @staticmethod + def __setNameVisible( node, nameVisible ) : + + with Gaffer.UndoContext( node.ancestor( Gaffer.ScriptNode ) ) : + Gaffer.Metadata.registerValue( + node, "nodeGadget:type", + "GafferUI::StandardNodeGadget" if nameVisible else "GafferUI::AuxiliaryNodeGadget" + ) GafferUI.Editor.registerType( "UIEditor", UIEditor )
hetr: poll for exited child instead of hanging waiting for result from child; raise error. Fix style error.
@@ -37,6 +37,8 @@ def build_transformer(name): class AsyncTransformer(Process): + SLEEP_S = 0.2 + def __init__(self, transformer_type): super(AsyncTransformer, self).__init__() self.transformer_type = transformer_type @@ -79,7 +81,17 @@ class AsyncTransformer(Process): self.async_transformer.work_q.put((self.comp_id, values)) def get_results(self): - return self.async_transformer.results_qs[self.comp_id].get() + while True: + try: + q = self.async_transformer.results_qs[self.comp_id] + return q.get(timeout=AsyncTransformer.SLEEP_S) + except Exception as e: + import Queue + if isinstance(e, Queue.Empty): + if not self.async_transformer.is_alive(): + raise RuntimeError("Child process unexpectedly exited") + else: + raise self.child_ops = returns self.child_args = placeholders @@ -179,16 +191,14 @@ class AsyncTransformer(Process): r.control_deps.add(op) def run(self): - # build the transformer first to catch any errors transformer = build_transformer(self.transformer_type) # collect requests to make computations, but do them all at once - SLEEP_S = 0.2 while self.work_q.empty(): if self.exit.is_set(): return - time.sleep(SLEEP_S) + time.sleep(AsyncTransformer.SLEEP_S) # build all the computations while not self.computation_q.empty(): @@ -199,13 +209,14 @@ class AsyncTransformer(Process): comp_id = self.computation_q.get() returns, placeholders = self.computation_builds[comp_id] computation = transformer.computation(returns, *placeholders) + self.computations[comp_id] = computation # begin doing work; trigger transformer init on first call while not self.exit.is_set(): try: # shared work q serializes work requests - comp_id, inputs = self.work_q.get(timeout=SLEEP_S) + comp_id, inputs = self.work_q.get(timeout=AsyncTransformer.SLEEP_S) # actual computation objects stored in this process, indexed computation = self.computations[comp_id]
Update GH Action test-ci.yml dependencies Updating version of checkout and setup-python actions. Also making sure we install tox, tox-gh-actions into our venv. Changes based on tox-gh-actions README.
@@ -18,14 +18,14 @@ jobs: runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install --upgrade pip - pip install tox tox-gh-actions + python -m pip install tox tox-gh-actions - name: Test with tox run: tox
Commented out parameter_modulation_operation but EVC is now not correct
@@ -583,8 +583,6 @@ class ParameterState(State_Base): prefs=prefs, context=self) - self.parameterModulationOperation = self.paramsCurrent[PARAMETER_MODULATION_OPERATION] - def _validate_params(self, request_set, target_set=None, context=None): """Insure that parameterState (as identified by its name) is for a valid parameter of the owner
[doc] Add custom theme location see
@@ -229,6 +229,8 @@ Here are some screenshots for all themes that currently exist: :exclamation: Some themes (all 'Powerline' themes) require [Font Awesome](http://fontawesome.io/) and a powerline-compatible font ([powerline-fonts](https://github.com/powerline/fonts), for example) to display all icons correctly. +:exclamation: If you want to add your own team, just drop it into `~/.config/bumblebee-status/themes/` + Gruvbox Powerline (`-t gruvbox-powerline`) (contributed by [@TheEdgeOfRage](https://github.com/TheEdgeOfRage)): ![Gruvbox Powerline](https://github.com/tobi-wan-kenobi/bumblebee-status/blob/master/screenshots/themes/powerline-gruvbox.png)
Add CHANGES modified: CHANGES.rst
- (Hotfix) Fixed bug that allowed science frames to be assigned to multiple instrument configurations +- (Hotfix) Fixed typo related to GitHub download for offline processing 1.11.0 (21 Oct 2022) --------------------
Update android_cerberus.txt Many clean files point to these addresses
@@ -9356,12 +9356,6 @@ http://103.214.5.124 geldigelenolanlarigene.xyz -# Reference: https://twitter.com/unidentified0xc/status/1423343884399222796 -# Reference: https://www.virustotal.com/gui/file/2b3c217d08d9de6f7fb79f2585f747a1852a05a7e32a20cf04321b016e8977ee/detection - -ap-northeast-1.queue.amazonaws.com -sqs.ap-northeast-1.amazonaws.com - # Generic /edestekid4523e3/
CBCollection.__new__ fails on some platforms Tested-by: Build Bot Tested-by: Ellis Breen
@@ -286,12 +286,6 @@ def _wrap_multi_mutation_result(wrapped # type: CoreBucketOp return _inject_scope_and_collection(wrapper) -def _wrap_collections_class(cls): - for name in cls._MEMCACHED_OPERATIONS: - meth = getattr(cls, name) - if not name.startswith('_'): - setattr(cls, name, _inject_scope_and_collection(meth)) - def _dsop(create_type=None, wrap_missing_path=True): import functools @@ -320,9 +314,20 @@ def _dsop(create_type=None, wrap_missing_path=True): class CBCollection(wrapt.ObjectProxy): - def __new__(cls, *args, **kwargs): - _wrap_collections_class(cls) - return super(CBCollection, cls).__new__(cls, *args, **kwargs) + def __reduce_ex__(self, protocol): + raise NotImplementedError() + + def __reduce__(self): + raise NotImplementedError() + + @classmethod + def _wrap_collections_class(cls): + if not hasattr(cls, 'coll_wrapped'): + for name in cls._MEMCACHED_OPERATIONS: + meth = getattr(cls, name) + if not name.startswith('_'): + setattr(cls, name, _inject_scope_and_collection(meth)) + cls.coll_wrapped = True def _inject_scope_collection_kwargs(self, kwargs): # NOTE: BinaryCollection, for instance, contains a collection and has an interface @@ -352,6 +357,7 @@ class CBCollection(wrapt.ObjectProxy): :param options: miscellaneous options """ assert issubclass(type(parent_scope.bucket), CoreClientDatastructureWrap) + self._wrap_collections_class() wrapt.ObjectProxy.__init__(self, parent_scope.bucket) self._self_scope = parent_scope # type: Scope self._self_name = name # type: Optional[str]
Hopefully execute jobs only if required, i.e.: always run detect changes; only execute baseline if detect_changes found common files changed; only execute evaluation if baseline is successful, or if it is skipped but specific frameworks need to be evaluated
@@ -18,7 +18,8 @@ jobs: runs-on: ubuntu-latest outputs: output1: ${{ steps.find-required-tests.outputs.frameworks }} - output2: ${{ steps.find-required-tests.outputs.common_changed }} + output2: ${{ steps.find-required-tests.outputs.skip_baseline }} + output3: ${{ steps.find-required-tests.outputs.skip_evaluation }} steps: - uses: actions/checkout@v2 - name: pull base branch @@ -50,24 +51,34 @@ jobs: fi done + # Indicates which jobs should be executed (0) or not (1) + skip_evaluation=0 if [[ $is_common ]]; then frameworks='["autogluon", "autosklearn", "gama", "h2oautoml", "mlplanweka", "tpot", "constantpredictor", "randomforest", "tunedrandomforest"]' else changed=$(git diff --name-only HEAD..$GITHUB_BASE_REF | grep -o -i -P 'frameworks/(?!shared).*/' | uniq | sed -e 's/frameworks//' -e 's/\///g') + if [ ! -z $changed ]; + then json_array=[ for framework in $changed; do json_array=$json_array\"$framework\",; done frameworks=${json_array::-1}] #remove trailing comma and add closing bracket + else + # No changes to common files or frameworks - must be e.g. docs. No need to run tests. + skip_evaluation=1 + frameworks=[] + fi echo Building matrix for frameworks: $frameworks echo "::set-output name=frameworks::$frameworks" - echo "::set-output name=common_changed::$is_common" + echo "::set-output name=skip_baseline::$is_common" + echo "::set-output name=skip_evaluations::$skip_evaluation" baseline: name: Run Baseline on OpenML Iris runs-on: ubuntu-latest needs: detect_changes - if: needs.detect_changes.outputs.common_changed == 0 + if: needs.detect_changes.outputs.skip_baseline == 0 steps: - uses: actions/checkout@v2 - uses: actions/cache@v2 @@ -94,12 +105,14 @@ jobs: run_frameworks: name: ${{ matrix.framework }}/${{ matrix.task }} runs-on: ubuntu-latest - needs: baseline - if: ! ${{ failure() }} # `baseline` is cancelled when running only specific frameworks, but then should still run + needs: + - baseline + - detect_changes + if: ! ${{ failure() }} && ${{ needs.detect_changes.outputs.skip_evaluation }} == 0 strategy: matrix: python-version: [3.8] - framework: [autogluon, autosklearn, gama, h2oautoml, mlplanweka, tpot, constantpredictor, randomforest, tunedrandomforest] + framework: ${{ fromJson(needs.detect_changes.outputs.frameworks) }} task: [APSFailure, bioresponse, dresses-sales, eucalyptus, internet-advertisements, kc1, micro-mass] fail-fast: true # not sure about this one, but considering the big workload it might be nicer
fix docs/readme turns out *github* renders link text that spans lines, no problem. but *github pages* chokes on link text that spans lines, instead linking to the raw markdown rather than the rendered html as intended. lesson learned the hard way: one line per link.
@@ -7,14 +7,5 @@ For now, the API is documented in the repo's top-level The Epidata API is built and maintained by the Carnegie Mellon University [Delphi research group](https://delphi.cmu.edu/). Explore one way in which -Delphi is responding to the pandemic by visiting the [COVID-19 Survey -page](covid_survey.md). - -# ignore - -gotta love testing in production. - -visiting the [COVID-19 Survey page](covid_survey.md). - -visiting the [COVID-19 Survey -page](covid_survey.md). +Delphi is responding to the pandemic by visiting the +[COVID-19 Survey page](covid_survey.md).
BUG: MolType.make_seq method no longer overrides attribute [FIXED] created a MolType._make_seq attribute to separate these two
@@ -517,7 +517,7 @@ class MolType(object): seq_constructor = ''.join # safe default string constructor elif not preserve_existing_moltypes: seq_constructor.moltype = self - self.make_seq = seq_constructor + self._make_seq = seq_constructor # set the ambiguities ambigs = {self.missing: tuple( @@ -550,6 +550,7 @@ class MolType(object): self.pairs.update(make_pairs(pairs, motifset, self.gaps, self.degenerates)) self.mw_calculator = mw_calculator + # add lowercase characters, if we're doing that if add_lower: self._add_lowercase() @@ -623,7 +624,7 @@ class MolType(object): def make_seq(self, seq, name=None, **kwargs): """Returns sequence of correct type.""" - return self.make_seq(seq, name, **kwargs) + return self._make_seq(seq, name, **kwargs) def verify_sequence(self, seq, gaps_allowed=True, wildcards_allowed=True): """Checks whether sequence is valid on the default alphabet.
autostart fix for repo name change Simple change here, accompanied by a change in /home/pi/.config/autostart/runai.desktop in Raspbian, which will need to be updated manually for users who have an existing uSD with the old depthai-python-extras repo. All future images on uSDs that ship from Luxonis will have this change.
@@ -11,6 +11,6 @@ raspi-gpio set 33 dl # drive low to allow Myriad X to run echo Booting DepthAI echo Loading Start-up Demo Application... sleep 1 -cd /home/pi/Desktop/depthai-python-extras +cd /home/pi/Desktop/depthai python3 test.py sleep 60
Fix for periods from 11th onward not saving Only the first digit of the period id was parsed, thus saving worked only for ids 0 - 9.
@@ -84,7 +84,7 @@ export function sortPeriodDays($periodItem) { function updatePeriodDaysIndices($periodItem) { let originalDaysList = $periodItem.find('.weekday-row.original-day'); let newDaysList = $periodItem.find('.weekday-row:not(.original-day)'); - let periodIdNum = $periodItem[0].id.match(/[0-9]/)[0]; + let periodIdNum = $periodItem[0].id.match(/[0-9]+/)[0]; const setIndex = function (dayIndex, day) { $(day).attr('id', $(day).attr('id').replace(/-(\d+)-(\d+)/, '-' + periodIdNum + '-' + dayIndex));
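For illustration only (the fix above is in JavaScript, and `period-12` is an invented element id): a single-character digit class stops after the first digit, while the greedy `[0-9]+` captures the whole number.

```python
import re

element_id = "period-12"                         # invented id for illustration
print(re.search(r"[0-9]", element_id).group())   # '1'  -> period 12 was saved as period 1
print(re.search(r"[0-9]+", element_id).group())  # '12' -> the whole id is parsed
```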
split setup_env to separate out pre-commit step Dependency installations are now divided into setup_conda, setup_pip, and setup_dependencies (= setup_conda + setup_pip), in addition to setup_env
# This file is part of the pyani package distribution # (https://github.com/widdowquinn/pyani) -# Set up all development dependencies in the current conda environment -setup_env: +# Install conda dependencies +setup_conda: @conda install --file requirements-dev.txt --yes @conda install --file requirements.txt --yes @conda install --file requirements-thirdparty.txt --yes @conda install --file requirements-fastani.txt --yes @conda install --file requirements-pyqt-conda.txt --yes + +# Install pip dependencies +setup_pip: @pip install -r requirements-pip.txt + +# Install dependencies, but not pre-commit +setup_dependencies: setup_conda setup_pip + @pip install -U -e . + +# Set up all development dependencies and pre-commit in the current conda environment +setup_env: setup_conda setup_pip @pre-commit install @pip install -U -e . # Run all tests and display coverage report in a browser test: - @pytest --cov-report=html --cov=pyani -v tests/ && open htmlcov/index.html + @python -m pytest --cov-report=html --cov=pyani -v tests/ && open htmlcov/index.html # Build and display documentation docs: clean_docs
tests/TransferMechanism: Run more than one iteration in tests that use integrator mode Use a rate other than 1 to observe the effect of integrator_mode = True.
@@ -54,15 +54,16 @@ class TestTransferMechanismInputs: T = TransferMechanism( name='T', default_variable=[0 for i in range(VECTOR_SIZE)], - integration_rate=1.0, + integration_rate=0.5, integrator_mode=True ) T.reset_stateful_function_when = Never() var = [10.0 for i in range(VECTOR_SIZE)] EX = pytest.helpers.get_mech_execution(T, mech_mode) + EX(var) val = EX(var) - assert np.allclose(val, [[10.0 for i in range(VECTOR_SIZE)]]) + assert np.allclose(val, [[7.5 for i in range(VECTOR_SIZE)]]) if benchmark.enabled: benchmark(EX, var) @@ -149,15 +150,16 @@ class TestTransferMechanismNoise: default_variable=[0 for i in range(VECTOR_SIZE)], function=Linear(), noise=5.0, - integration_rate=1.0, + integration_rate=0.5, integrator_mode=True ) T.reset_stateful_function_when = Never() EX = pytest.helpers.get_mech_execution(T, mech_mode) - var = [0 for i in range(VECTOR_SIZE)] + var = [1 for i in range(VECTOR_SIZE)] + EX(var) val = EX(var) - assert np.allclose(val, [[5.0 for i in range(VECTOR_SIZE)]]) + assert np.allclose(val, [[8.25 for i in range(VECTOR_SIZE)]]) if benchmark.enabled: benchmark(EX, var) @@ -203,16 +205,17 @@ class TestTransferMechanismNoise: name='T', default_variable=[0 for i in range(VECTOR_SIZE)], function=Linear(), - noise=[5.0 for i in range(VECTOR_SIZE)], - integration_rate=1.0, + noise=[5.0 + i for i in range(VECTOR_SIZE)], + integration_rate=0.3, integrator_mode=True ) T.reset_stateful_function_when = Never() EX = pytest.helpers.get_mech_execution(T, mech_mode) var = [0 for i in range(VECTOR_SIZE)] + EX(var) val = EX(var) - assert np.allclose(val, [[5.0 for i in range(VECTOR_SIZE)]]) + assert np.allclose(val, [[8.5 + (i * 1.7) for i in range(VECTOR_SIZE)]]) if benchmark.enabled: benchmark(EX, var)
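The new expected values can be reproduced by hand, assuming the integrator applies exponential smoothing of the form `new = (1 - rate) * previous + rate * variable + noise` over the two calls to `EX(var)`; this formula is an assumption used to sanity-check the numbers, not taken from the library's docs.

```python
def smooth(rate, variable, noise=0.0, previous=0.0, steps=2):
    # Two steps, because each test now calls EX(var) twice.
    for _ in range(steps):
        previous = (1 - rate) * previous + rate * variable + noise
    return previous

print(smooth(0.5, 10.0))                # 7.5   -> first test
print(smooth(0.5, 1.0, noise=5.0))      # 8.25  -> scalar-noise test
print(smooth(0.3, 0.0, noise=5.0 + 2))  # 11.9  -> vector-noise test at i = 2 (8.5 + 2 * 1.7)
```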
llvm, function/Distance: Don't use self._variable_length The variable is available right there.
@@ -10560,7 +10560,7 @@ class Distance(ObjectiveFunction): # FIXME: PEARSON breaks output format if (self.metric == PEARSON): - selfcor = 1/(self._variable_length // 2) if self.normalize else 1 + selfcor = 1 / len(variable[0]) if self.normalize else 1 return np.array([[selfcor, ret], [ret, selfcor]]) return ret
Fix for duration of signals in `generate_one_simple_segment` The default `sampling_rate` has units kHz, and the default `duration` has units s. By dropping units from their product without first simplifying, the number of data points was off by a factor of 1000. Consequently, the default 6 second signal was instead 6 milliseconds long.
@@ -64,7 +64,7 @@ def generate_one_simple_segment(seg_name='segment 0', supported_objects=[], nb_a seg = Segment(name=seg_name) if AnalogSignal in supported_objects: for a in range(nb_analogsignal): - anasig = AnalogSignal(rand(int(sampling_rate * duration)), sampling_rate=sampling_rate, + anasig = AnalogSignal(rand(int((sampling_rate * duration).simplified)), sampling_rate=sampling_rate, t_start=t_start, units=pq.mV, channel_index=a, name='sig %d for segment %s' % (a, seg.name)) seg.analogsignals.append(anasig)
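A rough sketch of the unit mismatch, using the `quantities` package with assumed values (10 kHz, 6 s) rather than the exact neo defaults: without `.simplified`, the kHz·s product keeps a magnitude of 60 instead of the 60000 dimensionless samples it represents.

```python
import quantities as pq

sampling_rate = 10 * pq.kHz   # assumed rate for illustration
duration = 6.0 * pq.s         # assumed duration for illustration

product = sampling_rate * duration
print(int(product))             # 60    -- units dropped without simplifying
print(int(product.simplified))  # 60000 -- kHz * s rescaled to a dimensionless count
```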
docs: Update references to "QEMU-native TLS" document Link to the "Secure live migration with QEMU-native TLS" document from other relevant guides, and small blurbs of text where appropriate. Blueprint: support-qemu-native-tls-for-live-migration
@@ -75,10 +75,6 @@ using the KVM and XenServer hypervisors. KVM-libvirt ~~~~~~~~~~~ -.. :ref:`_configuring-migrations-kvm-general` -.. :ref:`_configuring-migrations-kvm-block-and-volume-migration` -.. :ref:`_configuring-migrations-kvm-shared-storage` - .. _configuring-migrations-kvm-general: General configuration @@ -136,13 +132,29 @@ the instructions below: Be mindful of the security risks introduced by opening ports. +.. _`configuring-migrations-securing-live-migration-streams`: + +Securing live migration streams +------------------------------- + +If your compute nodes have at least libvirt 4.4.0 and QEMU 2.11.0, it is +strongly recommended to secure all your live migration streams by taking +advantage of the "QEMU-native TLS" feature. This requires a +pre-existing PKI (Public Key Infrastructure) setup. For further details +on how to set this all up, refer to the +:doc:`secure-live-migration-with-qemu-native-tls` document. + + .. _configuring-migrations-kvm-block-and-volume-migration: Block migration, volume-based live migration -------------------------------------------- -No additional configuration is required for block migration and volume-backed -live migration. +If your environment satisfies the requirements for "QEMU-native TLS", +then block migration requires some setup; refer to the above section, +`Securing live migration streams`_, for details. Otherwise, no +additional configuration is required for block migration and +volume-backed live migration. Be aware that block migration adds load to the network and storage subsystems.
Run CDK tests as part of the run-tests GitHub action I've set this up as a separate job under this action to isolate setting up node and the CDK to only the tests that need it.
@@ -24,6 +24,29 @@ jobs: pip install -e . - name: Run PRCheck run: make prcheck + cdktests: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, macos-latest] + python-version: [3.6, 3.7, 3.8] + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-node@v2 + with: + node-version: '14' + - uses: actions/setup-python@v2 + name: Set up Python ${{ matrix.python-version }} + with: + python-version: ${{ matrix.python-version }} + - name: Install CDK + run: npm install -g aws-cdk + - name: Install dependencies + run: | + pip install -r requirements-dev.txt -r requirements-docs.txt + pip install -e .[cdk] + - name: Run CDK tests + run: python -m pytest tests/functional/cdk # Chalice works on windows, but there's some differences between # the GitHub actions windows environment and our windows dev # laptops that are causing certain tests to fail. Once these
TST: added test for `inst_module` instantiation Added a test for appropriate `inst_module` values.
@@ -94,6 +94,7 @@ class InstTestClass(): assert inst.name == module.name assert inst.inst_id == inst_id assert inst.tag == tag + assert inst.inst_module is not None # Test the required class attributes for iattr in self.inst_attrs:
Changed dimensionality_PCA just for testing
@@ -228,7 +228,7 @@ def dimensionality_RF(instruction, dataset, target="", y="", n_features=10): accuracy_scores), list(columns[the_index]) -def dimensionality_PCA(instruction, dataset, target="", y="", n_components=10): +def dimensionality_PCA(instruction, dataset, target="", y=""): global currLog global counter @@ -244,12 +244,13 @@ def dimensionality_PCA(instruction, dataset, target="", y="", n_components=10): le = preprocessing.LabelEncoder() y = le.fit_transform(y) - pca = PCA(n_components=len(dataset.columns)) + # PCA will hold 92% of the variance + pca = PCA(0.92) data_modified = pca.fit_transform(dataset) X_train, X_test, y_train, y_test = train_test_split( dataset, y, test_size=0.2, random_state=49) - X_train_mod, none, y_train_mod, none1 = train_test_split( + X_train_mod, X_test_mod, y_train_mod, y_test_mod = train_test_split( data_modified, y, test_size=0.2, random_state=49) clf = tree.DecisionTreeClassifier() @@ -257,11 +258,20 @@ def dimensionality_PCA(instruction, dataset, target="", y="", n_components=10): clf_mod = tree.DecisionTreeClassifier() clf_mod.fit(X_train_mod, y_train_mod) + acc=[] + acc.append(accuracy_score( + clf_mod.predict(X_test_mod), y_test_mod)) + for j in ["entropy","gini"]: + for i in range(3,len(dataset.columns)): + model=tree.DecisionTreeClassifier(criterion=j, max_depth=i) + model=model.fit(X_train,y_train) + pred=model.predict(X_test) + acc.append(accuracy_score(pred,y_test)) accuracies = [ accuracy_score( - clf.predict(X_test), y_test), accuracy_score( - clf_mod.predict(none), none1)] + clf.predict(X_test), y_test), max(acc)] + data_modified = pd.DataFrame(data_modified) y_combined = np.r_[y_train, y_test]
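The `PCA(0.92)` idiom relies on scikit-learn's behavior that a float in (0, 1) keeps just enough components to explain that fraction of the variance. A standalone check on a toy dataset (iris, unrelated to the project's data):

```python
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA

X = load_iris().data
pca = PCA(0.92).fit(X)

print(pca.n_components_)                    # number of components kept
print(pca.explained_variance_ratio_.sum())  # >= 0.92 by construction
```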
[hail] retry gradle download This addresses issues where the gradle download may fail. We retry a command that is cheap (`--version`) but which requires downloading the gradle binary.
@@ -544,6 +544,7 @@ steps: cd repo {{ code.checkout_script }} cd hail + time retry ./gradlew --version time make jars python-version-info wheel time (cd python && zip -r hail.zip hail hailtop) time tar czf test.tar.gz -C python test @@ -1715,6 +1716,7 @@ steps: fi cd hail + time retry ./gradlew --version make test-dataproc DEV_CLARIFIER=ci_test_dataproc/ dependsOn: - ci_utils_image @@ -1759,6 +1761,7 @@ steps: exit 0 fi + time retry ./gradlew --version make wheel upload-artifacts DEPLOY_REMOTE=origin bash scripts/deploy.sh $(cat /io/hail_pip_version) \
Move SVGImage import into conditional statement SVG image support is optional. By importing the svgimage module at the top of the image.py module, any rst document that contained an image would rely on having svglib installed. This commit fixes that problem.
@@ -13,8 +13,6 @@ import urllib from .opt_imports import PILImage, pdfinfo from .log import log, nodeid -from .svgimage import SVGImage - # This assignment could be overridden by an extension module VectorPdf = None @@ -193,6 +191,7 @@ class MyImage (Flowable): if extension in ['svg','svgz']: log.info('Backend for %s is SVGIMage'%filename) + from .svgimage import SVGImage backend=SVGImage elif extension in ['pdf']: @@ -246,6 +245,7 @@ class MyImage (Flowable): xdpi, ydpi = client.styles.def_dpi, client.styles.def_dpi extension = imgname.split('.')[-1].lower() if extension in ['svg','svgz']: + from .svgimage import SVGImage iw, ih = SVGImage(imgname, srcinfo=srcinfo).wrap(0, 0) # These are in pt, so convert to px iw = iw * xdpi / 72
Update lokibot.txt ```/fre.php``` is in use from proto-historic times of MT.
@@ -2605,6 +2605,10 @@ http://193.142.59.22/jaydee/logs/fre.php /panel_jee.php /pen.php +# Reference: https://twitter.com/wwp96/status/1235606545771175943 + +site-inspection.com + # Reference: http://tracker.viriback.com/dump.php (2020-02-29) /high/sumy/ltd.php @@ -2623,4 +2627,3 @@ http://193.142.59.22/jaydee/logs/fre.php /r!/e/site.php /t70/H/site.php /vp-/9/site.php -/fre.php
[easy] Stop hardcoding "python" executable in bottleneck tests Right now, the bottleneck test_utils.py tests assume that a user's python executable is 'python'. This may not be the case especially if the user has multiple versions of python installed. This PR changes it so that test_utils.py uses `sys.executable` as the python executable.
@@ -525,7 +525,7 @@ class TestBottleneck(TestCase): if scriptargs != '': scriptargs = ' {}'.format(scriptargs) rc, out, err = self._run( - 'python -m torch.utils.bottleneck {}{}'.format(filepath, scriptargs)) + '{} -m torch.utils.bottleneck {}{}'.format(sys.executable, filepath, scriptargs)) return rc, out, err def _check_run_args(self):
docs: Fix anchor link written in reference link syntax. This has been broken since it was added in a rewrite in July 2019 in An incomplete fix was made a few days later in
@@ -28,7 +28,7 @@ Our API documentation is defined by a few sets of files: This first section is focused on explaining how the API documentation system is put together; when actually documenting an endpoint, you'll -want to also read the [Step by step guide][#step-by-step-guide]. +want to also read the [Step by step guide](#step-by-step-guide). ## How it works
Defined found_results before try block so it's always assigned Up until now, the "if not found_results" line could throw an UnboundLocalError: the variable was assigned inside a try block that could fail, but it was referenced later regardless.
@@ -442,6 +442,7 @@ class LDAPUsers(FederatedUsers): break while True: + found_results = 0 try: if has_pagination: _, rdata, _, serverctrls = conn.result3(msgid) @@ -449,7 +450,6 @@ class LDAPUsers(FederatedUsers): _, rdata = conn.result(msgid) # Yield any users found. - found_results = 0 for userdata in rdata: found_results = found_results + 1 yield self._build_user_information(userdata[1])
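A minimal, self-contained sketch of the failure mode (names invented, not the project's own): a variable bound only after a statement that can raise inside `try` is never assigned when that statement fails, so referencing it afterwards blows up.

```python
def lookup(fail):
    try:
        if fail:
            raise RuntimeError("query failed")  # stands in for conn.result() raising
        found_results = 0                       # only reached on success
        found_results += 1
    except RuntimeError:
        pass
    return not found_results                    # UnboundLocalError when fail=True

lookup(fail=False)          # fine
try:
    lookup(fail=True)
except UnboundLocalError as exc:
    print(exc)              # moving 'found_results = 0' above the try removes this
```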
Updated configuration documentation Debug toolbar settings look for DEBUG_TOOLBAR_CONFIG in django settings. Documentation implies this dict is called CONFIG_DEFAULTS.
@@ -191,7 +191,7 @@ Panel options Here's what a slightly customized toolbar configuration might look like:: # This example is unlikely to be appropriate for your project. - CONFIG_DEFAULTS = { + DEBUG_TOOLBAR_CONFIG = { # Toolbar options 'RESULTS_CACHE_SIZE': 3, 'SHOW_COLLAPSED': True,
stream edit: Add <Enter> keyboard shortcut to invite users to stream. Fixes
@@ -665,6 +665,13 @@ exports.initialize = function () { $("#subscriptions_table").on("click", ".sub_setting_checkbox", exports.stream_setting_clicked); + $("#subscriptions_table").on("keyup", ".subscriber_list_add form", function (e) { + if (e.which === 13) { + e.preventDefault(); + submit_add_subscriber_form(e); + } + }); + $("#subscriptions_table").on("submit", ".subscriber_list_add form", function (e) { e.preventDefault(); submit_add_subscriber_form(e);
Remove printer module This will be replaced later with something that is customized more for CLI. For now, simple logging messages will suffice.
@@ -4,7 +4,6 @@ import sys import yaml import argparse import os -import fonz.printer as printer from fonz.runner import Runner from fonz.client import LookerClient from fonz.exceptions import FonzException, ValidationError @@ -20,11 +19,8 @@ def handle_exceptions(function): except FonzException as error: logger.error( f"{error}\n\n" - + printer.color( "For support, please create an issue at " - "https://github.com/dbanalyticsco/Fonz/issues\n", - "dim", - ) + "https://github.com/dbanalyticsco/Fonz/issues\n" ) sys.exit(error.exit_code) except Exception as error: @@ -32,11 +28,8 @@ def handle_exceptions(function): logger.error( f'Encountered unexpected {error.__class__.__name__}: "{error}"\n' f"Full error traceback logged to {LOG_FILEPATH}\n\n" - + printer.color( "For support, please create an issue at " - "https://github.com/dbanalyticsco/Fonz/issues\n", - "dim", - ) + "https://github.com/dbanalyticsco/Fonz/issues\n" ) sys.exit(1)
Add garage.sampler to API Reference Somehow, I forgot this.
@@ -55,6 +55,7 @@ and how to implement new MDPs and new algorithms. _autoapi/garage/np/index _autoapi/garage/plotter/index _autoapi/garage/replay_buffer/index + _autoapi/garage/sampler/index _autoapi/garage/tf/index _autoapi/garage/torch/index ```
Generate JSON to construct the job matrix This way only the executed jobs are generated, instead of having to rely on skips later, which will make the joblist/UI less cluttered.
@@ -30,7 +30,12 @@ jobs: - name: store changed frameworks id: frameworks-diff run: | - echo "::set-output name=frameworks::`git diff --name-only HEAD..$GITHUB_BASE_REF | grep -o -i -P 'frameworks/(?!shared).*/' | uniq | sed -e 's/frameworks//' -e 's/\///g' | perl -p -e 's/\n//'`" + changed=$(git diff --name-only HEAD..$GITHUB_BASE_REF | grep -o -i -P 'frameworks/(?!shared).*/' | uniq | sed -e 's/frameworks//' -e 's/\///g') + json_array=[ + for framework in $changed; do json_array=$json_array\"$framework\",; done + json_array=${json_array::-1}] #remove trailing comma and add closing bracket + echo Building matrix for changed frameworks: $json_array + echo "::set-output name=frameworks::$json_array" run_frameworks: @@ -39,7 +44,7 @@ jobs: needs: framework_changes strategy: matrix: - framework: [autogluon, autosklearn, gama, h2oautoml, mlplanweka, tpot, constantpredictor, randomforest, tunedrandomforest] + framework: ${{ fromJson(needs.framework_changes.outputs.frameworks) }} task: [APSFailure, bioresponse, dresses-sales, eucalyptus, internet-advertisements, kc1, micro-mass] fail-fast: true
Update algo.h removed fmod and added modulo
@@ -62,7 +62,7 @@ void launcher(const ML::cumlHandle_impl &handle, Pack<value_t, index_t> data, auto fused_op = [vd, n] __device__(index_t global_c_idx, bool in_neigh) { // fused construction of vertex degree - index_t batch_vertex = fmod(global_c_idx, n); + index_t batch_vertex = global_c_idx % n; if (sizeof(index_t) == 4) { atomicAdd((unsigned int *)(vd + batch_vertex), in_neigh);
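One reason plain integer `%` is generally preferable to floating-point `fmod` for index arithmetic (a standalone Python illustration, not the project's CUDA code): `fmod` goes through double precision, which cannot represent large indices exactly.

```python
import math

i, n = 2**53 + 1, 7           # an index just past double precision's exact range
print(i % n)                  # 5 -- exact integer arithmetic
print(int(math.fmod(i, n)))   # 4 -- 2**53 + 1 rounds to 2**53 as a double
```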
Fixing boto3 install issue. Fix: manually pin python-dateutil to 2.8.0
@@ -39,6 +39,10 @@ setup( 'pytest_marker_bugzilla>=0.9.1.dev6', 'pyvmomi', 'pyhcl', + # issue opened for botocore + # https://github.com/boto/botocore/issues/1872 + # till above issue fixed, manually pointing python-dateutil to 2.8.0 + 'python-dateutil==2.8.0', ], entry_points={ 'console_scripts': [
Attempt to reduce memory usage for host monitor Compile the regexps for ping once only, in an attempt to address possible memory leakage
@@ -212,21 +212,21 @@ class MonitorHost(Monitor): 'host', required=True ) + self.r = re.compile(self.ping_regexp) + self.r2 = re.compile(self.time_regexp) def run_test(self): - r = re.compile(self.ping_regexp) - r2 = re.compile(self.time_regexp) success = False pingtime = 0.0 try: cmd = (self.ping_command % self.host).split(' ') output = subprocess.check_output(cmd) for line in str(output).split("\n"): - matches = r.search(line) + matches = self.r.search(line) if matches: success = True else: - matches = r2.search(line) + matches = self.r2.search(line) if matches: pingtime = matches.group("ms") except Exception as e:
Update Michigan.md starting geos
@@ -8,7 +8,7 @@ tags: protester, punch, tackle id: mi-detroit-4 -geolocation: +geolocation: 42.3312081,-83.043529 **Links** @@ -23,7 +23,7 @@ tags: beat, protester, shield, tackle id: mi-detroit-5 -geolocation: +geolocation: 42.3309973,-83.04279 **Links** @@ -38,7 +38,7 @@ tags: protester, shove, threaten id: mi-detroit-1 -geolocation: +geolocation: 42.3348206,-83.0448825 **Links** @@ -71,7 +71,7 @@ tags: journalist, less-lethal, rubber-bullet, shoot, tear-gas id: mi-detroit-3 -geolocation: +geolocation: 42.332364,-83.0575983 **Links** @@ -86,7 +86,7 @@ tags: arrest, knee, protester, punch, throw id: mi-detroit-6 -geolocation: +geolocation: 42.3331454,-83.0440872 **Links** @@ -101,7 +101,7 @@ tags: journalist, push, shove id: mi-detroit-14 -geolocation: 42.330079, -83.056530 +geolocation: 42.3302387,-83.0563789 **Links** @@ -163,7 +163,7 @@ tags: less-lethal, lrad, protester id: mi-detroit-8 -geolocation: +geolocation: 42.4057561,-82.9967976 **Links** @@ -178,7 +178,7 @@ tags: protester, vehicle id: mi-detroit-9 -geolocation: +geolocation: 42.3083773,-83.1346729 **Links**
Update show_cloudexpress_applications.py Took feedback from Takashi and adjusted latency, loss and vpn to integers.
@@ -3,7 +3,7 @@ from genie.metaparser.util.schemaengine import Any, Or, Optional import re # ======================================= -# Schema for 'show cloudexpress applications' +# Schema for 'show cloudexpress application' # ======================================= class ShowCloudexpressApplicationSchema(MetaParser): @@ -13,14 +13,14 @@ class ShowCloudexpressApplicationSchema(MetaParser): schema = { "index": { Any(): { - "vpn": str, + "vpn": int, "application": str, "exit_type": str, "gw_sys_ip": str, "interface": str, - "latency": str, + "latency": int, "local_color": str, - "loss": str, + "loss": int, "remote_color": str, } } @@ -56,14 +56,15 @@ class ShowCloudexpressApplication(ShowCloudexpressApplicationSchema): m = p1.match(line) if m: group = m.groupdict() + #vpn = group['vpn'] vpn_dict = result_dict.setdefault('index', {}).setdefault(index, {}) - vpn_dict.update({'vpn': group['vpn']}) + vpn_dict.update({'vpn': int(group['vpn'])}) vpn_dict.update({'application': group['application']}) vpn_dict.update({'exit_type': group['exit_type']}) vpn_dict.update({'gw_sys_ip': group['gw_sys_ip']}) vpn_dict.update({'interface': group['interface']}) - vpn_dict.update({'latency': group['latency']}) - vpn_dict.update({'loss': group['loss']}) + vpn_dict.update({'latency': int(group['latency'])}) + vpn_dict.update({'loss': int(group['loss'])}) vpn_dict.update({'local_color': group['local_color']}) vpn_dict.update({'remote_color': group['remote_color']}) index += 1
batch norm docfix fixes the formula for batch normalization (moves the epsilon inside the square root)
@@ -53,7 +53,7 @@ class BatchNorm1d(_BatchNorm): .. math:: - y = \frac{x - mean[x]}{ \sqrt{Var[x]} + \epsilon} * gamma + beta + y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta The mean and standard-deviation are calculated per-dimension over the mini-batches and gamma and beta are learnable parameter vectors @@ -95,7 +95,7 @@ class BatchNorm2d(_BatchNorm): .. math:: - y = \frac{x - mean[x]}{ \sqrt{Var[x]} + \epsilon} * gamma + beta + y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta The mean and standard-deviation are calculated per-dimension over the mini-batches and gamma and beta are learnable parameter vectors @@ -137,7 +137,7 @@ class BatchNorm3d(_BatchNorm): .. math:: - y = \frac{x - mean[x]}{ \sqrt{Var[x]} + \epsilon} * gamma + beta + y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta The mean and standard-deviation are calculated per-dimension over the mini-batches and gamma and beta are learnable parameter vectors
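To see why the ε placement in the corrected formula matters, here is a small NumPy check with a toy batch whose variance is close to zero (values invented). With ε inside the square root the denominator is bounded below by √ε; with ε outside, the denominator can be far smaller when the variance is tiny, so the normalized outputs come out much larger.

```python
import numpy as np

x = np.array([0.0, 1e-4, 2e-4])     # toy mini-batch with near-zero variance
gamma, beta, eps = 1.0, 0.0, 1e-5

mean, var = x.mean(), x.var()

y_inside = (x - mean) / np.sqrt(var + eps) * gamma + beta     # corrected formula
y_outside = (x - mean) / (np.sqrt(var) + eps) * gamma + beta  # old docstring version

print(y_inside)   # ~[-0.03  0.    0.03]  (denominator bounded below by sqrt(eps))
print(y_outside)  # ~[-1.09  0.    1.09]  (much smaller denominator for tiny var)
```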
Respell scalar map to scalar alias. The extra attr is now named `scalar-aliases`; the map is of alias to one or more patterns; if a logged scalar matches an alias pattern, that alias is used for the value.
@@ -22,9 +22,12 @@ import hashlib import logging import os import random +import re import time import sys +import six + from whoosh import fields from whoosh import index from whoosh import query @@ -307,7 +310,7 @@ class RunIndex(object): from tensorboard.backend.event_processing import event_accumulator _ensure_tf_logger_patched() scalars = {} - scalar_map = run.get("_extra_scalar_map", {}) + scalar_aliases = self._init_scalar_aliases(run) for path in io_wrapper.GetLogdirSubdirectories(run.path): events_checksum_field_name = self._events_checksum_field_name(path) last_checksum = fields.get(events_checksum_field_name) @@ -320,10 +323,37 @@ class RunIndex(object): rel_path = os.path.relpath(path, run.path) events = event_accumulator._GeneratorFromPath(path).Load() scalar_vals = self._scalar_vals(events, rel_path) - self._apply_scalar_vals(scalar_vals, scalars, scalar_map) + self._apply_scalar_vals(scalar_vals, scalars, scalar_aliases) scalars[events_checksum_field_name] = cur_checksum return scalars + @staticmethod + def _init_scalar_aliases(run): + """Returns list of scalar aliases as `key`, `pattern` tuples. + + `key` is the mapped scalar key and `pattern` is the associated + scalar key pattern. If a logged scalar key matches `pattern`, + `key` is treated as an alias of the logged scalar key. + """ + attr = run.get("_extra_scalar-aliases", {}) + if not isinstance(attr, dict): + log.debug( + "unexpected type for _extra_scalar-aliases: %s", + type(attr)) + return [] + aliases = [] + for key, patterns in sorted(attr.items()): + if isinstance(patterns, six.string_types): + patterns = [patterns] + for p in patterns: + try: + compiled_p = re.compile(p) + except Exception: + log.debug("invalid alias pattern for %s: %s", key, p) + else: + aliases.append((key, compiled_p)) + return aliases + @staticmethod def _events_checksum_field_name(path): """Returns a field name that is unique for any given path.""" @@ -367,18 +397,14 @@ class RunIndex(object): else: return os.path.normpath(path_prefix) + "/" + tag - def _apply_scalar_vals(self, scalar_vals, scalars, scalar_map): + def _apply_scalar_vals(self, scalar_vals, scalars, aliases): for key, vals in scalar_vals.items(): if not vals: continue self._store_scalar_vals(key, vals, scalars) - try: - mapped_key = scalar_map[key] - except KeyError: - pass - else: - log.debug("mapping scalar %s to %s", key, mapped_key) - self._store_scalar_vals(mapped_key, vals, scalars) + for alias_key in self._alias_keys(key, aliases): + log.debug("using alias %s for %s", alias_key, key) + self._store_scalar_vals(alias_key, vals, scalars) @staticmethod def _store_scalar_vals(key, vals, scalars): @@ -386,6 +412,12 @@ class RunIndex(object): scalars[_encode_field_name("scalar_", key)] = last_val scalars[_encode_field_name("scalar_", key + "_step")] = step + @staticmethod + def _alias_keys(logged_key, aliases): + for alias, pattern in aliases: + if pattern.match(logged_key): + yield alias + def _index_run(self, run, writer): log.debug("indexing run %s", run.id) fields = dict(
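A condensed view of the aliasing logic introduced above, pulled out of the indexing class: the `scalar-aliases` attr maps an alias to a single pattern or a list of patterns, and every alias whose compiled pattern matches a logged key is applied to that key. The alias names and logged keys below are invented.

```python
import re

# Hypothetical `_extra_scalar-aliases` attr: alias -> pattern or list of patterns.
scalar_aliases_attr = {
    "loss": r"train/loss.*",
    "accuracy": [r"val/acc", r"eval/accuracy"],
}

aliases = []
for key, patterns in sorted(scalar_aliases_attr.items()):
    if isinstance(patterns, str):
        patterns = [patterns]
    for p in patterns:
        aliases.append((key, re.compile(p)))

def alias_keys(logged_key):
    # Every alias whose pattern matches the logged scalar key applies to it.
    return [alias for alias, pattern in aliases if pattern.match(logged_key)]

print(alias_keys("train/loss_step"))  # ['loss']
print(alias_keys("val/acc"))          # ['accuracy']
print(alias_keys("lr"))               # []
```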
A few adjustments: enable bumpmapping and specular as they help with visibility; disable player model on hud due to animation issues it causes, as well as FPS decrease; remove some redundant options now that comfig.cfg has been updated
@@ -9,34 +9,27 @@ mat_phong 0 // Disable phong for flatter shading nb_shadow_dist 0 // Disable shadow distance mat_colorcorrection 0 // Disable color correction mat_postprocessing_combine 0 // Faster post processing quit when you have software AA, bloom and color correction disabled -mat_trilinear 0 // Disable trilinear -mat_forceaniso 0 // Disable anisotropic filtering r_flex 0 // Disable facial animations flex_rules 0 // Disable facial animations r_eyemove 0 // Disable eye movement tf_clientsideeye_lookats 0 // Disable eye lookats r_eyes 0 // Disable eyes r_teeth 0 // Do not render teeth, small FPS boost -cl_SetupAllBones 1 // Set up animation components that were mistakedly skipped r_spray_lifetime 0 // Clear sprays immediately cl_playerspraydisable 1 // Disable player sprays r_decal_cullsize 256 // Hide decals when far away props_break_max_pieces 0 // Disables gibs and prop breaking r_staticprop_lod 63 // Force lowest LOD (lowest quality) -mat_bumpmap 0 // Disable bumpmap materials -mat_specular 0 // Disable specular materials r_3dsky 0 // Disable 3D sky cl_jiggle_bone_framerate_cutoff 0 // Always disable jigglebones mat_picmip 2 // Use lowest quality textures r_ropetranslucent 0 // Disable ropes rope_rendersolid 0 // ^ -cl_hud_playerclass_use_playermodel 1 // Enable live player model in HUD, good for spy mod_touchalldata 1 // Be more safe with loading mod_forcedata 1 // ^ snd_pitchquality 0 // Use linear mixer for sound, little performance benefit snd_disable_mixer_duck 0 // Enable mixing sounds to adjust volume snd_spatialize_roundrobin 0 // Spatialize every frame dsp_water 0 // Disable water muffling effect -voice_buffer_ms 100 // Less delay for voice buffer echo "competitive preset loaded."
datapaths.pcap_switch: Allow reuse of ctl server This allows multiple datapaths to use the same ctl server. Before it only really worked for a single DP.
@@ -106,12 +106,15 @@ def launch (address = '127.0.0.1', port = 6633, max_retry_delay = 16, raise RuntimeError("You need PXPCap to use this component") if ctl_port: - if core.hasComponent('ctld'): - raise RuntimeError("Only one ctl_port is allowed") - if ctl_port is True: ctl_port = DEFAULT_CTL_PORT + if core.hasComponent('ctld'): + if core.ctld.port != ctl_port: + raise RuntimeError("Only one ctl_port is allowed") + # We can reuse the exiting one + else: + # Create one... import ctl ctl.server(ctl_port) core.ctld.addListenerByName("CommandEvent", _do_ctl)
Add docs about building the docs. re:
@@ -3,6 +3,7 @@ IPython Documentation This directory contains the majority of the documentation for IPython. + Deploy docs ----------- @@ -12,13 +13,29 @@ Pull requests. Requirements ------------ + +The documentation must be built using Python 3. + The following tools are needed to build the documentation: - sphinx + - sphinx_rtd_theme On Debian-based systems, you should be able to run:: - sudo apt-get install python-sphinx + sudo apt-get install python3-sphinx python3-sphinx-rtd-theme + +In a conda environment, you can use:: + + conda install sphinx sphinx_rtd_theme + +In a Python 3 ``venv``, you should be able to run:: + + pip install -U sphinx sphinx_rtd_theme + + +Build Commands +-------------- The documentation gets built using ``make``, and comes in several flavors. @@ -34,6 +51,3 @@ API documentation. This build target skips that. ``make pdf`` will compile a pdf from the documentation. You can run ``make help`` to see information on all possible make targets. - - -
Fixed the QRadarFullSearch script. The problems were: 1. It did not compile because of this line: `return {"Type" : entryTypes.error, "ContentsFormat" : formats.text, "Contents" : 'An Error occurred during the search process. search status={0}.'.format(status)};` since there is no `status` variable in scope. 2. `q_status in [...]` does not test membership in JavaScript (the `in` operator checks array indices, not values).
@@ -25,14 +25,14 @@ script: |- //submit query, retrive search_id var query_res = executeCommand("qradar-searches", args); + if (isError(query_res[0])) { return query_res; - } - else { + } else { search_id = dq(query_res[0], "Contents.search_id"); search_args.search_id = search_id; - //polling stage + //polling stage var sec = 0; while ((sec < timeout) && !error) { status_res = executeCommand("qradar-get-search", search_args); @@ -42,31 +42,31 @@ script: |- var q_status = dq(status_res[0], "Contents.status"); - if (q_status && q_status in ['WAIT', 'EXECUTE', 'SORTING']){ + if (q_status && ['WAIT', 'EXECUTE', 'SORTING'].indexOf(q_status) !== -1) { log('search id is {0} search status is {1}'.format(search_id,q_status)); - } - else if (q_status && q_status == 'COMPLETED'){ + } else if (q_status && q_status == 'COMPLETED') { search_done = true; - } - else{ + } else { error = true; - return {"Type" : entryTypes.error, "ContentsFormat" : formats.text, "Contents" : 'An Error occurred during the search process. search status={0}.'.format(status)}; + return {"Type" : entryTypes.error, "ContentsFormat" : formats.text, "Contents" : 'An Error occurred during the search process. search status={0}.'.format(q_status)}; } if (search_done) { break; } + sec += interval; wait(interval); } + if (sec >= timeout) { return {"Type" : entryTypes.error, "ContentsFormat" : formats.text, "Contents" : 'Timeout reached. waited for {0} seconds'.format(timeout)}; } + // get results if (search_done) { return executeCommand("qradar-get-search-results", search_args); - } - else { + } else { return {"Type" : entryTypes.error, "ContentsFormat" : formats.text, "Contents" : 'Unexpected error occurred'}; } }
Include guild.op_main in cmd preview At this point guild.op_main is more than just a thin wrapper as it can execute a script with global assigns. Rather than decide when and when not to show guild.op_main, always show.
@@ -618,11 +618,7 @@ def _invalid_op_spec_error(e, opdef): cli.error("operation '%s' is not valid: %s" % (opdef.fullname, e)) def _preview_cmd(op): - # Quote args and remote guild.op_main - return [ - pipes.quote(arg) for arg in op.cmd_args - if arg != "guild.op_main" - ] + return [pipes.quote(arg) for arg in op.cmd_args] def _print_env(op): for name, val in sorted(op.cmd_env.items()):
$.Analysis: make derived types available for root AST properties decl. TN:
@@ -789,6 +789,30 @@ package ${ada_lib_name}.Analysis is procedure Assign_Names_To_Logic_Vars_Impl (Node : access ${root_node_value_type}) is null; + ------------------------------------------------------ + -- AST node derived types (incomplete declarations) -- + ------------------------------------------------------ + + type ${generic_list_value_type}; + -- Base type for all lists of AST node subclasses + + type ${generic_list_type_name} is + access all ${generic_list_value_type}'Class; + + % for astnode in no_builtins(ctx.astnode_types): + % if not astnode.is_list_type: + ${astnode_types.public_incomplete_decl(astnode)} + % endif + % endfor + + % for astnode in ctx.astnode_types: + % if astnode.is_root_list_type: + ${list_types.public_incomplete_decl(astnode.element_type())} + % elif astnode.is_list_type: + ${astnode_types.public_incomplete_decl(astnode)} + % endif + % endfor + ------------------------------ -- Root AST node properties -- ------------------------------ @@ -843,18 +867,11 @@ package ${ada_lib_name}.Analysis is % endif % endfor - ------------------------------------------------------ - -- AST node derived types (incomplete declarations) -- - ------------------------------------------------------ - type ${generic_list_value_type} is abstract new ${root_node_value_type} with private; -- Base type for all lists of AST node subclasses - type ${generic_list_type_name} is - access all ${generic_list_value_type}'Class; - overriding function Image (Node : access ${generic_list_value_type}) return String; @@ -873,20 +890,6 @@ package ${ada_lib_name}.Analysis is overriding function Is_Empty_List (Node : access ${generic_list_value_type}) return Boolean; - % for astnode in no_builtins(ctx.astnode_types): - % if not astnode.is_list_type: - ${astnode_types.public_incomplete_decl(astnode)} - % endif - % endfor - - % for astnode in ctx.astnode_types: - % if astnode.is_root_list_type: - ${list_types.public_incomplete_decl(astnode.element_type())} - % elif astnode.is_list_type: - ${astnode_types.public_incomplete_decl(astnode)} - % endif - % endfor - ----------------------------------------- -- Structure types (full declarations) -- -----------------------------------------
[Cleanup] Remove unused variables and unreachable code Fixed the following instances: scripts\script_wui.py:310: unsatisfiable 'if' condition
@@ -304,11 +304,6 @@ def main_script(page, rev=None, params=NotImplemented): def wiki_logger(buffer, page, rev=None): """Log to wiki.""" - # FIXME: what is this?? - # (might be a problem here for TS and SGE, output string has another - # encoding) - if False: - buffer = buffer.decode(pywikibot.config.console_encoding) buffer = re.sub(r'\03\{(.*?)\}(.*?)\03\{default\}', r'\g<2>', buffer) if rev is None: rev = page.latestRevision()
Add Credits, Legal sections to README also tweak link formatting
@@ -90,12 +90,26 @@ Finally, launch TF2 with only the `-default -autoconfig -console` launch options # Screenshots -[Screenshots are available on the wiki.](https://github.com/mastercoms/tf2cfg/wiki/Screenshots) +[Screenshots are available on the wiki](https://github.com/mastercoms/tf2cfg/wiki/Screenshots). # Troubleshooting -[You can find solutions to common problems on the wiki.](https://github.com/mastercoms/tf2cfg/wiki/Troubleshooting) +[You can find solutions to common problems on the wiki](https://github.com/mastercoms/tf2cfg/wiki/Troubleshooting). # Benchmarks -Benchmarks can be found on the [teamfortress.tv thread](http://www.teamfortress.tv/42867/mastercomfig-fps-customization-config/). +[Benchmarks can be found on the teamfortress.tv thread](http://www.teamfortress.tv/42867/mastercomfig-fps-customization-config/). + +# Credits + +* [Chris](https://chrisdown.name/tf2/) for starting it all +* [Comanglia](http://www.teamfortress.tv/25328/comanglias-config-fps-guide) for continuing what Chris started and helping a bit with my config +* [Rhapsody](http://rhapsodysl.github.io/perfconfig/) for updating Chris' config +* [Felik](http://www.teamfortress.tv/36792/feliks-config-chris-config-replacement) for providing an alternative to Chris-based configs +* The TeamFortress.TV community in the [config thread](http://www.teamfortress.tv/42867/mastercomfig-fps-customization-config/) for their continued support, advice and benchmarks. (Special Mentions to: amazoc, JackStanley, Setsul, Hopps, fagoatse, ZeRo5, stabby, Whisker, Vantavimeow, osvaldo, DarkNecrid, steph, Thole, gemm, sage and perhaps many others) +* [Valve Developer community](https://developer.valvesoftware.com/wiki/Main_Page) for their documentation of Source Engine console varialbes and mechanics +* and to Valve, for making and updating the best class-based FPS to date with so much customizability on top + +# Legal + +Valve, the Valve logo, Steam, the Steam logo, Team Fortress, the Team Fortress logo are trademarks and/or registered trademarks of Valve Corporation. Mastercomfig is not associated with nor endorsed by Valve Corporation.
Apply suggestions from code review Added suggested changes for static builds on Windows
from conans import ConanFile, tools, CMake -from conans.errors import ConanInvalidConfiguration import os import glob @@ -19,7 +18,7 @@ class EasyProfilerConan(ConanFile): "fPIC": [True, False] } default_options = { - "shared": True, + "shared": False, "fPIC": True } short_paths = True @@ -41,11 +40,6 @@ class EasyProfilerConan(ConanFile): def configure(self): if self.options.shared: del self.options.fPIC - # The windows build seems to be giving problems due to certain symbols - # not being exported properly for static libraries. - if not self.options.shared and self.settings.os == "Windows": - raise ConanInvalidConfiguration("Must be built as shared on \ - Windows") def source(self): tools.get(**self.conan_data["sources"][self.version]) @@ -86,3 +80,7 @@ class EasyProfilerConan(ConanFile): self.cpp_info.libs = ["easy_profiler"] if self.settings.os == "Linux": self.cpp_info.system_libs = ["m", "pthread"] + elif self.settings.os == "Windows": + self.cpp_info.system_libs = ["psapi", "ws2_32"] + if not self.options.shared: + self.cpp_info.defines.append("EASY_PROFILER_STATIC")
changed summing method, and added exception for image_id to utility script (bug fix)
@@ -286,12 +286,12 @@ def main(): finalfeatures = [] # Select the features that are not completely empty for x in fheader: - if x == 'in_bounds': + if x == 'in_bounds' or x=='image_id': finalfeatures.append(x) mheader.append(x) elif x != 'in_bounds': table1[str(x)] = table1[str(x)].astype(float) - sumcol = table1[str(x)].apply(np.sum)[1] + sumcol=((table1[str(x)].sum())) if sumcol != 0.0: finalfeatures.append(x) mheader.append(x) @@ -310,12 +310,12 @@ def main(): finalfeatures = [] # Select the features that are not completely empty for x in fheader: - if x == 'in_bounds': + if x == 'in_bounds' or x=='image_id': finalfeatures.append(x) mheader.append(x) elif x != 'in_bounds': table2[str(x)] = table2[str(x)].astype(float) - sumcol = table2[str(x)].apply(np.sum)[1] + sumcol=((table2[str(x)].sum())) if sumcol != 0.0: finalfeatures.append(x) mheader.append(x) @@ -334,16 +334,15 @@ def main(): finalfeatures = [] # Select the features that are not completely empty for x in fheader: - if x == 'in_bounds': + if x == 'in_bounds' or x=='image_id': finalfeatures.append(x) mheader.append(x) elif x != 'in_bounds': table[str(x)] = table[str(x)].astype(float) - sumcol = table[str(x)].apply(np.sum)[1] + sumcol=((table[str(x)].sum())) if sumcol != 0.0: finalfeatures.append(x) mheader.append(x) - # Subset table with the features that are not completely empty table = table[mheader] if args.filter=='filter':
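On the summing change above: `Series.apply(np.sum)` applies `np.sum` to each scalar element, so indexing the result with `[1]` only ever inspected a single row, whereas `Series.sum()` totals the whole column, which is what the "is this feature entirely empty" check needs. A toy pandas illustration, with invented data:

```python
import numpy as np
import pandas as pd

col = pd.Series([3.0, 0.0, 2.5])    # a feature column that is zero only in row 1

old_check = col.apply(np.sum)[1]    # per-element sum, then row 1 only -> 0.0
new_check = col.sum()               # whole-column total -> 5.5

print(old_check, new_check)  # 0.0 5.5 -- the old check would drop a non-empty feature
```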
[easy] Fix bad merge for location subscriber Summary: Somehow had a weird merge. This diff fixes it Test Plan: bk Reviewers: dgibson
@@ -189,11 +189,9 @@ def __init__( self._location_state_events_handler ) + self.version = version self.set_state_subscribers() - location.add_state_subscriber(self._location_state_subscriber) - self._repository_locations[location.name] = location - def set_state_subscribers(self): for location in self._workspace.repository_locations: location.add_state_subscriber(self._location_state_subscriber) @@ -202,7 +200,7 @@ def create_request_context(self) -> WorkspaceRequestContext: return WorkspaceRequestContext( instance=self.instance, workspace_snapshot=self._workspace.create_snapshot(), - repository_locations_dict=self._repository_locations.copy(), + repository_locations_dict=self._workspace.repository_locations_dict.copy(), process_context=self, version=self.version, ) @@ -240,10 +238,6 @@ def reload_repository_location(self, name: str) -> "WorkspaceProcessContext": if self._workspace.has_repository_location(name): new_location = self._workspace.get_repository_location(name) new_location.add_state_subscriber(self._location_state_subscriber) - check.invariant(new_location.name == name) - self._repository_locations[name] = new_location - elif name in self._repository_locations: - del self._repository_locations[name] return self
Remove API macros from intrusive_ptr Summary: Pull Request resolved: This is a templated header-only class and shouldn't need export/import macros.
#pragma once -#include <ATen/core/ATenGeneral.h> #include <c10/util/C++17.h> #include <c10/util/Exception.h> #include <atomic> @@ -114,7 +113,7 @@ class CAFFE2_API intrusive_ptr_target { namespace detail { template <class TTarget> -struct C10_EXPORT intrusive_target_default_null_type final { +struct intrusive_target_default_null_type final { static constexpr TTarget* singleton() noexcept { return nullptr; } @@ -136,7 +135,7 @@ class weak_intrusive_ptr; template < class TTarget, class NullType = detail::intrusive_target_default_null_type<TTarget>> -class C10_EXPORT intrusive_ptr final { +class intrusive_ptr final { private: // the following static assert would be nice to have but it requires // the target class T to be fully defined when intrusive_ptr<T> is instantiated @@ -394,7 +393,7 @@ inline bool operator!=( template < typename TTarget, class NullType = detail::intrusive_target_default_null_type<TTarget>> -class C10_EXPORT weak_intrusive_ptr final { +class weak_intrusive_ptr final { private: static_assert( std::is_base_of<intrusive_ptr_target, TTarget>::value, @@ -742,13 +741,13 @@ namespace std { // To allow intrusive_ptr and weak_intrusive_ptr inside std::unordered_map or // std::unordered_set, we need std::hash template <class TTarget, class NullType> -struct C10_EXPORT hash<c10::intrusive_ptr<TTarget, NullType>> { +struct hash<c10::intrusive_ptr<TTarget, NullType>> { size_t operator()(const c10::intrusive_ptr<TTarget, NullType>& x) const { return std::hash<TTarget*>()(x.get()); } }; template <class TTarget, class NullType> -struct C10_EXPORT hash<c10::weak_intrusive_ptr<TTarget, NullType>> { +struct hash<c10::weak_intrusive_ptr<TTarget, NullType>> { size_t operator()(const c10::weak_intrusive_ptr<TTarget, NullType>& x) const { return std::hash<TTarget*>()(x._unsafe_get_target()); }
Update tools.md remove legacy command line documentation and just leave the link to further documentation
@@ -99,77 +99,7 @@ random images requested must be less than or equal to the number of images in th `plantcv-workflow.py` is a command-line tool for parallel processing of user-defined PlantCV workflows. It is used to process metadata and execute custom workflows on each image in a dataset. More detail is provided in the -[Workflow Parallelization Tutorial](pipeline_parallel.md) but command/input details are provided below: - -``` -usage: plantcv-workflow.py [-h] -d DIR [-a ADAPTOR] -p WORKFLOW -j JSON - [-i OUTDIR] [-T CPU] [-c] [-D DATES] [-t TYPE] - [-l DELIMITER] [-f META] [-M MATCH] [-C COPROCESS] - [-w] [-o OTHER_ARGS] - -Parallel imaging processing with PlantCV. - -optional arguments: - -h, --help - Show this help message and exit - -d DIR, --dir DIR - Input directory containing images or snapshots. - (default: None) - -a ADAPTOR, --adaptor ADAPTOR - Image metadata reader adaptor. PhenoFront metadata is - stored in a CSV file and the image file name. For the - filename option, all metadata is stored in the image - file name. Current adaptors: phenofront, filename - (default: phenofront) - -p WORKFLOW, --workflow WORKFLOW - Workflow script file. (default: None) - -j JSON, --json JSON - Output database file name. (default: None) - -f META, --meta META - Image file name metadata format. List valid metadata - fields separated by commas. - Valid metadata fields are: camera, imgtype, zoom, - exposure, gain, frame, lifter, timestamp, id, - plantbarcode, treatment, cartag, measurementlabel, - other (default: imgtype_camera_frame_zoom_id) - -i OUTDIR, --outdir OUTDIR - Output directory for images. Not required by all - workflows. (default: .) - -T CPU, --cpu CPU - Number of CPU to use. (default: 1) - -c, --create - Will overwrite an existing database. Warning: activating - this option will delete an existing database! - (default: False) - -D DATES, --dates DATES - Date range. Format: YYYY-MM-DD-hh-mm-ss_YYYY-MM-DD-hh- - mm-ss. If the second date is excluded then the current - date is assumed. (default: None) - -t TYPE, --type TYPE - Image format type (extension). (default: png) - -l DELIMITER, --delimiter DELIMITER - Image file name metadata delimiter character. Alternatively, a regular expression for parsing filename metadata. - (default: _) - -M MATCH, --match MATCH - Restrict analysis to images with metadata matching - input criteria. Input a metadata:value comma-separated - list. This is an exact match search. E.g. - imgtype:VIS,camera:SV,zoom:z500 (default: None) - -C COPROCESS, --coprocess COPROCESS - Coprocess the specified imgtype with the imgtype - specified in --match (e.g. coprocess NIR images with - VIS). (default: None) - -s timestampformat, --timestampformat timestampformat - A date format code compatible with strptime C library. - e.g. "%%Y-%%m-%%d %%H_%%M_%%S", except "%%" symbols must be escaped on Windows with "%%" e.g. "%%%%Y-%%%%m-%%%%d %%%%H_%%%%M_%%%%S". - (default: "%%Y-%%m-%%d %%H:%%M:%%S.%%f") - -w, --writeimg - Include analysis images in output. (default: False) - -o OTHER_ARGS, --other_args OTHER_ARGS - Other arguments to pass to the workflow script. - (default: None) - -``` +[Workflow Parallelization Tutorial](pipeline_parallel.md). **Source Code:** [Here](https://github.com/danforthcenter/plantcv/blob/master/plantcv-workflow.py)
Langkit_Support.Iterators: use standard vectors ... instead of Langkit_Support.Vectors ones, as these don't handle tagged types. TN:
-with Langkit_Support.Vectors; +with Ada.Containers.Vectors; package body Langkit_Support.Iterators is @@ -22,7 +22,8 @@ package body Langkit_Support.Iterators is ------------- function Consume (I : Iterator'Class) return Element_Array is - package Element_Vectors is new Langkit_Support.Vectors (Element_Type); + package Element_Vectors is new Ada.Containers.Vectors + (Positive, Element_Type); Element : Element_Type; V : Element_Vectors.Vector; @@ -40,15 +41,17 @@ package body Langkit_Support.Iterators is -- You have to declare the iterator explicitly. while I'Unrestricted_Access.Next (Element) loop - Element_Vectors.Append (V, Element); + V.Append (Element); end loop; - return - Result : constant Element_Array := - Element_Array (Element_Vectors.To_Array (V)) - do - Element_Vectors.Destroy (V); - end return; + declare + Result : Element_Array (1 .. Natural (V.Length)); + begin + for I in Result'Range loop + Result (I) := V.Element (I); + end loop; + return Result; + end; end Consume; end Langkit_Support.Iterators;
Fix typo in `_block_parallel_sync_behavior` docstring Typo
@@ -153,7 +153,7 @@ def _build_training_step_kwargs( @contextmanager def _block_parallel_sync_behavior(strategy: Strategy, block: bool = True) -> Generator[None, None, None]: """Blocks synchronization in :class:`~pytorch_lightning.strategies.parallel.ParallelStrategy`. This is useful - for example when when accumulating gradients to reduce communication when it is not needed. + for example when accumulating gradients to reduce communication when it is not needed. Args: strategy: the strategy instance to use.
DOC: fix docstring in _cdf() function of _multivariate.py See issue Function computes the multivariate cdf, not the log of the same.
@@ -518,7 +518,7 @@ class multivariate_normal_gen(multi_rv_generic): return _squeeze_output(out) def _cdf(self, x, mean, cov, maxpts, abseps, releps): - """Log of the multivariate normal cumulative distribution function. + """Multivariate normal cumulative distribution function. Parameters ----------
Don't catch/pass YAMLError when parsing docstring fails. The current behavior is to swallow any parse errors silently, which makes trying to add more than a trivial apispec docstring confusing and frustrating. See issue for more discussion.
@@ -60,10 +60,7 @@ def load_yaml_from_docstring(docstring): yaml_string = "\n".join(split_lines[cut_from:]) yaml_string = dedent(yaml_string) - try: return yaml.load(yaml_string) - except yaml.YAMLError: - return None PATH_KEYS = set([ 'get',
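With the `try`/`except` removed above, a malformed YAML docstring now raises instead of quietly becoming `None`, so the author sees where the spec is broken. A compact before/after sketch with PyYAML, using `safe_load` and an invented broken snippet:

```python
import yaml

broken = "responses: 200: description"  # malformed mapping -> raises yaml.YAMLError

def old_behaviour(text):
    try:
        return yaml.safe_load(text)
    except yaml.YAMLError:
        return None  # parse errors were silently swallowed

print(old_behaviour(broken))   # None, with no hint about what went wrong

try:
    yaml.safe_load(broken)     # new behaviour: the error propagates
except yaml.YAMLError as exc:
    print(type(exc).__name__)  # ScannerError, with line/column details attached
```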
Changelog mistake Used pytest.fixture.skipif instead of pytest.mark.skipif
-Minor Doc Fix: The description above the example for ``@pytest.fixture.skipif`` now matches the code +Minor Doc Fix: The description above the example for ``@pytest.mark.skipif`` now matches the code
Portable way to emit the warning clause Summary: Pull Request resolved:
#ifdef TORCH_API_INCLUDE_EXTENSION_H #include <torch/extension.h> -#warning \ + +#define DEPRECATE_MESSAGE \ "Including torch/torch.h for C++ extensions is deprecated. Please include torch/extension.h" + +#ifdef _MSC_VER +# pragma message ( DEPRECATE_MESSAGE ) +#else +# warning DEPRECATE_MESSAGE +#endif + #endif // defined(TORCH_API_INCLUDE_EXTENSION_H)
Set bone length when bones are in a chain Use point-to-line distance to detect whether a bone is in a chain or not
@@ -227,6 +227,44 @@ class glTFImporter(): for scene in self.other_scenes: scene.blender_create() + # Armature correction + # Try to detect bone chains, and set bone lengths + # To detect if a bone is in a chain, we try to detect if a bone head is aligned + # with parent_bone : + ## Parent bone defined a line (between head & tail) + ## Bone head defined a point + ## Calcul of distance between point and line + ## If < threshold --> In a chain + ## Based on an idea of @Menithal, but added alignement detection to avoid some bad cases + + threshold = 0.001 + for armobj in [obj for obj in bpy.data.objects if obj.type == "ARMATURE"]: + bpy.context.scene.objects.active = armobj + armature = armobj.data + bpy.ops.object.mode_set(mode="EDIT") + for bone in armature.edit_bones: + if bone.parent is None: + continue + + parent = bone.parent + + # case where 2 bones are aligned (not in chain, same head) + if (bone.head - parent.head).length < threshold: + continue + + u = (parent.tail - parent.head).normalized() + point = bone.head + distance = ((point - parent.head).cross(u)).length / u.length + if distance < threshold: + save_parent_direction = (parent.tail - parent.head).normalized().copy() + save_parent_tail = parent.tail.copy() + parent.tail = bone.head + + # case where 2 bones are aligned (not in chain, same head) + # bone is no more is same direction + if (parent.tail - parent.head).normalized().dot(save_parent_direction) < 0.9: + parent.tail = save_parent_tail + def debug_missing(self): keys = [
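The chain-detection heuristic above boils down to a point-to-line distance: with the parent bone's head a and tail b defining the line (u = b - a), and the child's head p as the point, the distance is |(p - a) x u| / |u|; if it falls under the threshold, the child is treated as part of the chain. A small NumPy check of that formula with made-up coordinates:

```python
import numpy as np

def point_to_line_distance(p, a, b):
    """Distance from point p to the infinite line through a and b."""
    u = b - a
    return np.linalg.norm(np.cross(p - a, u)) / np.linalg.norm(u)

parent_head = np.array([0.0, 0.0, 0.0])
parent_tail = np.array([0.0, 0.0, 1.0])
threshold = 0.001

chained_child_head = np.array([0.0, 0.0005, 1.4])   # nearly on the parent's axis
offset_child_head = np.array([0.3, 0.0, 1.0])       # clearly off the axis

for head in (chained_child_head, offset_child_head):
    d = point_to_line_distance(head, parent_head, parent_tail)
    print(round(float(d), 6), d < threshold)
# 0.0005 True   -> treated as a chain, so parent.tail is moved to this head
# 0.3 False     -> left alone
```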
Add ssh port forwarding to intercept It works now
@@ -5,6 +5,7 @@ import ( "fmt" "net/http" "strconv" + "time" "github.com/datawire/teleproxy/pkg/supervisor" "github.com/pkg/errors" @@ -139,21 +140,38 @@ type Intercept struct { ii *InterceptInfo tm *TrafficManager port int + crc Resource ResourceBase } // MakeIntercept acquires an intercept and returns a Resource handle // for it -func MakeIntercept(p *supervisor.Process, tm *TrafficManager, ii *InterceptInfo) (cept *Intercept, err error) { +func MakeIntercept(p *supervisor.Process, tm *TrafficManager, ii *InterceptInfo) (*Intercept, error) { port, err := ii.Acquire(p, tm) if err != nil { - return + return nil, err } - cept = &Intercept{ii: ii, tm: tm, port: port} + + cept := &Intercept{ii: ii, tm: tm, port: port} cept.doCheck = cept.check cept.doQuit = cept.quit cept.setup(p.Supervisor(), ii.Name) - return + + sshCmd := []string{ + "ssh", "-C", "-N", "telepresence@localhost", + "-oConnectTimeout=5", "-oExitOnForwardFailure=yes", + "-oStrictHostKeyChecking=no", "-oUserKnownHostsFile=/dev/null", + "-p", strconv.Itoa(tm.sshPort), + "-R", fmt.Sprintf("%d:%s:%d", cept.port, ii.TargetHost, ii.TargetPort), + } + ssh, err := CheckedRetryingCommand(p, ii.Name+"-ssh", sshCmd, nil, nil, 5*time.Second) + if err != nil { + _ = cept.Close() + return nil, err + } + cept.crc = ssh + + return cept, nil } func (cept *Intercept) check(p *supervisor.Process) error { @@ -162,5 +180,6 @@ func (cept *Intercept) check(p *supervisor.Process) error { func (cept *Intercept) quit(p *supervisor.Process) error { cept.done = true + _ = cept.crc.Close() return cept.ii.Release(p, cept.tm, cept.port) }
DPDK on Debian: buster 5.4 missing ib drivers Buster doesn't have an ib config out of the box on older versions and recommends using the latest kernel for backports. Add a check for buster and update if the kernel version doesn't ship a configuration for ib and rdma.
@@ -587,8 +587,25 @@ class DpdkTestpmd(Tool): else: mellanox_drivers = ["mlx5_core", "mlx5_ib"] modprobe = self.node.tools[Modprobe] - if isinstance(self.node.os, Debian): - modprobe.load("rdma_cm") + if isinstance(self.node.os, Debian) and not isinstance(self.node.os, Ubuntu): + # NOTE: debian buster doesn't include rdma and ib drivers + # on 5.4 specifically for linux-image-cloud: + # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1012639 + # for backports on this release we should update the kernel to latest + kernel_info = self.node.os.get_kernel_information(force_run=True) + # update to at least 5.10 (known good for buster linux-image-cloud-(arch)) + if ( + self.node.os.information.codename == "buster" + and kernel_info.version <= "5.10.0" + ): + self.node.log.debug( + f"Debian (buster) kernel version found: {str(kernel_info.version)} " + "Updating linux-image-cloud to most recent kernel." + ) + # grab the linux-image package name from kernel version metadata + linux_image_package = "linux-image-cloud-[a-zA-Z0-9]*" + self.node.os.install_packages([linux_image_package]) + self.node.reboot() elif isinstance(self.node.os, Fedora): if not self.is_connect_x3: self.node.execute(
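A small aside on the version gate added above: the check is effectively "the codename is buster and the running kernel is at or below roughly 5.10". A standalone sketch of that comparison using the `packaging` library; the values are invented and the real code uses the framework's own kernel-information object rather than raw strings.

```python
from packaging.version import Version

codename = "buster"
running_kernel = "5.4.0"       # e.g. the stock buster cloud kernel
known_good_kernel = "5.10.0"   # backports kernel that ships ib/rdma configs

needs_newer_kernel = (
    codename == "buster" and Version(running_kernel) <= Version(known_good_kernel)
)
print(needs_newer_kernel)  # True -> install linux-image-cloud-* and reboot
```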
Add client cert support via secrets This commit adds support for enabling client cert authentication. Support for requiring the cert using `cert_required` literal in the Kubernetes secret has also been added back.
# See the License for the specific language governing permissions and # limitations under the License +import base64 import click import json import logging @@ -112,20 +113,40 @@ class Restarter(threading.Thread): def tls_secret_resolver(self, secret_name: str, context: str, cert_dir=None) -> Optional[Dict[str, str]]: (cert, key, data) = read_cert_secret(kube_v1(), secret_name, self.namespace) - if not (cert and key): + if not cert: + logger.error("no certificate found in secret {}".format(secret_name)) return None certificate_chain_path = "" private_key_path = "" + resolved = {} if context == 'server': + if not key: + logger.error("no key found in secret {} for context {}".format(secret_name, context)) + return None cert_dir = TLSPaths.cert_dir.value certificate_chain_path = TLSPaths.tls_crt.value private_key_path = TLSPaths.tls_key.value + resolved = { + 'certificate_chain_file': certificate_chain_path, + 'private_key_file': private_key_path + } elif context == 'client': - # TODO - pass + cert_dir = TLSPaths.client_cert_dir.value + certificate_chain_path = TLSPaths.client_tls_crt.value + resolved = { + 'cacert_chain_file': certificate_chain_path, + } + + cert_required = data.get('cert_required') + if cert_required is not None: + decoded = base64.b64decode(cert_required).decode('utf-8').lower() == 'true' + resolved['certificate_required'] = decoded else: + if not key: + logger.error("no key found in secret {} for context {}".format(secret_name, context)) + return None if cert_dir is None: cert_dir = os.path.join("/ambassador/", context) @@ -133,14 +154,16 @@ class Restarter(threading.Thread): certificate_chain_path = cert_paths['crt'] private_key_path = cert_paths['key'] - logger.debug("saving contents of secret %s to %s for context %s" % (secret_name, cert_dir, context)) - save_cert(cert, key, cert_dir) - - return { + resolved = { 'certificate_chain_file': certificate_chain_path, 'private_key_file': private_key_path } + logger.debug("saving contents of secret %s to %s for context %s" % (secret_name, cert_dir, context)) + save_cert(cert, key, cert_dir) + + return resolved + def read_fs(self, path): if os.path.exists(path): logger.debug("Merging config inputs from %s" % path)
Delete duplicate 'timeout' tests for notifications These scenarios are already covered by the DAO tests. It's enough to just check the DAO function is called as expected. While sometimes it can be better to have more end-to-end tests, the convention across much of this app is to do unit tests.
@@ -166,47 +166,15 @@ def test_delete_letter_notifications_older_than_retention_calls_child_task(notif mocked.assert_called_once_with('letter') -def test_timeout_notifications_after_timeout(notify_api, sample_template): - not1 = create_notification( - template=sample_template, - status='sending', - created_at=datetime.utcnow() - timedelta( - seconds=current_app.config.get('SENDING_NOTIFICATIONS_TIMEOUT_PERIOD') + 10)) - not2 = create_notification( - template=sample_template, - status='created', - created_at=datetime.utcnow() - timedelta( - seconds=current_app.config.get('SENDING_NOTIFICATIONS_TIMEOUT_PERIOD') + 10)) - not3 = create_notification( - template=sample_template, - status='pending', - created_at=datetime.utcnow() - timedelta( - seconds=current_app.config.get('SENDING_NOTIFICATIONS_TIMEOUT_PERIOD') + 10)) - timeout_notifications() - assert not1.status == 'temporary-failure' - assert not2.status == 'created' - assert not3.status == 'temporary-failure' - - -def test_timeout_notifications_before_timeout(notify_api, sample_template): - not1 = create_notification( - template=sample_template, - status='sending', - created_at=datetime.utcnow() - timedelta( - seconds=current_app.config.get('SENDING_NOTIFICATIONS_TIMEOUT_PERIOD') - 10)) - timeout_notifications() - assert not1.status == 'sending' - - -def test_timeout_notifications_avoids_letters(client, sample_letter_template): - created_at = datetime.utcnow() - timedelta(days=5) - not1 = create_notification(template=sample_letter_template, status='sending', created_at=created_at) - not2 = create_notification(template=sample_letter_template, status='created', created_at=created_at) +def test_timeout_notifications(mocker, notify_api, sample_notification): + mock_dao = mocker.patch('app.celery.nightly_tasks.dao_timeout_notifications') + mock_dao.return_value = [sample_notification] timeout_notifications() - assert not1.status == 'sending' - assert not2.status == 'created' + mock_dao.assert_called_once_with( + current_app.config.get('SENDING_NOTIFICATIONS_TIMEOUT_PERIOD') + ) def test_timeout_notifications_sends_status_update_to_service(client, sample_template, mocker):
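The rewritten test above leans on a unit-test style: patch the DAO call, run the task, and assert it was invoked with the configured timeout, instead of building notifications in several states. A self-contained sketch of that shape with `unittest.mock`; the names are placeholders, and the real tests use pytest-mock's `mocker.patch` rather than passing the mock in directly.

```python
from unittest import mock

TIMEOUT_PERIOD = 14400  # stand-in for SENDING_NOTIFICATIONS_TIMEOUT_PERIOD

def timeout_notifications(dao_timeout_notifications):
    # Task under test: it simply delegates to the DAO with the configured period.
    return dao_timeout_notifications(TIMEOUT_PERIOD)

def test_timeout_notifications_calls_dao():
    mock_dao = mock.Mock(return_value=["timed-out-notification"])
    result = timeout_notifications(mock_dao)
    mock_dao.assert_called_once_with(TIMEOUT_PERIOD)
    assert result == ["timed-out-notification"]

test_timeout_notifications_calls_dao()
print("ok")
```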
.travis: Back up travis-provided venv files before caching the directories Restore them in after_script. This should avoid:
@@ -19,6 +19,16 @@ cache:
   directories:
   - $HOME/virtualenv/python${TRAVIS_PYTHON_VERSION}/lib/python${TRAVIS_PYTHON_VERSION}/site-packages
   - $HOME/virtualenv/python${TRAVIS_PYTHON_VERSION}/bin
+# These files are provided by travis; do not overwrite them with the cached version.
+# We need to preserve them because after_script still needs a working python environment
+before_cache:
+  - mkdir -p $HOME/venv-bak/{bin,site-packages}
+  - mv -vf $HOME/virtualenv/python${TRAVIS_PYTHON_VERSION}/lib/python${TRAVIS_PYTHON_VERSION}/site-packages/pip* $HOME/venv-bak/site-packages
+  - mv -vf $HOME/virtualenv/python${TRAVIS_PYTHON_VERSION}/lib/python${TRAVIS_PYTHON_VERSION}/site-packages/easy-install* $HOME/venv-bak/site-packages
+  - mv -vf $HOME/virtualenv/python${TRAVIS_PYTHON_VERSION}/bin/pip* $HOME/venv-bak/bin
+  - mv -vf $HOME/virtualenv/python${TRAVIS_PYTHON_VERSION}/bin/easy-install* $HOME/venv-bak/bin
+  - mv -vf $HOME/virtualenv/python${TRAVIS_PYTHON_VERSION}/bin/python* $HOME/venv-bak/bin
+  - mv -vf $HOME/virtualenv/python${TRAVIS_PYTHON_VERSION}/bin/activate* $HOME/venv-bak/bin
 matrix:
   include:
@@ -106,4 +116,7 @@ script:
   - python -m pytest -p no:logging --cov=psyneulink tests/
 after_script:
+  # Restore python environment
+  - mv -vf $HOME/venv-bak/site-packages/* $HOME/virtualenv/python${TRAVIS_PYTHON_VERSION}/lib/python${TRAVIS_PYTHON_VERSION}/site-packages/
+  - mv -vf $HOME/venv-bak/bin/* $HOME/virtualenv/python${TRAVIS_PYTHON_VERSION}/bin/
   - coveralls
removed unused ipython sphinx extensions at present we're not installing ipython by default...
@@ -50,8 +50,8 @@ extensions = [ # 'sphinx.ext.viewcode', 'sphinx.ext.linkcode', 'sphinx.ext.napoleon', - 'IPython.sphinxext.ipython_console_highlighting', - 'IPython.sphinxext.ipython_directive', +# 'IPython.sphinxext.ipython_console_highlighting', +# 'IPython.sphinxext.ipython_directive', ] napoleon_use_ivar = True
settings_users: Add last_active to active_users for "users" table. This is a preliminary step for refactoring the logic for rendering "last_active" in the users table and later we can use this for sorting the column.
@@ -105,6 +105,20 @@ function failed_listing_users(xhr) { ui_report.error(i18n.t("Error listing users or bots"), xhr, status); } +var LAST_ACTIVE_NEVER = -1; +var LAST_ACTIVE_UNKNOWN = -2; + +function get_last_active(user) { + var presence_info = presence.presence_info[user.user_id]; + if (!presence_info) { + return LAST_ACTIVE_UNKNOWN; + } + if (!isNaN(presence_info.last_active)) { + return presence_info.last_active; + } + return LAST_ACTIVE_NEVER; +} + function populate_users(realm_people_data) { var active_users = []; var deactivated_users = []; @@ -116,6 +130,7 @@ function populate_users(realm_people_data) { user.bot_type = settings_bots.type_id_to_string(user.bot_type); bots.push(user); } else if (user.is_active) { + user.last_active = get_last_active(user); active_users.push(user); } else { deactivated_users.push(user);
Begins work on feature-cbits branch. Adds arguments to StateSpaceLabels.__init__ for classical state space labels. Not much done yet.
@@ -382,7 +382,8 @@ class StateSpaceLabels(object): spaces. """ - def __init__(self, labelList, dims=None): + def __init__(self, labelList, dims=None, + classicalLabelList=None, classicalDims=None): """ Creates a new StateSpaceLabels object. @@ -414,6 +415,17 @@ class StateSpaceLabels(object): - if the label starts with 'L', dim=1 (a single Level) - if the label starts with 'Q' OR is an int, dim=2 (a Qubit) - if the label starts with 'T', dim=3 (a quTrit) + + classicalLabelList : iterable, optional + A list of classical-state labels. These labels must be strings, + e.g. `['C0','C1']`. If `None`, then there is no classical + portion of the constructed state space. + + classicalDims : iterable, optional + The dimension of each classical state space label. Must be the + same size as `classicalLabelList`. If `None`, then all the + classical state-space labels are assumed to be bits, i.e., to + have dimension 2. """ #Allow initialization via another StateSpaceLabels object @@ -477,7 +489,17 @@ class StateSpaceLabels(object): self.dim = _Dim(tpb_dims) #Note: access tensor-prod-block dims via self.dim.blockDims - def num_tensor_prod_blocks(self): + # init classical space + if classicalLabelList is not None: + # the full state space is the quantum state space tensored with the classical space. + if classicalDims is None: classicalDims = [2]*len(classicalLabelList) + totClassicalDim = _np.product(classicalDims) + self.labels = self.labels * totClassicalDim + #TODO: update other member variables... + # may want to have labels for quantum and classical sides separately e.g. not label ['Q0_C0','Q0_C1'] for qubit+bit + + + def num_tensor_prod_blocks(self): # only in modelconstruction.py """ Get the number of tensor-product blocks which are direct-summed to get the final state space. @@ -488,7 +510,7 @@ class StateSpaceLabels(object): """ return len(self.labels) - def tensor_product_block_labels(self, iTPB): + def tensor_product_block_labels(self, iTPB): # unused """ Get the labels for the `iTBP`-th tensor-product block. @@ -504,7 +526,7 @@ class StateSpaceLabels(object): """ return self.labels[iTPB] - def tensor_product_block_dims(self, iTPB): + def tensor_product_block_dims(self, iTPB): # unused """ Get the dimension corresponding to each label in the `iTBP`-th tensor-product block. The dimension of the @@ -523,7 +545,7 @@ class StateSpaceLabels(object): return tuple((self.labeldims[lbl] for lbl in self.labels[iTPB])) - def product_dim(self, labels): + def product_dim(self, labels): # only in modelconstruction """ Computes the product of the state-space dimensions associated with each label in `labels`.
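Reading the work-in-progress classical-space handling above: the intended full space is the quantum state space tensored with a classical register, so the total dimension is the product of the quantum label dimensions times the product of the classical label dimensions, and the quantum label blocks get replicated once per classical basis state. A toy NumPy rendering of that bookkeeping; the labels are invented and this is only an interpretation of the TODO comments, not the finished design.

```python
import numpy as np

quantum_dims = {"Q0": 2, "Q1": 2}      # two qubits
classical_dims = {"C0": 2, "C1": 2}    # two classical bits

quantum_dim = int(np.prod(list(quantum_dims.values())))      # 4
classical_dim = int(np.prod(list(classical_dims.values())))  # 4, i.e. totClassicalDim
total_dim = quantum_dim * classical_dim                      # 16

# One copy of the quantum tensor-product block per classical basis state,
# mirroring `self.labels = self.labels * totClassicalDim` in the patch.
labels = (tuple(quantum_dims),) * classical_dim
print(total_dim, labels)
# 16 (('Q0', 'Q1'), ('Q0', 'Q1'), ('Q0', 'Q1'), ('Q0', 'Q1'))
```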
TexText package prepared for extension manager This relates to the package of type TexText-Inkscape-x.y.z.zip. It is modified such that it can be handled by the extension manager: no setup scripts, LICENSE file moved into the extension directory, no INSTALL.txt.
@@ -75,18 +75,17 @@ if __name__ == "__main__": with TmpDir() as tmpdir: versioned_subdir = os.path.join(tmpdir,"textext-%s" % TexTextVersion) + extension_subdir = os.path.join(versioned_subdir, "textext") os.mkdir(versioned_subdir) shutil.copytree("./textext", - os.path.join(versioned_subdir, "textext"), + extension_subdir, ignore=git_ignore_patterns # exclude .gitignore files ) + shutil.copy("LICENSE.txt", extension_subdir) + if platform != "inkscape": shutil.copy("setup.py", versioned_subdir) - shutil.copy("LICENSE.txt", versioned_subdir) if platform == "windows": shutil.copy("setup_win.bat", versioned_subdir) - if platform == "inkscape": - shutil.copy("setup_win.bat", versioned_subdir) - shutil.copy("INSTALL.txt", versioned_subdir) for fmt in formats: filename = shutil.make_archive(PackageName, fmt, tmpdir) print("Successfully created %s" % os.path.basename(filename))
WIP: toward automated deploy working from suggestions made by - many thanks! this will take a few iterations
@@ -42,6 +42,16 @@ before_install: install: - python3 setup.py install + - travis_wait 30 python3 setup.py sdist bdist_wheel + +deploy: + provider: releases + api_key: $GITHUB_ACCESS_TOKEN + file_glob: true + file: /home/travis/build/ANTsX/ANTsPy/dist/antspyx*.whl + skip_cleanup: true + on: + tags: true script: ./tests/run_tests_travis.sh
Update test_mumbai.py Fix bug introduced in previous commit. Test now passes locally
@@ -78,7 +78,7 @@ def _get_wallets(ocean): # wallets n_confirm, timeout = config["BLOCK_CONFIRMATIONS"], config["TRANSACTION_TIMEOUT"] alice_wallet = Wallet(web3, alice_private_key, n_confirm, timeout) - alice_wallet = Wallet(web3, bob_private_key, n_confirm, timeout) + bob_wallet = Wallet(web3, bob_private_key, n_confirm, timeout) print(f"alice_wallet.address = '{alice_wallet.address}'") print(f"bob_wallet.address = '{bob_wallet.address}'")
Fix caffe2 build failure on Windows Summary: Fixes Looks like CMake is passing `/MD` when we call `add_library`. We need to fix these with C source files too. Pull Request resolved:
@@ -347,7 +347,9 @@ if(NOT MSVC) else() foreach(flag_var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE - CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) + CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO + CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE + CMAKE_C_FLAGS_MINSIZEREL CMAKE_C_FLAGS_RELWITHDEBINFO) if (${CAFFE2_USE_MSVC_STATIC_RUNTIME}) if(${flag_var} MATCHES "/MD") string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")