message: string, length 13 to 484
diff: string, length 38 to 4.63k
Update version 0.9.0 -> 0.9.1 Support dwave-cloud-client 0.7.x
# =============================================================================
__all__ = ['__version__', '__author__', '__authoremail__', '__description__']
-__version__ = '0.9.0'
+__version__ = '0.9.1'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = '[email protected]'
__description__ = 'All things D-Wave System.'
Move StructType/ASTNodeType fields elaboration out of StructMetaclass TN:
@@ -1378,10 +1378,6 @@ class StructMetaclass(CompiledTypeMetaclass): assert sum(1 for b in [is_astnode, is_struct] if b) == 1 assert sum(1 for b in [is_base, is_root_grammar_class] if b) <= 1 - # Get the fields this class defines. Remove them as class members: we - # want them to be stored in their own dict (see "cls.fields" below). - dct_fields = [] if is_base else dct['_fields'] - env_spec = dct.get('_env_spec', None) assert env_spec is None or is_astnode dct['is_env_spec_inherited'] = env_spec is None @@ -1442,23 +1438,8 @@ class StructMetaclass(CompiledTypeMetaclass): else: mcs.astnode_types.append(cls) - # This builds a list of fields in a specific order: first builtin - # fields, then subclass-specific fields. - fields = OrderedDict( - (mcs.builtin_properties() if is_root_grammar_class else []) - + dct_fields - ) - - # Associate each field and property to this ASTNodeType subclass, and - # assign them their name. Likewise for the environment specification. - for f_n, f_v in fields.items(): - f_v.struct = cls - f_v.name = names.Name.from_lower(f_n) if env_spec: env_spec.ast_node = cls - - cls._fields = fields - return cls @classmethod @@ -1979,7 +1960,12 @@ def init_base_struct(cls, name, location, doc, fields): cls._name = (name + names.Name('Node')) if is_keyword(name) else name cls.location = location cls._doc = doc - del fields # TODO: move fields initialization code here + + # Associate each field and property to this subclass and assign them names + for f_n, f_v in fields: + f_v.name = names.Name.from_lower(f_n) + f_v.struct = cls + cls._fields = OrderedDict(fields) # No matter what, reset all caches so that subclass don't "magically" # inherit their parent's. @@ -2352,6 +2338,9 @@ def create_astnode(name, location, doc, base, fields, repr_name=None, '_has_abstract_list': has_abstract_list, } cls = type(name.camel, (ASTNodeType, ) if base is None else (base, ), dct) + + if is_root: + fields = StructMetaclass.builtin_properties() + fields init_base_struct(cls, name, location, doc, fields) # If this is the root grammar type, create the generic list type name
Fix dev version Wasn't showing git commit
@@ -52,7 +52,7 @@ def _try_init_git_attrs(): def _init_git_commit(): repo = _guild_repo() if repo: - line = _cmd_out("git -C \"%s\" log -1 --oneline" % repo) + line = _cmd_out("git --work-tree \"%s\" log -1 --oneline" % repo) commit = line.split(" ")[0] else: commit = None
Fix a bug raised by issue 3372. Corner case: the input tensor may also be an input tensor of the whole model.
@@ -441,7 +441,11 @@ class TorchModuleGraph(TorchGraph): input_tensors = list(list_construct_cpp.inputs()) for _tensor in input_tensors: debug_name = _tensor.debugName() + if debug_name in self.output_to_node: input_order.append(self.output_to_node[debug_name].unique_name) + else: + # the input tensor may be the input tensor of the whole model + input_order.append(None) cat_info['in_order'] = input_order input_shapes = [t.type().sizes() for t in input_tensors] cat_info['in_shape'] = input_shapes
Correct test: caching requires files on disk, but the test just supplies what would have been read from disk.
@@ -101,8 +101,9 @@ class CryptTestCase(TestCase): salt.utils.fopen.assert_has_calls([open_priv_wb, open_pub_wb], any_order=True) def test_sign_message(self): - with patch('salt.utils.fopen', mock_open(read_data=PRIVKEY_DATA)): - self.assertEqual(SIG, crypt.sign_message('/keydir/keyname.pem', MSG)) + key = Crypto.PublicKey.RSA.importKey(PRIVKEY_DATA) + with patch('salt.crypt._get_rsa_key', return_value=key): + self.assertEqual(SIG, salt.crypt.sign_message('/keydir/keyname.pem', MSG)) def test_verify_signature(self): with patch('salt.utils.fopen', mock_open(read_data=PUBKEY_DATA)):
DOC: Give a sense for why plans are useful earlier in the tutorial. This incorporates early feedback from Thanks!
@@ -287,7 +287,15 @@ Try the following variations: The :func:`~bluesky.plans.count` function (more precisely, Python *generator function*) is an example of a *plan*, a sequence of instructions encoding an experimental procedure. We'll get a better sense for why this design is useful -as we continue. +as we continue. Briefly, it empowers us to: + +* Introspect the instructions before we execute them, checking for accuracy, + safety, estimated duration, etc. +* Interrupt and "rewind" the instructions to a safe point to resume from, + both interactively and automatically (e.g. in the middle of the night). +* Reuse a generic set of instructions on different hardware. +* Modify the instructions programmatically, such as inserting a set of + baseline readings to be taken automatically before every experiment. .. warning::
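The paragraph added above argues that expressing a procedure as a generator of instructions enables introspection, interruption, and reuse. A minimal pure-Python sketch of that idea (hypothetical Msg/count_plan/run names, not bluesky's actual API) shows how the same plan can be listed for inspection or handed to an executor:

from collections import namedtuple

# Hypothetical stand-ins; bluesky's real plans yield Msg objects to a RunEngine.
Msg = namedtuple("Msg", ["command", "obj"])

def count_plan(detectors, num=1):
    """A 'plan': a generator that yields instructions without performing them."""
    for _ in range(num):
        for det in detectors:
            yield Msg("trigger", det)
            yield Msg("read", det)

# Introspect the instructions before executing anything (no hardware touched):
instructions = list(count_plan(["det1"], num=2))
print(len(instructions), "messages, first two:", instructions[:2])

# Later, hand a fresh plan to whatever executes the messages:
def run(plan):
    for msg in plan:
        print("executing", msg)

run(count_plan(["det1"], num=1))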
Fix bug in run_static() Fix a bug when modeling too-flexible bearings, where displacements were getting larger than acceptable. It uses auxiliary bearings with high stiffness, considering almost zero displacement at the bearing nodes.
@@ -1902,8 +1902,15 @@ class Rotor(object): for node_y in range(int(len(self.M()) / 4)): grav[4 * node_y + 1] = -9.8065 + aux_brg = [] + for n in self.df_bearings["n"]: + aux_brg.append(BearingElement(n=n, kxx=1e14, cxx=0)) + + aux_rotor = Rotor(self.shaft_elements, self.disk_elements, aux_brg) + aux_K = aux_rotor.K(0) + # calculates x, for [K]*(x) = [M]*(g) - disp = (la.solve(self.K(0), self.M() @ grav)).flatten() + disp = (la.solve(aux_K, self.M() @ grav)).flatten() # calculates displacement values in gravity's direction # dof = degree of freedom @@ -1921,10 +1928,10 @@ class Rotor(object): BrgForceToReturn = [] for i, node in enumerate(self.df_bearings["n"]): BrgForce[node] = ( - -disp_y[node] * self.df_bearings.loc[i, "kyy"].coefficient[0] + -disp_y[node] * aux_rotor.df_bearings.loc[i, "kyy"].coefficient[0] ) BrgForceToReturn.append( - np.around(-disp_y[node] * self.df_bearings.loc[i, "kyy"].coefficient[0], decimals=1) + np.around(-disp_y[node] * aux_rotor.df_bearings.loc[i, "kyy"].coefficient[0], decimals=1) ) # Disk Forces @@ -2004,7 +2011,7 @@ class Rotor(object): self.Bm, self.df_shaft, self.df_disks, - self.df_bearings, + aux_rotor.df_bearings, self.nodes, self.nodes_pos, Vx_axis,
Removed autopep8 and use flake8 after black `black` already does everything that `autopep8` can. Also, with `black` we don't need `flake8` for formatting, but we can still use it to find errors, so let's run it after `black`.
@@ -31,21 +31,17 @@ repos: hooks: - id: pyupgrade args: [--py36-plus] -- repo: https://gitlab.com/pycqa/flake8 - rev: 3.7.7 - hooks: - - id: flake8 - exclude: ^docs/source/conf.py$ -- repo: https://github.com/pre-commit/mirrors-autopep8 - rev: v1.4.3 - hooks: - - id: autopep8 - repo: https://github.com/psf/black rev: stable hooks: - id: black language_version: python3.6 args: ['--target-version', 'py36'] +- repo: https://gitlab.com/pycqa/flake8 + rev: 3.7.7 + hooks: + - id: flake8 + exclude: ^docs/source/conf.py$ - repo: https://github.com/Yelp/detect-secrets rev: 0.9.1 hooks:
MNT: remove pending_cancel_exception We should always handle the CancelExceptions internally
@@ -1162,7 +1162,6 @@ class RunEngine: with self._state_lock: self._task = current_task(self.loop) debug = logging.getLogger('{}.msg'.format(self.log.name)).debug - pending_cancel_exception = None self._reason = '' # sentinel to decide if need to add to the response stack or not sentinel = object() @@ -1373,7 +1372,6 @@ class RunEngine: # raised error is not already stashed in _exception if self._exception is None: self._exception = e - pending_cancel_exception = e finally: # if we poped a response and did not pop a plan, we need # to put the new response back on the stack @@ -1444,9 +1442,7 @@ class RunEngine: 'Please fix your plan.'.format(p)) self._state = 'idle' - # if the task was cancelled - if pending_cancel_exception is not None: - raise pending_cancel_exception + self.log.info("Cleaned up from plan %r", self._plan) async def _wait_for(self, msg):
build-prereq.sh updates for Debian: Add support for CLIF prerequisite. Add public key for Oracle Java PPA. Without this the Java install fails on Debian.
@@ -60,6 +60,11 @@ note_build_stage "Install Java and friends" if ! java -version 2>&1 | fgrep "1.8"; then echo "No Java 8, will install." sudo -H apt-get install -y software-properties-common debconf-utils + # Debian needs authentication. + # (http://www.webupd8.org/2014/03/how-to-install-oracle-java-8-in-debian.html) + [[ $(lsb_release -d | grep 'Debian') ]] && \ + sudo -H apt-get install -y gnupg dirmngr && \ + sudo -H apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys EEA14886 sudo add-apt-repository -y ppa:webupd8team/java sudo -H apt-get -qq -y update echo "oracle-java8-installer shared/accepted-oracle-license-v1-1 select true" | sudo debconf-set-selections @@ -117,6 +122,8 @@ else case "$(lsb_release -d)" in *Ubuntu*16.*.*) export DV_PLATFORM="ubuntu-16" ;; *Ubuntu*14.*.*) export DV_PLATFORM="ubuntu-14" ;; + *Debian*9.*) export DV_PLATFORM="debian" ;; + *Debian*rodete) export DV_PLATFORM="debian" ;; *) echo "CLIF is not installed on this machine and a prebuilt binary is not unavailable for this platform. Please install CLIF at https://github.com/google/clif before continuing."
Predicate: fix arguments checking In particular, properly reject when there are too many arguments or missing ones. TN:
from __future__ import absolute_import, division, print_function + +from itertools import izip_longest + import funcy -from langkit.compiled_types import (T, bool_type, equation_type, +from langkit import names +from langkit.compiled_types import (Argument, T, bool_type, equation_type, logic_var_type, no_compiled_type) from langkit.diagnostics import check_multiple, check_source_language from langkit.expressions.base import ( @@ -419,41 +423,62 @@ class Predicate(AbstractExpression): )), ]) - exprs = [construct(e) for e in self.exprs] - - prop_types = [a.type for a in self.pred_property.natural_arguments] - # Separate logic variable expressions from extra argument expressions + exprs = [construct(e) for e in self.exprs] logic_var_exprs, closure_exprs = funcy.split_by( lambda e: e.type == logic_var_type, exprs ) - check_source_language( len(logic_var_exprs) > 0, "Predicate instantiation should have at " "least one logic variable expression" ) + check_source_language( + all(e.type != logic_var_type for e in closure_exprs), + 'Logic variable expressions should be grouped at the beginning,' + ' and should not appear after non logic variable expressions' + ) + + # Compute the list of arguments to pass to the property (Self + # included). + args = ([Argument(names.Name('Self'), + self.pred_property.struct.entity)] + + self.pred_property.natural_arguments) + + # Then check that 1) all extra passed actuals match what the property + # arguments expect and that 2) arguments left without an actual have a + # default value. + for i, (expr, arg) in enumerate(izip_longest(exprs, args)): + + if expr is None: + check_source_language( + False, + 'Missing an actual for argument #{} ({})'.format( + i, arg.name.lower + ) + ) + continue check_source_language( - all(e.type != logic_var_type for e in closure_exprs), "Logic " - "variable expressions should be grouped at the beginning, and " - "should not appear after non logic variable expressions" + arg is not None, + 'Too many actuals: at most {} expected, got {}'.format( + len(args), len(exprs) + ) ) - for i, (expr, arg_type) in enumerate(zip(exprs, prop_types)): if expr.type == logic_var_type: check_source_language( - arg_type.matches(T.root_node.entity), + arg.type.matches(T.root_node.entity), "Argument #{} of predicate " "is a logic variable, the corresponding property formal " "has type {}, but should be a descendent of {}".format( - i, arg_type.dsl_name, T.root_node.entity.dsl_name + i, arg.type.dsl_name, T.root_node.entity.dsl_name ) ) else: check_source_language( - expr.type.matches(arg_type), "Argument #{} of predicate " + expr.type.matches(arg.type), "Argument #{} of predicate " "has type {}, should be {}".format( - i, expr.type.dsl_name, arg_type.dsl_name + i, expr.type.dsl_name, arg.type.dsl_name ) )
Do not emit GDB helper directives without generating GDB hooks These directives contain absolute paths to source files, so we want to keep them out of release builds. TN:
@@ -26,7 +26,8 @@ def gdb_helper(*args): :param list[str] args: Elements of the special comment. :rtype: str """ - return '--# {}'.format(' '.join(pipes.quote(a) for a in args)) + return ('--# {}'.format(' '.join(pipes.quote(a) for a in args)) + if get_context().emitter.generate_gdb_hook else '') def precise_types_doc(label, types):
linkifiers: Add `title` attribute to `Delete` button. This commit adds `title` attribute and removes `aria-hidden` attribute in `Delete` button in linkifiers table. `aria-hidden` attribute is used only for icons on buttons that have a plain-text label.
</td> {{#if ../can_modify}} <td class="no-select actions"> - <button class="button small delete btn-danger" data-linkifier-id="{{id}}"> - <i class="fa fa-trash-o" aria-hidden="true"></i> + <button class="button small delete btn-danger" data-linkifier-id="{{id}}" title="{{t 'Delete' }}" aria-label="{{t 'Delete' }}"> + <i class="fa fa-trash-o"></i> </button> </td> {{/if}}
Update pin.py Swap the order of the SDA/SCL 0 and SDA/SCL 1 assignments
@@ -66,10 +66,17 @@ class Pin: GPIO.cleanup() # Cannot be used as GPIO -SDA = Pin('GEN1_I2C_SDA') -SCL = Pin('GEN1_I2C_SCL') -SDA_1 = Pin('GEN2_I2C_SDA') -SCL_1 = Pin('GEN2_I2C_SCL') +# before # +#SDA = Pin('GEN1_I2C_SDA') +#SCL = Pin('GEN1_I2C_SCL') +#SDA_1 = Pin('GEN2_I2C_SDA') +#SCL_1 = Pin('GEN2_I2C_SCL') + +# after # +SDA = Pin('GEN2_I2C_SDA') +SCL = Pin('GEN2_I2C_SCL') +SDA_1 = Pin('GEN1_I2C_SDA') +SCL_1 = Pin('GEN1_I2C_SCL') # These pins are native to TX1 BB03 = Pin('GPIO_X1_AUD')
Update main.py Fixed typo in summary: changed enabled to disabled
@@ -98,7 +98,7 @@ def main(args, pacu_main): def summary(data, pacu_main): - out = ' {} instances have termination protection enabled\n'.format(data['instance_count']) + out = ' {} instances have termination protection disabled\n'.format(data['instance_count']) if data['instance_count'] > 0: out += ' Instances without termination protection have been written to: {}\n'.format(data['csv_file_path']) return out
Update CONTRIBUTING.md removed mention of Trello for bug tracking
Thank you for your interest in contributing! If you haven't already, drop us a line on [email protected]. We want you working on things you're excited about. -We use GitHub issues for suggestions, and [Trello](https://trello.com/b/RGR9BttD/oxford-data-lab) is our bug tracking system. Mail us and we'll add you. +We use GitHub issues for suggestions and for bug tracking. ## Testing
Fix Cassandra cluster restart Look the node up by both '127.0.0.1' and the private IP (if not found).
@@ -766,7 +766,7 @@ class CassandraAppStatus(service.BaseDbStatus): def _get_actual_db_status(self): try: - self.client.execute('SELECT now() FROM system.local;') + if self.client.local_node_is_up(): return rd_instance.ServiceStatuses.RUNNING except NoHostAvailable: return rd_instance.ServiceStatuses.SHUTDOWN @@ -1238,6 +1238,20 @@ class CassandraConnection(object): return query.format(*identifiers) return query + def node_is_up(self, host_ip): + """Test whether the Cassandra node located at the given IP is up. + """ + for host in self._cluster.metadata.all_hosts(): + if host.address == host_ip: + return host.is_up + return False + + def local_node_is_up(self): + """Test whether Cassandra is up on the localhost. + """ + return (self.node_is_up('127.0.0.1') or + self.node_is_up(netutils.get_my_ipv4())) + def _connect(self): if not self._cluster.is_shutdown: LOG.debug("Connecting to a Cassandra cluster as '%s'."
Fix char escaping in legacy printer Fixed code that identifies leaf nodes (now supports <edge label>-of)
@@ -362,7 +362,7 @@ def get_simple_graph(graph): def legacy_graph_printer(metadata, nodes, root, edges): - # These symbols can not be used directly for nodes + # These symbols can not be used directly for node names must_scape_symbols = [':', '/', '(', ')'] # start from meta-data @@ -372,6 +372,7 @@ def legacy_graph_printer(metadata, nodes, root, edges): # find leaf nodes non_leaf_ids = set() for (src, label, trg) in edges: + if not label.endswith('-of'): non_leaf_ids.add(src) leaf_ids = set(nodes.keys()) - non_leaf_ids # Find leaf nodes at end of :op or numeric ones
Grammar fix Use 'if' instead of 'in case'
@@ -233,7 +233,7 @@ You can now call all of Salt's CLI tools without explicitly passing the configur Additional Options .................. -In case you want to distribute your virtualenv, you probably don't want to +If you want to distribute your virtualenv, you probably don't want to include Salt's clone ``.git/`` directory, and, without it, Salt won't report the accurate version. You can tell ``setup.py`` to generate the hardcoded version information which is distributable:
tests: add vectorized roundtrip test to fixed/inertial planetary frames This test ensures conversion from and to fixed frames is possible with vectorized input and yields the same coordinates if coming back to the original coordinate system.
+import numpy as np import pytest from astropy import units as u from astropy.coordinates import ( @@ -146,7 +147,7 @@ def test_planetary_fixed_inertial_conversion(body, fixed_frame, inertial_frame): fixed_position = fixed_frame( 0 * u.deg, 0 * u.deg, body.R, obstime=epoch, representation_type="spherical" ) - inertial_position = fixed_position.transform_to(inertial_frame) + inertial_position = fixed_position.transform_to(inertial_frame(obstime=epoch)) assert_quantity_allclose( fixed_position.spherical.distance, body.R, atol=1e-7 * u.km ) @@ -176,7 +177,7 @@ def test_planetary_inertial_fixed_conversion(body, fixed_frame, inertial_frame): inertial_position = inertial_frame( 0 * u.deg, 0 * u.deg, body.R, obstime=epoch, representation_type="spherical" ) - fixed_position = inertial_position.transform_to(fixed_frame) + fixed_position = inertial_position.transform_to(fixed_frame(obstime=epoch)) assert_quantity_allclose( fixed_position.spherical.distance, body.R, atol=1e-7 * u.km ) @@ -185,6 +186,45 @@ def test_planetary_inertial_fixed_conversion(body, fixed_frame, inertial_frame): ) [email protected]( + "body, fixed_frame, inertial_frame", + [ + (Sun, SunFixed, HCRS), + (Mercury, MercuryFixed, MercuryICRS), + (Venus, VenusFixed, VenusICRS), + (Earth, ITRS, GCRS), + (Mars, MarsFixed, MarsICRS), + (Jupiter, JupiterFixed, JupiterICRS), + (Saturn, SaturnFixed, SaturnICRS), + (Uranus, UranusFixed, UranusICRS), + (Neptune, NeptuneFixed, NeptuneICRS), + (Pluto, PlutoFixed, PlutoICRS), + ], +) +def test_planetary_inertial_roundtrip_vector(body, fixed_frame, inertial_frame): + with solar_system_ephemeris.set("builtin"): + epoch = J2000 + sampling_time = 10 * u.s + fixed_position = fixed_frame( + np.broadcast_to(0 * u.deg, (1000,), subok=True), + np.broadcast_to(0 * u.deg, (1000,), subok=True), + np.broadcast_to(body.R, (1000,), subok=True), + representation_type="spherical", + obstime=epoch + np.arange(1000) * sampling_time, + ) + inertial_position = fixed_position.transform_to( + inertial_frame(obstime=epoch + np.arange(1000) * sampling_time) + ) + fixed_position_roundtrip = inertial_position.transform_to( + fixed_frame(obstime=epoch + np.arange(1000) * sampling_time) + ) + assert_quantity_allclose( + fixed_position.cartesian.xyz, + fixed_position_roundtrip.cartesian.xyz, + atol=1e-7 * u.km, + ) + + def test_round_trip_from_GeocentricSolarEcliptic_gives_same_results(): gcrs = GCRS(ra="02h31m49.09s", dec="+89d15m50.8s", distance=200 * u.km) gse = gcrs.transform_to(GeocentricSolarEcliptic(obstime=Time("J2000")))
Back out "[reland] Skip OpenMP Thread when OMP_NUM_THREADS is 1" Summary: Pull Request resolved: Original commit changeset: With the previous diff, when the user sets KMP_AFFINITY, it will be ignored when OMP_NUM_THREADS is 1. That could cause a performance regression. Test Plan: n/a
@@ -25,8 +25,8 @@ inline void parallel_for( #ifdef _OPENMP std::atomic_flag err_flag = ATOMIC_FLAG_INIT; std::exception_ptr eptr; - if (!omp_in_parallel() && ((end - begin) > grain_size) && omp_get_num_threads() > 1) { -#pragma omp parallel + +#pragma omp parallel if (!omp_in_parallel() && ((end - begin) > grain_size)) { // choose number of tasks based on grain size and number of threads // can't use num_threads clause due to bugs in GOMP's thread pool (See #32008) @@ -51,11 +51,9 @@ inline void parallel_for( if (eptr) { std::rethrow_exception(eptr); } - } else -#endif - { +#else f(begin, end); - } +#endif } template <class scalar_t, class F, class SF>
Centre align logo and buttons A subjective improvement in making the readme look nice. Only if others agree of course :) GitHub rst doesn't seem to like rst's actual ways of centring things so it seems putting stuff in html is the way to do this.
-|logo| +.. raw:: html -**A full-featured, hackable tiling window manager written and configured in Python** - -|website| |pypi| |ci| |rtd| |license| + <p align="center"> + <a href="https://www.qtile.org"> + <img + src="https://raw.githubusercontent.com/qtile/qtile/master/logo.png" + alt="Logo" + > + </a> + </p> + <p align="center"> + <b>A full-featured, hackable tiling window manager written and configured in Python</b> + </p> + <p align="center"> + <a href="https://www.qtile.org"> + <img src="https://img.shields.io/badge/website-qtile.org-blue.svg" alt="Website"> + </a> + <a href="https://pypi.org/project/qtile/"> + <img src="https://img.shields.io/pypi/v/qtile.svg" alt="PyPI"> + </a> + <a href="https://github.com/qtile/qtile/actions"> + <img src="https://github.com/qtile/qtile/workflows/ci/badge.svg?branch=master" alt="CI Status"> + </a> + <a href="https://docs.qtile.org/en/latest/"> + <img src="https://readthedocs.org/projects/qtile/badge/?version=latest" alt="Read the Docs"> + </a> + <a href="https://github.com/qtile/qtile/blob/master/LICENSE"> + <img src="https://img.shields.io/github/license/qtile/qtile.svg" alt="License"> + </a> + </p> Features ======== @@ -38,26 +63,6 @@ and `guidelines`_ for contributing in the documentation. .. _`tips & tricks`: https://docs.qtile.org/en/latest/manual/hacking.html .. _`guidelines`: https://docs.qtile.org/en/latest/manual/contributing.html -.. |logo| image:: https://raw.githubusercontent.com/qtile/qtile/master/logo.png - :alt: Logo - :target: https://www.qtile.org -.. |website| image:: https://img.shields.io/badge/website-qtile.org-blue.svg - :alt: Website - :target: https://www.qtile.org -.. |pypi| image:: https://img.shields.io/pypi/v/qtile.svg - :alt: PyPI - :target: https://pypi.org/project/qtile/ -.. |ci| image:: https://github.com/qtile/qtile/workflows/ci/badge.svg?branch=master - :alt: CI status - :target: https://github.com/qtile/qtile/actions -.. |rtd| image:: https://readthedocs.org/projects/qtile/badge/?version=latest - :alt: Read the Docs - :target: https://docs.qtile.org/en/latest/ -.. |license| image:: https://img.shields.io/github/license/qtile/qtile.svg - :alt: License - :target: https://github.com/qtile/qtile/blob/master/LICENSE - - Maintainers ===========
don't use half precision in test_ema on CPU Summary: X-link: Pull Request resolved: To fix errors introduced in
@@ -160,14 +160,17 @@ class TestEMA(unittest.TestCase): self._test_ema_start_update(updates=1) def test_ema_fp32(self): - model = DummyModule().half() + # CPU no longer supports Linear in half precision + dtype = torch.half if torch.cuda.is_available() else torch.float + + model = DummyModule().to(dtype) optimizer = torch.optim.SGD(model.parameters(), lr=0.01) state = deepcopy(model.state_dict()) config = EMAConfig(ema_fp32=True) ema = EMA(model, config) x = torch.randn(32) - y = model(x.half()) + y = model(x.to(dtype)) loss = y.sum() loss.backward() optimizer.step() @@ -192,7 +195,7 @@ class TestEMA(unittest.TestCase): config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float() ) - .half() + .to(dtype) .float() ), torch.norm( @@ -207,10 +210,14 @@ class TestEMA(unittest.TestCase): ( config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float() - ).half(), + ).to(dtype), ) def test_ema_fp16(self): + # CPU no longer supports Linear in half precision + if not torch.cuda.is_available(): + return + model = DummyModule().half() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) state = deepcopy(model.state_dict())
Add section on enhancements Allow users to know how to request a feature change, by pointing them towards our enhancements repository
@@ -76,6 +76,7 @@ A hardware TPM should always be used when real secrets and trust is required. * [Running keylime](#running-keylime) * [Provisioning](#provisioning) * [Using keylime CA](#using-keylime-ca) +* [Request a Feature](#request-a-feature) * [Report a Security Vulnerability](#report-a-security-vulnerability) * [Meeting Information](#project-meetings) * [First Timers Support](#first-timers-support) @@ -474,6 +475,13 @@ You can install the services with the following command: Once installed, you can run and inspect the services `keylime_verifier`, `keylime_agent` and `keylime_registrar` via `systemctl`. +### Request a feature + +Keylime feature requests are tracked as enhancements in the [enhancements repository](https://github.com/keylime/enhancements) + +The enhancement process has been implemented to provide a way to review and +assess the impact(s) of significant changes to Keylime. + ## Report a Security Vulnerability Please contact us directly at [[email protected]](mailto:[email protected])
Fix minor typo Fixed minor typo in Autograd mechanics docs.
@@ -70,7 +70,7 @@ If there's even a single volatile input to an operation, its output is also going to be volatile. Volatility spreads accross the graph much easier than non-requiring gradient - you only need a **single** volatile leaf to have a volatile output, while you need **all** leaves to not require gradient to -have an output the doesn't require gradient. Using volatile flag you don't +have an output that doesn't require gradient. Using volatile flag you don't need to change any settings of your model parameters to use it for inference. It's enough to create a volatile input, and this will ensure that no intermediate states are saved.
Handle API pull addresses more flexibly No matter the address handed in, if the user has github integration then try using ssh to pull. If they don't, use a non-ssh cloning address.
@@ -18596,10 +18596,17 @@ def do_playground_pull(area, current_project, github_url=None, branch=None, pypi expected_name = 'unknown' if github_url: github_url = re.sub(r'[^A-Za-z0-9\-\.\_\~\:\/\#\[\]\@\$\+\,\=]', '', github_url) - if github_url.startswith('git@') and can_publish_to_github and github_email: expected_name = re.sub(r'.*/', '', github_url) expected_name = re.sub(r'\.git', '', expected_name) expected_name = re.sub(r'docassemble-', '', expected_name) + repo_url_info = re.sub(r'github_url=', '', github_url) + repo_url_info = re.sub(r'[email protected]:', '', repo_url_info) + repo_url_info = re.sub(r'https://github.com/', '', repo_url_info) + repo_url_info = re.sub(r'\.git', '', repo_url_info) + url_user_name = repo_url_info.split("/")[0] + url_package_name = repo_url_info.split("/")[1] + if can_publish_to_github and github_email: + github_url = f'[email protected]:{url_user_name}/{url_package_name}.git' (private_key_file, public_key_file) = get_ssh_keys(github_email) os.chmod(private_key_file, stat.S_IRUSR | stat.S_IWUSR) os.chmod(public_key_file, stat.S_IRUSR | stat.S_IWUSR) @@ -18616,9 +18623,7 @@ def do_playground_pull(area, current_project, github_url=None, branch=None, pypi output += err.output.decode() return dict(action="error", message="error running git clone. " + output) else: - expected_name = re.sub(r'.*/', '', github_url) - expected_name = re.sub(r'\.git', '', expected_name) - expected_name = re.sub(r'docassemble-', '', expected_name) + github_url = f'https://github.com/{url_user_name}/{url_package_name}' try: if branch is not None: logmessage("Doing git clone -b " + branch + " " + github_url)
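The change above reduces whatever GitHub address is handed in to an owner/repo pair and then rebuilds either an SSH or an HTTPS clone URL, depending on whether GitHub integration is available. A standalone sketch of that normalization step, assuming only the standard re module and made-up example URLs:

import re

def normalize_github(url, use_ssh):
    """Reduce an HTTPS or SSH GitHub address to owner/repo, then rebuild it."""
    info = re.sub(r'^git@github\.com:', '', url)
    info = re.sub(r'^https://github\.com/', '', info)
    info = re.sub(r'\.git$', '', info)
    owner, repo = info.split('/')[:2]
    if use_ssh:
        return f'git@github.com:{owner}/{repo}.git'
    return f'https://github.com/{owner}/{repo}'

print(normalize_github('https://github.com/octocat/Hello-World.git', use_ssh=True))
print(normalize_github('git@github.com:octocat/Hello-World.git', use_ssh=False))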
Fill in more SCONS_CACHE_MSVC_CONFIG detail [ci skip] The original release note blurb on the change to the msvc config cache wasn't as clear as it could be, reworded a bit.
@@ -48,12 +48,16 @@ CHANGED/ENHANCED EXISTING FUNCTIONALITY - The change to "content" and "content-timestamp" Decider names is reflected in the User Guide as well, since the hash function may be other than md5 (tidying up from earlier change) -- If SCONS_CACHE_MSVC_CONFIG is used, it will now attempt a sanity check for - the cached compiler information, and regenerate the information - if needed, rather than just failing after certain compiler version - changes have happened. The cache file can still be manually removed - if there are issues to force a regen. The default cache filename now - has a .json suffix - the contents have always been json. +- If the (experimental) SCONS_CACHE_MSVC_CONFIG feature is used, it will now + attempt a sanity check for the cached compiler information, and regenerate + it if needed. Previously, this cache would fail if a compiler upgrade caused + a change to internal paths (e.g. upgrading from 17.1 to 17.2 causes + a necessary path component in some of the cached vars to need to 14.32.31326 + instead of 14.31.31103), and the cache file needed to be manually removed. + The default cachefile name is now "scons_msvc_cache.json" rather than + ".scons_msvc_cache" so there should be no transition problem if using the + default; if using a custom cache file name, the cache should still be + manually removed if there are problems to transition to the new style. - Update ninja file generation to only create response files for build commands which exceed MAXLINELENGTH - Update the debug output written to stdout for MSVC initialization which is enabled
make cutout op compatible with non-eager mode The cutout op is not compatible with non-eager mode; this is a fix.
@@ -189,7 +189,7 @@ def cutout( mask_4d = tf.expand_dims(masks.stack(), 1) mask = tf.tile(mask_4d, [1, tf.shape(images)[1], 1, 1]) images = tf.where( - mask == 0, + tf.equal(mask, 0), tf.ones_like(images, dtype=images.dtype) * constant_values, images, )
[lp.remove_inames] Sort the inames before removing them Picking a deterministic order in which the inames are removed is *necessary* to ensure that the left over domain is the same across interpreter runs.
@@ -1141,7 +1141,7 @@ def remove_unused_inames(kernel, inames=None): # {{{ remove them domains = kernel.domains - for iname in unused_inames: + for iname in sorted(unused_inames): new_domains = [] for dom in domains:
[docs] Fix typos in ray docs contributing guide There are a couple typos in the [Ray contributing guide](https://docs.ray.io/en/master/ray-contribute/docs.html). I fixed the typos, added a relevant link, and reworded a sentence.
"\n", "```shell\n", "git clone [email protected]:ray-project/ray.git\n", - "cd ray/docs\n", + "cd ray/doc\n", "```\n", "\n", "To install the documentation dependencies, run the following command:\n", "\n", "## What to contribute?\n", "\n", - "If you take Ray Tune as an example, you can see that our documentation is made up from several types of documentation\n", - "that you can all contribute to:\n", + "If you take Ray Tune as an example, you can see that our documentation is made up of several types of documentation,\n", + "all of which you can contribute to:\n", "\n", "- [a project landing page](https://docs.ray.io/en/master/tune/index.html),\n", "- [a getting started guide](https://docs.ray.io/en/master/tune/getting-started.html),\n", "- Notebooks, written in `.ipynb` format. All Tune examples are written as notebooks. These notebooks render in\n", " the browser like `.md` or `.rst` files, but have the added benefit of adding launch buttons to the top of the\n", " document, so that users can run the code themselves in either Binder or Google Colab. A good first example to look\n", - " at is [this Tune example](https://github.com/ray-project/ray/blob/master/doc/source/tune/examples/tune-serve-integration-mnist.ipynb)\n", + " at is [this Tune example](https://github.com/ray-project/ray/blob/master/doc/source/tune/examples/tune-serve-integration-mnist.ipynb).\n", "\n", "## Fixing typos and improving explanations\n", "\n", "If you spot a typo in any document, or think that an explanation is not clear enough, please consider\n", "opening a pull request.\n", "In this scenario you don't need to add any new tests.\n", - "Just run the linter and submit your pull request.\n", + "Just run the [linter](https://docs.ray.io/en/latest/ray-contribute/getting-involved.html#lint-and-formatting)\n", + "and submit your pull request.\n", "\n", "## Adding API references\n", "\n", "\n", "### Tags for your notebook\n", "\n", - "What makes this work is the `:tags: [hide-cell]` directive. in the `code-cell`.\n", + "What makes this work is the `:tags: [hide-cell]` directive in the `code-cell`.\n", "The reason we suggest starting with `.md` files is that it's much easier to add tags to them, as you've just seen.\n", "You can also add tags to `.ipynb` files, but you'll need to start a notebook server for that first, which may\n", "not want to do to contribute a piece of documentation.\n",
Document custom option name requirements. If custom options don't start with ``custom_`` (or ``board_``), pio-core will generate a warning here:
Custom options in ``platformio.ini`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -PlatformIO allows you extending project configuration with own data. You can read -these values later using `ProjectConfig API <https://github.com/platformio/platformio-core/blob/develop/platformio/project/config.py>`__: +PlatformIO allows you extending project configuration with own data. +Custom options have to start with ``custom_`` or ``board_`` to not generate a warning that +the unknown configuration option will be ignored by PlatformIO. +You can read these values later using `ProjectConfig API <https://github.com/platformio/platformio-core/blob/develop/platformio/project/config.py>`__: :``ProjectConfig::get(section, option, default=None)``: Get an option value for the named section
Update elf_rabbit.txt [0]
# See the file 'LICENSE' for copying permission # Reference: https://www.virustotal.com/#/ip-address/185.10.68.163 +# Reference: https://twitter.com/luc4m/status/1044148790008205312 /bruteforce_ssh /bruteforce_ssh_386
Oops, add the return back. We do not wanna process bot messages.
@@ -55,6 +55,7 @@ class Verification(Cog): if message.author.bot: # They're a bot, delete their message after the delay. await message.delete(delay=BOT_MESSAGE_DELETE_DELAY) + return # if a user mentions a role or guild member # alert the mods in mod-alerts channel
fix 'vm cannot be found' error Retry to handle the case where the API doesn't return the VMs in time.
@@ -894,13 +894,11 @@ class AzurePlatform(Platform): errors = [f"{error.code}: {error.message}"] return errors - def _initialize_nodes(self, environment: Environment, log: Logger) -> None: - - node_context_map: Dict[str, Node] = dict() - for node in environment.nodes.list(): - node_context = get_node_context(node) - node_context_map[node_context.vm_name] = node - + # the VM may not be queried after deployed. use retry to mitigate it. + @retry(tries=60, delay=1) # type: ignore + def _load_vms( + self, environment: Environment, log: Logger + ) -> Dict[str, VirtualMachine]: compute_client = get_compute_client(self) environment_context = get_environment_context(environment=environment) vms_map: Dict[str, VirtualMachine] = dict() @@ -910,6 +908,22 @@ class AzurePlatform(Platform): for vm in vms: log.debug(f"found vm '{vm.name}' in resource group.") vms_map[vm.name] = vm + if not vms_map: + raise LisaException( + f"cannot find vm in resource group " + f"{environment_context.resource_group_name}" + ) + return vms_map + + def _initialize_nodes(self, environment: Environment, log: Logger) -> None: + node_context_map: Dict[str, Node] = dict() + for node in environment.nodes.list(): + node_context = get_node_context(node) + node_context_map[node_context.vm_name] = node + + environment_context = get_environment_context(environment=environment) + + vms_map: Dict[str, VirtualMachine] = self._load_vms(environment, log) network_client = NetworkManagementClient( credential=self.credential, subscription_id=self.subscription_id
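The fix wraps the VM lookup in a retry decorator so that a transiently empty listing is retried rather than treated as a failure. The project uses an existing retry decorator with tries/delay arguments; the sketch below hand-rolls an equivalent in plain Python to show the pattern, with a fake API standing in for Azure:

import time
from functools import wraps

def retry(tries=60, delay=1, exceptions=(Exception,)):
    """Re-invoke the wrapped callable until it stops raising or tries run out."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            last_exc = None
            for _ in range(tries):
                try:
                    return func(*args, **kwargs)
                except exceptions as exc:
                    last_exc = exc
                    time.sleep(delay)
            raise last_exc
        return wrapper
    return decorator

@retry(tries=5, delay=0.1, exceptions=(LookupError,))
def load_vms(fake_api):
    vms = fake_api()           # may legitimately return nothing for a while
    if not vms:
        raise LookupError("cannot find vm in resource group yet")
    return vms

# Simulate an API that only returns results on the third call.
calls = {"n": 0}
def fake_api():
    calls["n"] += 1
    return ["vm-1"] if calls["n"] >= 3 else []

print(load_vms(fake_api))   # ['vm-1'] after two silent retries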
Detect `pytest_` prefixed hooks `pluggy` is deprecating the `implprefix` argument in the next major release so implement this detection in our derived plugin manager. Relates to pytest-dev/pluggy#145
@@ -177,7 +177,7 @@ class PytestPluginManager(PluginManager): """ def __init__(self): - super(PytestPluginManager, self).__init__("pytest", implprefix="pytest_") + super(PytestPluginManager, self).__init__("pytest") self._conftest_plugins = set() # state related to local conftest plugins @@ -231,6 +231,11 @@ class PytestPluginManager(PluginManager): method = getattr(plugin, name) opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name) + + # collect unmarked hooks as long as they have the `pytest_' prefix + if opts is None and name.startswith("pytest_"): + opts = {} + if opts is not None: for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"): opts.setdefault(name, hasattr(method, name))
yet again Found another place where I named the variable wrong.
@@ -15,7 +15,7 @@ class TestOMNICustom: # Recast time in minutes rather than seconds self.testInst.data.index = pds.Series([t + dt.timedelta(seconds=60-i) + dt.timedelta(minutes=i) \ - for i,t in enumerate(testInst.data.index)]) + for i,t in enumerate(self.testInst.data.index)]) # Add IMF data self.testInst.data['BX_GSM'] = pds.Series([3.17384966, 5.98685138,
Framework-HyperV: Introduce runSetupScriptOnlyOnce Default behaviour is not changed. This can be enabled as follows in case a test needs it: <setupScript>.\Testscripts\Windows\tester1.ps1</setupScript> + <setupScript>.\Testscripts\Windows\tester1.ps1</setupScript> + <runSetupScriptOnlyOnce>enable</runSetupScriptOnlyOnce>
@@ -523,20 +523,22 @@ function Run-Test { } if ($testPlatform -eq "Hyperv" -and $CurrentTestData.SetupScript) { + if ($null -eq $CurrentTestData.runSetupScriptOnlyOnce) { foreach ($VM in $AllVMData) { - if (Get-VM -Name $VM.RoleName -ComputerName ` - $VM.HyperVHost -EA SilentlyContinue) { - Stop-VM -Name $VM.RoleName -TurnOff -Force -ComputerName ` - $VM.HyperVHost + if (Get-VM -Name $VM.RoleName -ComputerName $VM.HyperVHost -EA SilentlyContinue) { + Stop-VM -Name $VM.RoleName -TurnOff -Force -ComputerName $VM.HyperVHost } foreach ($script in $($CurrentTestData.SetupScript).Split(",")) { - $null = Run-SetupScript -Script $script ` - -Parameters $testParameters + $null = Run-SetupScript -Script $script -Parameters $testParameters } - if (Get-VM -Name $VM.RoleName -ComputerName $VM.HyperVHost ` - -EA SilentlyContinue) { - Start-VM -Name $VM.RoleName -ComputerName ` - $VM.HyperVHost + if (Get-VM -Name $VM.RoleName -ComputerName $VM.HyperVHost -EA SilentlyContinue) { + Start-VM -Name $VM.RoleName -ComputerName $VM.HyperVHost + } + } + } + else { + foreach ($script in $($CurrentTestData.SetupScript).Split(",")) { + $null = Run-SetupScript -Script $script -Parameters $testParameters } } }
Delete OPENMP_STUB translation. Summary: Pull Request resolved:
"cudaMallocManaged": "hipSuccess" } }, - { - "path": "aten/src/TH/generic/THTensorMath.cpp", - "constants": { - "_OPENMP": "_OPENMP_STUB" - } - }, { "path": "aten/src/ATen/native/cuda/Distributions.cu", "s_constants": {
Remove redundant include from jit/fuser/cpu/dynamic_library.h. Summary: Pull Request resolved: ghimport-source-id:
#pragma once -#include <c10/util/Exception.h> -#include <torch/csrc/utils/disallow_copy.h> #include <torch/csrc/WindowsTorchApiMacro.h> +#include <torch/csrc/utils/disallow_copy.h> namespace torch { namespace jit {
minor fix It should be 'state space' instead of 'action space'
@@ -764,7 +764,7 @@ class DesiredVelocityEnv(BottleneckEnv): def get_state(self): """See class definition.""" - # action space is number of vehicles in each segment in each lane, + # state space is number of vehicles in each segment in each lane, # number of rl vehicles in each segment in each lane # mean speed in each segment, and mean rl speed in each # segment in each lane
Update Arduino_Code.ino Small changes: Moving the memset function outside the loop so it doesn't draw time from the calculation. This doesn't change the hashrate much. Using the actual size of "hash_bytes" to compare the result. sizeof is not always reliable (on a pointer it yields the pointer size, not the length of the digest).
@@ -52,7 +52,6 @@ uint16_t ducos1a(String lastblockhash, String newblockhash, uint16_t difficulty) newblockhash.toUpperCase(); const char *c = newblockhash.c_str(); size_t final_len = newblockhash.length() / 2; - memset(job, 0, job_maxsize); for (size_t i = 0, j = 0; j < final_len; i += 2, j++) job[j] = (c[i] % 32 + 9) % 25 * 16 + (c[i + 1] % 32 + 9) % 25; @@ -63,7 +62,7 @@ uint16_t ducos1a(String lastblockhash, String newblockhash, uint16_t difficulty) Sha1.print(lastblockhash + ducos1res); // Get SHA1 result uint8_t *hash_bytes = Sha1.result(); - if (memcmp(hash_bytes, job, sizeof(hash_bytes)) == 0) + if (memcmp(hash_bytes, job, SHA1_HASH_LEN) == 0) { // If expected hash is equal to the found hash, return the result return ducos1res; @@ -88,6 +87,7 @@ String get_DUCOID() { void loop() { // Wait for serial data while (Serial.available() > 0) { + memset(job, 0, job_maxsize); // Read last block hash lastblockhash = Serial.readStringUntil(','); // Read expected hash
Update exercises/concept/tisbury-treasure-hunt/.docs/instructions.md No quotes in the REPL
@@ -59,7 +59,7 @@ Implement the `get_coordinate()` function that takes a `(treasure, coordinate)` ```python >>> get_coordinate(('Scrimshawed Whale Tooth', '2A')) -"2A" +2A ``` ## 2. Format coordinates
Fixed some FLAKE8 errors and numbered tutorial FLAKE8 did not like '#%%' so they have been changed to '# %%'.
# coding: utf-8 """ -Kalman filter tutorial +1 - Kalman filter tutorial ====================== """ @@ -412,9 +412,10 @@ for state in track: angle=np.rad2deg(orient), alpha=0.2) ax.add_artist(ellipse) -# sphinx_gallery_thumbnail_number = 4 fig +# sphinx_gallery_thumbnail_number = 4 + # %% # There are situations in which linearisation of the problem is not useful/possible. # For this, we require an adjusted process, as tackled by the **Extended Kalman Filter**.
Adds list(.) around a dataset.keys() call to check for comparable data in reports. This fixes a bug in which datasets with identical circuits don't get treated as "comparable" during report generation because a .keys() generator is compared with a list of circuits.
@@ -1262,7 +1262,7 @@ def construct_standard_report(results, title="auto", if len(results) > 1: #check if data sets are comparable (if they have the same sequences) arbitrary = next(iter(results.values())) - comparable = all([list(v.dataset.keys()) == arbitrary.dataset.keys() for v in results.values()]) + comparable = all([list(v.dataset.keys()) == list(arbitrary.dataset.keys()) for v in results.values()]) if comparable: flags.add('CompareDatasets') sections.append(_section.DataComparisonSection())
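The one-line fix works because a dict keys view never compares equal to a list, even when the elements match (strictly it is a view object rather than a generator, but the comparison pitfall is the same); wrapping both sides in list(...) makes the comparison element-wise. A quick standard-library demonstration:

d = {"circuit_a": 1, "circuit_b": 2}

print(d.keys() == ["circuit_a", "circuit_b"])        # False: view vs. list
print(list(d.keys()) == ["circuit_a", "circuit_b"])  # True: list vs. list
print(d.keys() == {"circuit_a", "circuit_b"})        # True: views compare like sets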
fix: use OAUTHLIB_RELAX_TOKEN_SCOPE to ignore scope changes Without this we get an error about the mismatch of scopes from Microsoft.
@@ -14,6 +14,8 @@ if any((os.getenv("CI"), frappe.conf.developer_mode, frappe.conf.allow_tests)): # Disable mandatory TLS in developer mode and tests os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1" +os.environ["OAUTHLIB_RELAX_TOKEN_SCOPE"] = "1" + class ConnectedApp(Document): """Connect to a remote oAuth Server. Retrieve and store user's access token
push_notifications: Add `get_mobile_push_content()` function. Given the rendered content of a message, this function strips all the markup, replacing emojis with their corresponding Unicode representation.
@@ -3,9 +3,12 @@ import base64 import binascii from functools import partial import logging +import lxml.html as LH import os +import re import time import random + from typing import Any, Dict, List, Optional, SupportsInt, Text, Union, Type from apns2.client import APNsClient @@ -364,6 +367,38 @@ def get_alert_from_message(message): else: return "New Zulip mentions and private messages from %s" % (sender_str,) +def get_mobile_push_content(rendered_content): + # type: (Text) -> Text + def get_text(elem): + # type: (LH.HtmlElement) -> Text + # Convert default emojis to their unicode equivalent. + classes = elem.get("class", "") + if "emoji" in classes: + match = re.search("emoji-(?P<emoji_code>\S+)", classes) + if match: + emoji_code = match.group('emoji_code') + char_repr = "" + for codepoint in emoji_code.split('-'): + char_repr += chr(int(codepoint, 16)) + return char_repr + # Handles realm emojis, avatars etc. + if elem.tag == "img": + return elem.get("alt", "") + + return elem.text or "" + + def process(elem): + # type: (LH.HtmlElement) -> Text + plain_text = get_text(elem) + for child in elem: + plain_text += process(child) + plain_text += elem.tail or "" + return plain_text + + elem = LH.fromstring(rendered_content) + plain_text = process(elem) + return plain_text + def get_apns_payload(message): # type: (Message) -> Dict[str, Any] return {
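The heart of the new get_mobile_push_content() helper is converting an emoji CSS class such as emoji-1f1ee-1f1f4 into the corresponding Unicode characters. That piece in isolation, using only the standard library:

def emoji_code_to_text(emoji_code):
    """Turn a hyphen-separated hex codepoint string into Unicode characters."""
    return "".join(chr(int(codepoint, 16)) for codepoint in emoji_code.split("-"))

print(emoji_code_to_text("1f389"))        # party popper emoji
print(emoji_code_to_text("1f1ee-1f1f4"))  # two regional indicators (renders as a flag)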
Use correct sdist/build_ext modules This gets rid of the warning: "standard file not found: should have one of README, README.txt"
@@ -6,8 +6,8 @@ import os.path from setuptools import setup, Extension, find_packages from distutils.version import LooseVersion -from distutils.command.sdist import sdist as _sdist -from distutils.command.build_ext import build_ext as _build_ext +from setuptools.command.sdist import sdist as _sdist +from setuptools.command.build_ext import build_ext as _build_ext import versioneer MIN_CYTHON_VERSION = '0.24'
Update nasa-power.yaml updated the tags listed.
@@ -43,6 +43,7 @@ Tags: - metadata - meteorological - model + - opendap - radiation - satellite imagery - solar @@ -52,9 +53,6 @@ Tags: - water - weather - zarr - - NASA - - ARCO - - NASA Space Act Agreement License: There are no restrictions on the use, access, and/or download of data from the NASA POWER Project. We request that you site the NASA POWER Project when using the data provided from NASA POWER Project. Resources: - Description: POWER's Zarr Analysis Ready Data (ARD) Datasets
Add missing `git add` in release taken from most recent release process.
@@ -119,6 +119,7 @@ updates, leave it as it is. ```bash git checkout master -b "version_bump_${NEXT_VER}" python dev_tools/modules.py replace_version --old ${VER}.dev --new ${NEXT_VER}.dev +git add . git commit -m "Bump cirq version to ${NEXT_VER}" git push origin "version_bump_${NEXT_VER}" ```
markdownlint fixes README.md:581:121 MD013/line-length Line length [Expected: 120; Actual: 398] 99
@@ -578,7 +578,10 @@ npm install @prettier/plugin-php ### Prettier Community Plugins -Here's an [example SublimeText project](https://github.com/jonlabelle/SublimeJsPrettier/files/6498394/jsprettier-and-prettier-community-plugin-example.zip) \(posted in [Issue #239](https://github.com/jonlabelle/SublimeJsPrettier/issues/239)\) that uses the Prettier Community Plugin [prettier-plugin-go-template](https://github.com/NiklasPor/prettier-plugin-go-template) to format `*.gohtml` files. +Here's an [example SublimeText project](https://github.com/jonlabelle/SublimeJsPrettier/files/6498394/jsprettier-and-prettier-community-plugin-example.zip) +\(posted in [Issue #239](https://github.com/jonlabelle/SublimeJsPrettier/issues/239)\) +that uses the Prettier Community Plugin [prettier-plugin-go-template](https://github.com/NiklasPor/prettier-plugin-go-template) +to format `*.gohtml` files. ## Issues
Move first query builder instantiation out of init and into Model.boot() This allows for logic affecting the query builder (such as db connection resource injection) to be injected by observer's "booting" event
@@ -216,7 +216,6 @@ class Model(TimeStampsMixin, ObservesEvents, metaclass=ModelMeta): self._relationships = {} self._global_scopes = {} - self.get_builder() self.boot() @classmethod @@ -278,12 +277,12 @@ class Model(TimeStampsMixin, ObservesEvents, metaclass=ModelMeta): class_name = base_class.__name__ if class_name.endswith("Mixin"): - getattr(self, "boot_" + class_name)(self.builder) + getattr(self, "boot_" + class_name)(self.get_builder()) self._booted = True self.observe_events(self, "booted") - self.append_passthrough(list(self.builder._macros.keys())) + self.append_passthrough(list(self.get_builder()._macros.keys())) def append_passthrough(self, passthrough): self.__passthrough__ += passthrough
update: enable new nautilus-only functionality once the cluster is upgraded to nautilus, we can complete the process by disallowing pre-nautilus OSDs and enabling all new nautilus-only functionality
- import_role: name: ceph-client +- name: complete upgrade + hosts: + - all + become: True + tasks: + - import_role: + name: ceph-defaults + - import_role: + name: ceph-facts + + - name: container | disallow pre-nautilus OSDs and enable all new nautilus-only functionality + command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph osd require-osd-release nautilus" + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: True + when: containerized_deployment + + - name: non container | disallow pre-nautilus OSDs and enable all new nautilus-only functionality + command: ceph osd require-osd-release nautilus + delegate_to: "{{ groups[mon_group_name][0] }}" + run_once: True + when: not containerized_deployment - name: container | enable msgr2 protocol command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph mon enable-msgr2"
add explanation about including the 429 code Edit comments to describe the 429 HTTP code that is included in the count of total (valid) responses counted for the availability SLO.
@@ -194,8 +194,8 @@ resource "google_monitoring_slo" "rating_service_availability_slo" { "metric.label.\"response_code\"=\"200\"" ]) - # The total is the number of non-4XX and 429 responses - # We eliminate 4XX responses except 429 since they do not accurately represent server-side + # The total is the number of non-4XX and 429 (Too Many Requests) responses + # We eliminate 4XX responses except 429 (Too Many Requests) since they do not accurately represent server-side # failures and have the possibility of skewing our SLO measurements total_service_filter = join(" AND ", [ "metric.type=\"appengine.googleapis.com/http/server/response_count\"",
ASTNode: delay automatic type resolution to checking pass... ... and consider for this all parse fields (i.e. including inherited ones). This will make it possible to tag as type resolved abstract AST nodes that contain fields that are typed by the grammar. TN:
@@ -1590,13 +1590,6 @@ class StructMetaclass(CompiledTypeMetaclass): 'Properties are not yet supported on plain structs' ) - # Consider that AST nodes with type annotations for all their fields - # are type resolved: they don't need to be referenced by the grammar. - cls.is_type_resolved = (is_astnode and - all(f._type is not None - for f in cls._fields.values() - if isinstance(f, Field))) - return cls @classmethod @@ -2492,6 +2485,13 @@ class ASTNode(Struct): """ Emit a non-fatal error if this ASTNode subclass is not type resolved. """ + # Consider that AST nodes with type annotations for all their fields + # are type resolved: they don't need to be referenced by the grammar. + cls.is_type_resolved = ( + cls.is_type_resolved + or all(f._type is not None for f in cls.get_parse_fields()) + ) + check_source_language( cls.is_type_resolved, 'Unresolved ASTNode subclass. Use it in the grammar or provide a'
Update Raisecom.RCIOS profile HG-- branch : feature/microservices
@@ -38,7 +38,7 @@ class Profile(BaseProfile): if script.parent is None: s_password = script.credentials.get("super_password", "") self.pattern_more = [ - (r"^--More-- ", " "), + (r"^--More-- \(\d+% of \d+ bytes\)", "r"), (r"^Enable: ", s_password + "\n") ]
Adding path_length method and the corresponding tests Minor change in path_length documentation Minor changes and added tests for path_length method Add tests for path_length method
@@ -2793,6 +2793,27 @@ class Tree: """ return self._ll_tree.get_kc_distance(other._ll_tree, lambda_) + def path_length(self, u, v): + """ + Returns the path length between two nodes + (i.e., the number of edges between two nodes in this tree). + If the two nodes have a most recent common ancestor, then this is defined as + ``tree.depth(u) + tree.depth(v) - 2 * tree.depth(tree.mrca(u, v))``. If the nodes + do not have an MRCA (i.e., they are in disconnected subtrees) the path length + is infinity. + + .. seealso:: See also the :meth:`.depth` method + + :param int u: The first node for path length computation. + :param int v: The second node for path length computation. + :return: The number of edges between the two nodes. + :rtype: int + """ + mrca = self.mrca(u, v) + if mrca == -1: + return math.inf + return self.depth(u) + self.depth(v) - 2 * self.depth(mrca) + def sackin_index(self): """ Returns the Sackin imbalance index for this tree. This is defined
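The new method relies on the identity path_length(u, v) = depth(u) + depth(v) - 2 * depth(mrca(u, v)). A self-contained sketch on a toy tree stored as a child-to-parent dict (deliberately not the tskit API) checks the identity against a direct edge count:

import math

# Toy tree:      4
#              /   \
#             3     2
#            / \
#           0   1
parent = {0: 3, 1: 3, 3: 4, 2: 4, 4: None}

def ancestors(u):
    path = [u]
    while parent[u] is not None:
        u = parent[u]
        path.append(u)
    return path

def depth(u):
    return len(ancestors(u)) - 1

def mrca(u, v):
    anc_u = set(ancestors(u))
    for node in ancestors(v):
        if node in anc_u:
            return node
    return None

def path_length(u, v):
    m = mrca(u, v)
    if m is None:
        return math.inf
    return depth(u) + depth(v) - 2 * depth(m)

print(path_length(0, 1))  # 2 edges: 0 -> 3 -> 1
print(path_length(0, 2))  # 3 edges: 0 -> 3 -> 4 -> 2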
Add key_type to CertificateUploadInputSchema Parse cert body to determine algo
@@ -326,6 +326,7 @@ class CertificateUploadInputSchema(CertificateCreationSchema): body = fields.String(required=True) chain = fields.String(missing=None, allow_none=True) csr = fields.String(required=False, allow_none=True, validate=validators.csr) + key_type = fields.String() destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True) notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True) @@ -373,6 +374,10 @@ class CertificateUploadInputSchema(CertificateCreationSchema): # Throws ValidationError validators.verify_cert_chain([cert] + chain) + @pre_load + def load_data(self, data): + data["key_type"] = utils.get_key_type_from_certificate(data["body"]) + class CertificateExportInputSchema(LemurInputSchema): plugin = fields.Nested(PluginInputSchema)
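The schema change derives key_type from the uploaded certificate body in a @pre_load hook, so callers never have to supply it themselves. A minimal sketch of that pattern in marshmallow 3 style; guess_key_type here is a made-up stand-in for the real certificate parser:

from marshmallow import Schema, fields, pre_load

def guess_key_type(body):
    # Stand-in for a real parser such as lemur's get_key_type_from_certificate.
    return "RSA2048" if "BEGIN CERTIFICATE" in body else "unknown"

class CertificateUploadSchema(Schema):
    body = fields.String(required=True)
    key_type = fields.String()

    @pre_load
    def derive_key_type(self, data, **kwargs):
        data["key_type"] = guess_key_type(data["body"])
        return data

result = CertificateUploadSchema().load(
    {"body": "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----"}
)
print(result["key_type"])  # RSA2048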
[tests] Remove test_hackerspaces The wiki has been moved to the https protocol and access will not fail anymore. The test is therefore obsolete (and the remaining tests will be enough).
@@ -196,10 +196,6 @@ class FailingSiteTestCase(SiteDetectionTestCase): """ self.assertNoSite('http://wiki.animutationportal.com/index.php/$1') - def test_hackerspaces(self): - """Test detection of MediaWiki sites for hackerspaces.org.""" - self.assertNoSite('http://hackerspaces.org/wiki/$1') - class APIDisabledTestCase(SiteDetectionTestCase):
Improve the test by checking the created dataset This requires to work
@@ -44,7 +44,12 @@ class TestBQUserDataset(unittest.TestCase): unique_table_name = 'cf_test_table_' + str(uuid.uuid4()).replace('-', '_') dataset = BQUserDataset.name(unique_table_name) \ .column(name='cartodb_id', type='INT64') \ - .column('the_geom', 'GEOMETRY') - - dataset.ttl_seconds(30) + .column('the_geom', 'GEOMETRY') \ + .ttl_seconds(30) dataset.create() + + # do a quick check on the resulting table + result = dataset.download_stream() + df = pandas.read_csv(result) + self.assertEqual(df.shape, (0, 2)) + self.assertEqual(df.to_csv(index=False), 'cartodb_id,the_geom\n')
Do type checking for the input and kernel in the qnn conv2d * [QNN] Convolution 2D Implementation. Rebasing. Empty commit. Clang-format styling. * Reformatting code. * Fixing lint issues.
@@ -40,6 +40,26 @@ namespace qnn { // relay.op.qnn.conv2d TVM_REGISTER_NODE_TYPE(QnnConv2DAttrs); +bool QnnConv2DRel(const Array<Type>& types, + int num_inputs, + const Attrs& attrs, + const TypeReporter& reporter) { + CHECK_EQ(types.size(), 3); + const auto* data = types[0].as<TensorTypeNode>(); + const auto* weight = types[1].as<TensorTypeNode>(); + if (data == nullptr || weight == nullptr) return false; + const auto* param = attrs.as<QnnConv2DAttrs>(); + CHECK(param != nullptr) << "QnnConv2DAttrs cannot be nullptr."; + CHECK(data->dtype == Int(8) || data->dtype == UInt(8)) + << "Expected qnn conv2d type(int8, uint8) for input but was " << data->dtype; + CHECK(weight->dtype == Int(8) || weight->dtype == UInt(8)) + << "Expected qnn conv2d type(int8, uint8) for weight but was " << weight->dtype; + CHECK(param->out_dtype == Int(16) || param->out_dtype == Int(32)) + << "Expected qnn conv2d type(int32, int16) for output but was " << param->out_dtype; + CHECK(param->out_dtype.bits() > 0) << "Output dtype bits should be greater than 0."; + return Conv2DRel<QnnConv2DAttrs>(types, num_inputs, attrs, reporter); +} + // Workload - batch_size, in_channels, out_channels, kernel_h, kernel_w using WorkloadType = std::tuple<int, int, int, int, int>; @@ -475,7 +495,7 @@ operator to understand how to scale back the int32 output to (u)int8. .add_argument("data", "Tensor", "The quantized input data tensor.") .add_argument("weight", "Tensor", "The quantized weight tensor.") .set_support_level(11) -.add_type_rel("QnnConv2D", Conv2DRel<QnnConv2DAttrs>) +.add_type_rel("QnnConv2D", QnnConv2DRel) .set_attr<FTVMLegalize>("FTVMQnnCanonicalize", QnnConv2DCanonicalize); TVM_REGISTER_API("relay.qnn.op._make.conv2d").set_body_typed(MakeQnnConv2D);
Update integration.rst Made open source integrations link into a note to emphasize and make consistent with Overview>Integrations Overview.
Mattermost Integration Guide ---------------------------- -Documentation on extending and integrating with the Mattermost server. For developer focused documentation, see `https://developers.mattermost.com/ <https://developers.mattermost.com/>`_. To see what integrations are currently available, see `https://about.mattermost.com/community-applications/ <https://about.mattermost.com/community-applications/>`_. +Documentation on extending and integrating with the Mattermost server. For developer focused documentation, see `https://developers.mattermost.com/ <https://developers.mattermost.com/>`_. + + .. note:: + To see a list of open source integrations please see the `Mattermost Integrations Directory <https://about.mattermost.com/community-applications/>`_ Come `join our "Contributors" community channel <https://pre-release.mattermost.com/core/channels/tickets>`_ on our daily build server, where you can discuss questions with community members and the Mattermost core team. Join our `"Developers" channel <https://pre-release.mattermost.com/core/channels/developers>`_ for technical discussions and our `"Developer Toolkit" channel <https://pre-release.mattermost.com/core/channels/developer-toolkit>`_ for all integrations and plugins discussions.
Vehicles are cuboids, not cylinders! See
@@ -17,7 +17,7 @@ class BicycleVehicle(Vehicle): MASS: float = 1 # [kg] LENGTH_A: float = Vehicle.LENGTH / 2 # [m] LENGTH_B: float = Vehicle.LENGTH / 2 # [m] - INERTIA_Z: float = 1/12 * MASS * (Vehicle.LENGTH ** 2 + 3 * Vehicle.WIDTH ** 2) # [kg.m2] + INERTIA_Z: float = 1/12 * MASS * (Vehicle.LENGTH ** 2 + Vehicle.WIDTH ** 2) # [kg.m2] FRICTION_FRONT: float = 15.0 * MASS # [N] FRICTION_REAR: float = 15.0 * MASS # [N]
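For reference (a standard result, not taken from the commit itself): the yaw moment of inertia of a solid cuboid of mass m, length L and width W about the vertical axis through its centre is

```latex
I_z = \frac{1}{12}\, m \left(L^2 + W^2\right)
```

which matches the corrected `INERTIA_Z` expression; the extra factor of 3 on the width term in the old line does not correspond to a cuboid.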
Fixed time conversion error in ScansAPI.list. If a datetime object is passed as per the documentation, an error is thrown because mktime expects a time tuple instead of a datetime object. Modified the code to convert to a timetuple after type checking has happened.
@@ -587,7 +587,7 @@ class ScansAPI(TIOEndpoint): # for the last_modified datetime attribute, we will want to convert # that into a timestamp integer before passing it to the API. params['last_modified'] = int(time.mktime(self._check( - 'last_modified', last_modified, datetime))) + 'last_modified', last_modified, datetime).timetuple())) return self._api.get('scans', params=params).json()['scans']
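A minimal standalone sketch (the date is made up, independent of the pyTenable code above) of why the `.timetuple()` conversion is needed:

```python
import time
from datetime import datetime

last_modified = datetime(2019, 1, 1, 12, 0)

# time.mktime() expects a struct_time ("time tuple"), not a datetime:
#   time.mktime(last_modified)  # -> TypeError
timestamp = int(time.mktime(last_modified.timetuple()))
print(timestamp)  # seconds since the epoch, interpreted in local time
```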
m1n1.hw.uat: fix VA_MASK Was missing the lowest bits, which broke unaligned reads/writes
@@ -48,6 +48,7 @@ class UAT(Reloadable): self.VA_MASK = 0 for (off, size) in self.LEVELS: self.VA_MASK |= (size - 1) << off + self.VA_MASK |= self.PAGE_SIZE - 1 def set_ttbr(self, addr): self.ttbr = addr
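A standalone sketch of the mask construction; the page size and level layout below are made-up placeholders, not the real UAT configuration:

```python
# Hypothetical 16 KiB pages and three translation levels: (bit offset, entry count)
PAGE_SIZE = 0x4000
LEVELS = [(36, 8), (25, 2048), (14, 2048)]

va_mask = 0
for off, size in LEVELS:
    va_mask |= (size - 1) << off   # index bits covered by each translation level
va_mask |= PAGE_SIZE - 1           # the fix: also cover the in-page offset bits
print(hex(va_mask))
```

Without the last line, address bits below the lowest level's offset would be masked away, which is what broke unaligned reads/writes.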
Update 2.7.rst Fixed another broken link - issue h5py#1145
@@ -88,7 +88,7 @@ Other changes .. _`#811` : https://github.com/h5py/h5py/pull/811 .. _`#812` : https://github.com/h5py/h5py/pull/812 .. _`HDF5 Direct Chunk Write` : https://support.hdfgroup.org/HDF5/doc/Advanced/DirectChunkWrite/ -.. _`HDF5 File Image Operations` : http://www.hdfgroup.org/HDF5/doc/Advanced/FileImageOperations/HDF5FileImageOperations.pdf +.. _`HDF5 File Image Operations` : https://support.hdfgroup.org/HDF5/doc/Advanced/FileImageOperations/HDF5FileImageOperations.pdf Acknowledgements ----------------
CodeSnippets: refactor on_message Reduce nesting and code duplication.
@@ -222,7 +222,9 @@ class CodeSnippets(Cog): @Cog.listener() async def on_message(self, message: Message) -> None: """Checks if the message has a snippet link, removes the embed, then sends the snippet contents.""" - if not message.author.bot: + if message.author.bot: + return + all_snippets = [] for pattern, handler in self.pattern_handlers: @@ -240,22 +242,23 @@ class CodeSnippets(Cog): # Sorts the list of snippets by their match index and joins them into a single message message_to_send = '\n'.join(map(lambda x: x[1], sorted(all_snippets))) + destination = message.channel if 0 < len(message_to_send) <= 2000 and message_to_send.count('\n') <= 15: await message.edit(suppress=True) + if len(message_to_send) > 1000 and message.channel.id != Channels.bot_commands: # Redirects to #bot-commands if the snippet contents are too long await self.bot.wait_until_guild_available() - await message.channel.send(('The snippet you tried to send was too long. Please ' - f'see <#{Channels.bot_commands}> for the full snippet.')) - bot_commands_channel = self.bot.get_channel(Channels.bot_commands) - await wait_for_deletion( - await bot_commands_channel.send(message_to_send), - (message.author.id,) + destination = self.bot.get_channel(Channels.bot_commands) + + await message.channel.send( + 'The snippet you tried to send was too long. ' + f'Please see {destination.mention} for the full snippet.' ) - else: + await wait_for_deletion( - await message.channel.send(message_to_send), + await destination.send(message_to_send), (message.author.id,) )
only catch `dropbox.exceptions.ApiError` in `get_metadata` ... ... and raise for instance AuthError, etc
@@ -53,6 +53,7 @@ OS_FILE_ERRORS = ( PermissionError, ) + def bytes_to_str(num, suffix='B'): """ Convert number to a human readable string with decimal prefix. @@ -235,7 +236,7 @@ class MaestralApiClient(object): try: md = self.dbx.files_get_metadata(dbx_path, **kwargs) logger.debug(f"Retrieved metadata for '{md.path_display}'") - except dropbox.exceptions.DropboxException as exc: + except dropbox.exceptions.ApiError as exc: # DropboxAPI error is only raised when the item does not exist on Dropbox # this is handled on a DEBUG level since we use call `get_metadata` to check # if a file exists @@ -274,7 +275,6 @@ class MaestralApiClient(object): """ # create local directory if not present dst_path_directory = osp.dirname(dst_path) - os.makedirs(dst_path_directory, exist_ok=True) md = self.dbx.files_download_to_file(dst_path, dbx_path, **kwargs) @@ -488,6 +488,7 @@ class MaestralApiClient(object): entries_all = [] for result in results: entries_all += result.entries + results_flattened = dropbox.files.ListFolderResult( entries=entries_all, cursor=results[-1].cursor, has_more=False)
Harden 'create_bucket' systest against 429 responses. Closes
@@ -94,7 +94,7 @@ class TestStorageBuckets(unittest.TestCase): new_bucket_name = 'a-new-bucket' + unique_resource_id('-') self.assertRaises(exceptions.NotFound, Config.CLIENT.get_bucket, new_bucket_name) - created = Config.CLIENT.create_bucket(new_bucket_name) + created = retry_429(Config.CLIENT.create_bucket)(new_bucket_name) self.case_buckets_to_delete.append(new_bucket_name) self.assertEqual(created.name, new_bucket_name)
Update test.yml Trying to fix pytorch installation problem
@@ -27,7 +27,7 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install torch==1.6.0+cpu -f https://download.pytorch.org/whl/torch_stable.html + pip install torch==1.7.1+cpu torchvision==0.8.2+cpu torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html pip install -e .[test] - name: Test with pytest run: |
Call update_search for project so it shows up in results [#PLAT-1176]
@@ -80,10 +80,12 @@ class ApiSearchTestCase: @pytest.fixture() def project(self, user_one): - return ProjectFactory( + project = ProjectFactory( title='Graduation', creator=user_one, is_public=True) + project.update_search() + return project @pytest.fixture() def project_public(self, user_one):
[Network] Description of network bastion tunnel command Description: Corrected the description of the network bastion tunnel command and its corresponding example. Also removed an "(autogenerated)" suffix attached to the example description for the network bastion show command, which didn't make sense to me.
@@ -7346,7 +7346,7 @@ helps['network bastion show'] = """ type: command short-summary: Show a Azure bastion host machine. examples: - - name: Show a Azure bastion host machine. (autogenerated) + - name: Show a Azure bastion host machine. text: | az network bastion show --name MyBastionHost --resource-group MyResourceGroup crafted: true @@ -7378,9 +7378,9 @@ examples: helps['network bastion tunnel'] = """ type: command -short-summary: Show a Azure bastion host machine. +short-summary: Open a tunnel through Azure bastion to a target virtual machine. examples: - - name: Show a Azure bastion host machine. + - name: Open a tunnel through Azure bastion to a target virtual machine. text: | az network bastion tunnel --name MyBastionHost --resource-group MyResourceGroup --target-resource-id vmResourceId --resource-port 111 --port 222 """
Modify unit test cr:
@@ -277,11 +277,6 @@ class TestRequests(unittest.TestCase): 'OptionName': 'EnableSpot', 'Value': 'true' }, - { - 'Namespace': 'aws:ec2:instances', - 'OptionName': 'InstanceTypes', - 'Value': 't2.micro, t2.large' - }, { 'Namespace': 'aws:ec2:instances', 'OptionName': 'SpotMaxPrice',
[dynamic] multi out map test

Summary: created when attempting to repro a report

Test Plan: added test

Reviewers: cdecarolis, sandyryza, owen, yuhan
import pytest -from dagster import execute_solid, solid +from dagster import execute_pipeline, execute_solid, pipeline, solid from dagster.core.definitions.events import Output from dagster.core.definitions.output import OutputDefinition from dagster.core.errors import DagsterInvalidDefinitionError, DagsterInvariantViolationError @@ -100,3 +100,40 @@ def should_work(_): assert result.output_value("numbers") == {"1": 1, "2": 2} assert result.output_value("letters") == {"a": "a", "b": "b", "c": "c"} assert result.output_value("wildcard") == "*" + + +def test_multi_out_map(): + @solid(output_defs=[DynamicOutputDefinition()]) + def emit(): + yield DynamicOutput(1, mapping_key="1") + yield DynamicOutput(2, mapping_key="2") + yield DynamicOutput(3, mapping_key="3") + + @solid( + output_defs=[ + OutputDefinition(name="a", is_required=False), + OutputDefinition(name="b", is_required=False), + OutputDefinition(name="c", is_required=False), + ] + ) + def multiout(inp: int): + if inp == 1: + yield Output(inp, output_name="a") + else: + yield Output(inp, output_name="b") + + @solid + def echo(a): + return a + + @pipeline + def destructure(): + a, b, c = emit().map(multiout) + echo.alias("echo_a")(a.collect()) + echo.alias("echo_b")(b.collect()) + echo.alias("echo_c")(c.collect()) + + result = execute_pipeline(destructure) + assert result.result_for_solid("echo_a").output_value() == [1] + assert result.result_for_solid("echo_b").output_value() == [2, 3] + assert result.result_for_solid("echo_c").skipped # all fanned in inputs skipped -> solid skips
synchronizer: ensure fairness between wallets Scenario (prior to change): User opens wallet1 with 10k addresses, and then immediately opens wallet2 with 100 addresses. wallet1 will synchronise first, fully, and only then will wallet2 start syncing. Now, wallet1 and wallet2 will sync concurrently (and wallet2 will finish much sooner as expected).
@@ -61,6 +61,10 @@ class SynchronizerBase(NetworkJobOnDefaultServer): def __init__(self, network: 'Network'): self.asyncio_loop = network.asyncio_loop self._reset_request_counters() + # Ensure fairness between Synchronizers. e.g. if multiple wallets + # are open, a large wallet should not starve the small wallets: + self._network_request_semaphore = asyncio.Semaphore(100) + NetworkJobOnDefaultServer.__init__(self, network) def _reset(self): @@ -106,6 +110,7 @@ class SynchronizerBase(NetworkJobOnDefaultServer): self.scripthash_to_address[h] = addr self._requests_sent += 1 try: + async with self._network_request_semaphore: await self.session.subscribe('blockchain.scripthash.subscribe', [h], self.status_queue) except RPCError as e: if e.message == 'history too large': # no unique error code @@ -167,6 +172,7 @@ class Synchronizer(SynchronizerBase): self.requested_histories.add((addr, status)) h = address_to_scripthash(addr) self._requests_sent += 1 + async with self._network_request_semaphore: result = await self.interface.get_history_for_scripthash(h) self._requests_answered += 1 self.logger.info(f"receiving history {addr} {len(result)}") @@ -210,6 +216,7 @@ class Synchronizer(SynchronizerBase): async def _get_transaction(self, tx_hash, *, allow_server_not_finding_tx=False): self._requests_sent += 1 try: + async with self._network_request_semaphore: raw_tx = await self.interface.get_transaction(tx_hash) except RPCError as e: # most likely, "No such mempool or blockchain transaction"
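A self-contained toy sketch of the mechanism (the limit of 100 mirrors the semaphore above; everything else is made up for illustration): the semaphore bounds how many "network" requests are in flight at once, so each wallet's Synchronizer consumes the session in bounded batches instead of flooding it.

```python
import asyncio

sem = asyncio.Semaphore(100)   # same bound as the Synchronizer's semaphore
in_flight = 0
peak = 0

async def request(i):
    global in_flight, peak
    async with sem:                 # caps concurrent requests
        in_flight += 1
        peak = max(peak, in_flight)
        await asyncio.sleep(0.001)  # stand-in for a server round trip
        in_flight -= 1

async def main():
    await asyncio.gather(*(request(i) for i in range(1000)))
    print("peak concurrent requests:", peak)   # never exceeds 100

asyncio.run(main())
```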
Fix type of FileKeyring.keyring_path This type is always non-None, as it is assigned in __init__ from a call that returns a non-None Path.
@@ -123,7 +123,7 @@ class FileKeyring(FileSystemEventHandler): The salt is updated each time the master passphrase is changed. """ - keyring_path: Optional[Path] = None + keyring_path: Path keyring_lock_path: Path keyring_observer: Observer = None load_keyring_lock: threading.RLock # Guards access to needs_load_keyring
Update kpot.txt New trails + generalization
@@ -14,24 +14,37 @@ seeyouonlineservice.com # Reference: https://twitter.com/James_inthe_box/status/1108789993923723264 /DJvS7iHPfoXDzPvo/gate.php +/DJvS7iHPfoXDzPvo/login.php # Reference: https://twitter.com/4chr4f2/status/1103316628245164032 +/NIwxn5JBvMom6naz/gate.php /NIwxn5JBvMom6naz/login.php # Reference: https://twitter.com/avman1995/status/1090972632261029891 /03SleOcRkLyD69DQ/gate.php +/03SleOcRkLyD69DQ/login.php # Reference: https://twitter.com/ViriBack/status/1069965350442283009 # Reference: https://pastebin.com/PTkLE0se +/bnAgxoxMGuqZidGE/gate.php /bnAgxoxMGuqZidGE/login.php # Reference: https://twitter.com/malware_traffic/status/1110176575922864128 /8pqPR0YZKhASBoKU/gate.php +/8pqPR0YZKhASBoKU/login.php # Reference: https://twitter.com/takerk734/status/1113851637292920832 /9AhiTpcUu2lUfGvx/gate.php +/9AhiTpcUu2lUfGvx/login.php + +# Reference: https://www.proofpoint.com/us/threat-insight/post/new-kpot-v20-stealer-brings-zero-persistence-and-memory-features-silently-steal + +/a6Y5Qy3cF1sOmOKQ/gate.php +/a6Y5Qy3cF1sOmOKQ/login.php +/lmpUNlwDfoybeulu/gate.php +/lmpUNlwDfoybeulu/login.php
tests: Remove ignored parameters from custom profile field tests. `update_realm_custom_profile_field` does not take `field_type` as a parameter, so this removes it from any related tests. Possibly these test parameters were missed in a refactor of this endpoint / code.
@@ -415,21 +415,21 @@ class UpdateCustomProfileFieldTest(CustomProfileFieldTestCase): realm = get_realm("zulip") result = self.client_patch( "/json/realm/profile_fields/100", - info={"name": "Phone number", "field_type": CustomProfileField.SHORT_TEXT}, + info={"name": "Phone number"}, ) self.assert_json_error(result, "Field id 100 not found.") field = CustomProfileField.objects.get(name="Phone number", realm=realm) result = self.client_patch( f"/json/realm/profile_fields/{field.id}", - info={"name": "", "field_type": CustomProfileField.SHORT_TEXT}, + info={"name": ""}, ) self.assert_json_error(result, "Label cannot be blank.") self.assertEqual(CustomProfileField.objects.count(), self.original_count) result = self.client_patch( f"/json/realm/profile_fields/{field.id}", - info={"name": "New phone number", "field_type": CustomProfileField.SHORT_TEXT}, + info={"name": "New phone number"}, ) self.assert_json_success(result) field = CustomProfileField.objects.get(id=field.id, realm=realm) @@ -440,7 +440,7 @@ class UpdateCustomProfileFieldTest(CustomProfileFieldTestCase): result = self.client_patch( f"/json/realm/profile_fields/{field.id}", - info={"name": "*" * 41, "field_type": CustomProfileField.SHORT_TEXT}, + info={"name": "*" * 41}, ) msg = "name is too long (limit: 40 characters)" self.assert_json_error(result, msg) @@ -450,7 +450,6 @@ class UpdateCustomProfileFieldTest(CustomProfileFieldTestCase): info={ "name": "New phone number", "hint": "*" * 81, - "field_type": CustomProfileField.SHORT_TEXT, }, ) msg = "hint is too long (limit: 80 characters)" @@ -461,7 +460,6 @@ class UpdateCustomProfileFieldTest(CustomProfileFieldTestCase): info={ "name": "New phone number", "hint": "New contact number", - "field_type": CustomProfileField.SHORT_TEXT, }, ) self.assert_json_success(result) @@ -474,7 +472,7 @@ class UpdateCustomProfileFieldTest(CustomProfileFieldTestCase): result = self.client_patch( f"/json/realm/profile_fields/{field.id}", - info={"name": "Name ", "field_type": CustomProfileField.SHORT_TEXT}, + info={"name": "Name "}, ) self.assert_json_success(result) field.refresh_from_db() @@ -525,7 +523,7 @@ class UpdateCustomProfileFieldTest(CustomProfileFieldTestCase): self.assertTrue(self.custom_field_exists_in_realm(field_2.id)) result = self.client_patch( f"/json/realm/profile_fields/{field_2.id}", - info={"name": "Phone", "field_type": CustomProfileField.SHORT_TEXT}, + info={"name": "Phone"}, ) self.assert_json_error(result, "A field with that label already exists.")
Add CII badge We are currently determining our level of compliance, so this is a work in progress.
@@ -3,13 +3,15 @@ A Framework for Securing Software Update Systems .. image:: https://travis-ci.org/theupdateframework/tuf.svg?branch=develop :target: https://travis-ci.org/theupdateframework/tuf + :alt: Travis .. image:: https://coveralls.io/repos/theupdateframework/tuf/badge.svg?branch=develop :target: https://coveralls.io/r/theupdateframework/tuf?branch=develop + :alt: Coveralls .. image:: https://pyup.io/repos/github/theupdateframework/tuf/shield.svg :target: https://pyup.io/repos/github/theupdateframework/tuf/ - :alt: Updates + :alt: Pyup .. image:: https://pyup.io/repos/github/theupdateframework/tuf/python-3-shield.svg :target: https://pyup.io/repos/github/theupdateframework/tuf/ @@ -19,6 +21,10 @@ A Framework for Securing Software Update Systems :target: https://app.fossa.io/projects/git%2Bgithub.com%2Ftheupdateframework%2Ftuf?ref=badge_shield :alt: FOSSA +.. image:: https://bestpractices.coreinfrastructure.org/projects/1351/badge + :target: https://bestpractices.coreinfrastructure.org/projects/1351 + :alt: CII + .. image:: /docs/images/banner_readme.JPG The Update Framework (TUF) helps developers to secure new or existing
use `shutil.move` instead of `os.replace` ... ... in case we need to move across file system borders
@@ -2296,7 +2296,8 @@ class UpDownSync: # replace it and remove all its children. # we download to a temporary file first (this may take some time) - with tempfile.NamedTemporaryFile(delete=False) as f: + with tempfile.NamedTemporaryFile(prefix='maestral_download_', + delete=False) as f: tmp_fname = f.name md = self.client.download(f'rev:{entry.rev}', tmp_fname) @@ -2321,7 +2322,7 @@ class UpDownSync: with self.fs_events.ignore(local_path, event_types=(EVENT_TYPE_DELETED, EVENT_TYPE_CREATED)): - os.replace(tmp_fname, local_path) + shutil.move(tmp_fname, local_path) self.set_last_sync_for_path(entry.path_lower, self.get_ctime(local_path)) self.set_local_rev(entry.path_lower, md.rev)
replay: only keep one init_data in merged events don't merge init_data
@@ -269,9 +269,13 @@ void Replay::mergeSegments(const SegmentMap::iterator &begin, const SegmentMap:: new_events_->reserve(new_events_size); for (int n : segments_need_merge) { const auto &e = segments_[n]->log->events; - auto middle = new_events_->insert(new_events_->end(), e.begin(), e.end()); + if (e.size() > 0) { + auto insert_from = e.begin(); + if (new_events_->size() > 0 && (*insert_from)->which == cereal::Event::Which::INIT_DATA) ++insert_from; + auto middle = new_events_->insert(new_events_->end(), insert_from, e.end()); std::inplace_merge(new_events_->begin(), middle, new_events_->end(), Event::lessThan()); } + } updateEvents([&]() { events_.swap(new_events_);
Adapt existing content from elsewhere to document YamlSyntaxException: Failed to read 'data/test_wrong_yaml_stories/wrong_yaml.yml'. while parsing a flow node did not find expected node content in "data/test_wrong_yaml_stories/wrong_yaml.yml", line 2, column 1 You can use to validate the YAML syntax of your file.
@@ -21,6 +21,7 @@ abstract: The command line interface (CLI) gives you easy-to-remember commands f |`rasa test` |Tests a trained Rasa model on any files starting with `test_`. | |`rasa data split nlu` |Performs a 80/20 split of your NLU training data. | |`rasa data convert` |Converts training data between different formats. | +|`rasa data validate` |Checks the domain, NLU and conversation data. | |`rasa export` |Exports conversations from a tracker store to an event broker. | |`rasa x` |Launches Rasa X locally. | |`rasa -h` |Shows all available commands. | @@ -302,6 +303,72 @@ You can specify the input file or directory, output directory with the following rasa data convert nlg --help ``` +## rasa data validate +You can check your domain, NLU data, or conversation data for mistakes and inconsistencies. +To validate your data, run this command: +```bash +rasa data validate +``` + +By default the validator searches only for errors in the data, e.g. the same +example being listed as an example for two intents. +We recommend you to run this check before training a model because if data validation results +in errors, training a model will fail as well. + +To catch minor issues that don't prevent training a model but might indicate messy data, +e.g. unused intents, use the `--fail-on-warnings` flag. + +You can also validate specifically the story structure by running this command: +```bash +rasa data validate stories +``` +This validator checks if you have any stories where different assistant actions follow from the same +dialogue history. Conflicts between stories will prevent a model from learning the correct +pattern for a dialogue. If you have [Memoization Policy](./policies.mdx#memoization-policy) in your +`config.yml` file, run the validator with the `--max-history` argument and provide the `max_history` +value set in `config.yml`. If you didn't set `max_history` in the config file, provide the default value of `5`. + +:::caution check your story names +The `rasa data validate stories` command assumes that all your story names are unique! +::: + +You can use additional arguments, e.g. to specify the location of your data and domain files: +``` +usage: rasa data validate [-h] [-v] [-vv] [--quiet] + [--max-history MAX_HISTORY] [--fail-on-warnings] + [-d DOMAIN] [--data DATA] + {stories} ... + +positional arguments: + {stories} + stories Checks for inconsistencies in the story files. + +optional arguments: + -h, --help show this help message and exit + --max-history MAX_HISTORY + Number of turns taken into account for story structure + validation. (default: None) + --fail-on-warnings Fail validation on warnings and errors. If omitted + only errors will result in a non zero exit code. + (default: False) + -d DOMAIN, --domain DOMAIN + Domain specification. This can be a single YAML file, + or a directory that contains several files with domain + specifications in it. The content of these files will + be read and merged together. (default: domain.yml) + --data DATA Path to the file or directory containing Rasa data. + (default: data) + +Python Logging Options: + -v, --verbose Be verbose. Sets logging level to INFO. (default: + None) + -vv, --debug Print lots of debugging statements. Sets logging level + to DEBUG. (default: None) + --quiet Be quiet! Sets logging level to WARNING. (default: + None) +``` + + ## rasa export To export events from a tracker store using an event broker, run:
Add `check_redirect_on_user_query` helper function Extract the checking for a redirect to reduce complexity
@@ -1171,35 +1171,12 @@ def merge_tickets(request): }) -@helpdesk_staff_member_required -def ticket_list(request): - context = {} - - huser = HelpdeskUser(request.user) - - # Query_params will hold a dictionary of parameters relating to - # a query, to be saved if needed: - query_params = { - 'filtering': {}, - 'filtering_or': {}, - 'sorting': None, - 'sortreverse': False, - 'search_string': '', - } - default_query_params = { - 'filtering': { - 'status__in': [1, 2], - }, - 'sorting': 'created', - 'search_string': '', - 'sortreverse': False, - } - - # If the user is coming from the header/navigation search box, lets' first - # look at their query to see if they have entered a valid ticket number. If - # they have, just redirect to that ticket number. Otherwise, we treat it as - # a keyword search. - +def check_redirect_on_user_query(request, huser): + """If the user is coming from the header/navigation search box, lets' first + look at their query to see if they have entered a valid ticket number. If + they have, just redirect to that ticket number. Otherwise, we treat it as + a keyword search. + """ if request.GET.get('search_type', None) == 'header': query = request.GET.get('q') filter_ = None @@ -1228,7 +1205,37 @@ def ticket_list(request): except Ticket.DoesNotExist: # Go on to standard keyword searching pass + return None + + +@helpdesk_staff_member_required +def ticket_list(request): + context = {} + + huser = HelpdeskUser(request.user) + + # Query_params will hold a dictionary of parameters relating to + # a query, to be saved if needed: + query_params = { + 'filtering': {}, + 'filtering_or': {}, + 'sorting': None, + 'sortreverse': False, + 'search_string': '', + } + default_query_params = { + 'filtering': { + 'status__in': [1, 2], + }, + 'sorting': 'created', + 'search_string': '', + 'sortreverse': False, + } + #: check for a redirect, see function doc for details + redirect = check_redirect_on_user_query(request, huser) + if redirect: + return redirect try: saved_query, query_params = load_saved_query(request, query_params) except QueryLoadError:
Update strawberryfields/decompositions.py Docstring change
@@ -168,7 +168,8 @@ def nullT(n, m, U): def clements(V, tol=1e-11): - r"""Performs the Clements decomposition of a Unitary complex matrix. + r"""Performs the Clements decomposition of a unitary complex matrix, with local + phase shifts applied between two interferometers. See Clements et al. Optica 3, 1460 (2016) [10.1364/OPTICA.3.001460] for more details.
Code block: refactor `send_guide_embed` * Rename to `send_instructions` to be consistent with the use of "instructions" rather than "guide" elsewhere * Rename the `description` parameter to `instructions`
@@ -76,15 +76,15 @@ class CodeBlockCog(Cog, name="Code Block"): or channel.id in self.channel_whitelist ) - async def send_guide_embed(self, message: discord.Message, description: str) -> None: + async def send_instructions(self, message: discord.Message, instructions: str) -> None: """ - Send an embed with `description` as a guide for an improperly formatted `message`. + Send an embed with `instructions` on fixing an incorrect code block in a `message`. The embed will be deleted automatically after 5 minutes. """ log.trace("Sending an embed with code block formatting instructions.") - embed = Embed(description=description) + embed = Embed(description=instructions) bot_message = await message.channel.send(f"Hey {message.author.mention}!", embed=embed) self.codeblock_message_ids[message.id] = bot_message.id @@ -124,7 +124,7 @@ class CodeBlockCog(Cog, name="Code Block"): instructions = get_instructions(msg.content) if instructions: - await self.send_guide_embed(msg, instructions) + await self.send_instructions(msg, instructions) if msg.channel.id not in self.channel_whitelist: log.trace(f"Adding #{msg.channel} to the channel cooldowns.")
Remove 2 of 3 types of 'get_saved_export' There are 3 different signatures of this method in this file - one which accepts domain and schema ID, one which accepts domain, app_id, and identifier, and an SMS one that looks like it should match the first, but accepts domain and a boolean
@@ -281,13 +281,6 @@ class BaseDownloadExportView(HQJSONResponseMixin, BaseProjectDataView): """ raise NotImplementedError("You must implement download_export_form.") - @staticmethod - def get_export_schema(domain, export_id): - doc = get_document_or_404_lite(SavedExportSchema, export_id) - if doc.index[0] == domain: - return doc - raise Http404(_("Export not found")) - @property def export_id(self): return self.kwargs.get('export_id') @@ -325,7 +318,7 @@ class BaseDownloadExportView(HQJSONResponseMixin, BaseProjectDataView): return exports def _get_export(self, domain, export_id): - return self.get_export_schema(self.domain, export_id) + raise NotImplementedError() @property def max_column_size(self): @@ -403,9 +396,8 @@ class BaseDownloadExportView(HQJSONResponseMixin, BaseProjectDataView): return download def _get_and_rebuild_export_schema(self, export_id): - export_object = self.get_export_schema(self.domain, export_id) - export_object.update_schema() - return export_object + # TODO + raise Exception("this must be failing if it's ever called...") def _get_bulk_download_task(self, export_specs, export_filter): for export_spec in export_specs: @@ -2016,10 +2008,6 @@ class DownloadNewSmsExportView(GenericDownloadNewExportMixin, BaseDownloadExport export_id = None sms_export = True - @staticmethod - def get_export_schema(domain, include_metadata): - return SMSExportDataSchema.get_latest_export_schema(domain, include_metadata) - @property def export_list_url(self): return None
Make testManyArgs actually test pmap with many args For some reason the test has always been passing a single array since it was added, which seems contradictory with its purpose.
@@ -1690,7 +1690,7 @@ class PythonPmapTest(jtu.JaxTestCase): vals = list(range(500)) ndevices = jax.device_count() - self.assertAllClose(f(jnp.array([vals] * ndevices)), + self.assertAllClose(f([np.array([i] * ndevices) for i in range(500)]), jnp.array([sum(vals)] * ndevices)) def testPostProcessMap2(self):
New link. Looked at similar architecture (Big slanty building)
@@ -176,6 +176,8 @@ id: tx-dallas-4 The first-person video shows an individual running away from what seem to be loud explosions. +Additional footage shows a protest in the same vicinity. Police sound a siren and protestors begin to back away. Police then begin releasing tear gas. + tags: flashbangs, tear-gas id: tx-dallas-2 @@ -183,6 +185,7 @@ id: tx-dallas-2 **Links** * https://twitter.com/xtranai/status/1266898175568338945 +* https://twitter.com/calvinwatkins ### Police fire rubber bullet on peaceful protestor | June 1st
Webhooks: add support for multi-file upload `Webhook.send()` now accepts a `files` kwarg holding a list of `File` objects, which are included in the HTTP request as `file1`, `file2` and so on. This is an undocumented feature of the Discord API, but is analogous with the client's sending of messages with multiple files.
@@ -104,13 +104,20 @@ class WebhookAdapter: # mocks a ConnectionState for appropriate use for Message return BaseUser(state=self, data=data) - def execute_webhook(self, *, payload, wait=False, file=None): + def execute_webhook(self, *, payload, wait=False, file=None, files=None): if file is not None: multipart = { 'file': file, 'payload_json': utils.to_json(payload) } data = None + elif files is not None: + multipart = { + 'payload_json': utils.to_json(payload) + } + for i, file in enumerate(files, start=1): + multipart['file%i' % i] = file + data = None else: data = payload multipart = None @@ -144,11 +151,11 @@ class AsyncWebhookAdapter(WebhookAdapter): data = utils.to_json(payload) if multipart: - file = multipart.pop('file', None) data = aiohttp.FormData() - if file: - data.add_field('file', file[1], filename=file[0], content_type=file[2]) for key, value in multipart.items(): + if key.startswith('file'): + data.add_field(key, value[1], filename=value[0], content_type=value[2]) + else: data.add_field(key, value) for tries in range(5): @@ -561,8 +568,8 @@ class Webhook: return self._adapter.edit_webhook(**payload) - def send(self, content=None, *, wait=False, username=None, avatar_url=None, - tts=False, file=None, embed=None, embeds=None): + def send(self, content=None, *, wait=False, username=None, avatar_url=None, tts=False, + file=None, files=None, embed=None, embeds=None): """|maybecoro| Sends a message using the webhook. @@ -596,7 +603,10 @@ class Webhook: tts: bool Indicates if the message should be sent using text-to-speech. file: :class:`File` - The file to upload. + The file to upload. This cannot be mixed with ``files`` parameter. + files: List[:class:`File`] + A list of files to send with the content. This cannot be mixed with the + ``file`` parameter. embed: :class:`Embed` The rich embed for the content to send. This cannot be mixed with ``embeds`` parameter. @@ -624,6 +634,8 @@ class Webhook: payload = {} + if files is not None and file is not None: + raise InvalidArgument('Cannot mix file and files keyword arguments.') if embeds is not None and embed is not None: raise InvalidArgument('Cannot mix embed and embeds keyword arguments.') @@ -650,6 +662,14 @@ class Webhook: return self._adapter.execute_webhook(wait=wait, file=to_pass, payload=payload) finally: file.close() + elif files is not None: + try: + to_pass = [(file.filename, file.open_file(), 'application/octet-stream') + for file in files] + return self._adapter.execute_webhook(wait=wait, files=to_pass, payload=payload) + finally: + for file in files: + file.close() else: return self._adapter.execute_webhook(wait=wait, payload=payload)
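A hypothetical usage sketch of the new kwarg (the webhook URL and file names are placeholders; the synchronous `RequestsWebhookAdapter` is used only to keep the example short):

```python
import discord

webhook = discord.Webhook.from_url(
    "https://discord.com/api/webhooks/123/abc",   # placeholder URL
    adapter=discord.RequestsWebhookAdapter(),
)

# The two attachments go out in one HTTP request, as file1 and file2.
webhook.send(
    "two attachments in one message",
    files=[discord.File("first.txt"), discord.File("second.txt")],
)
```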
An extra kill_process_psutil utility * Add a function that terminates a `psutil.Process` instance and all of its child processes. * The original kill_process function also uses `psutil` to kill child processes, but we need to catch the NoSuchProcess exception because, after a process is terminated, some related processes may also terminate.
@@ -29,7 +29,6 @@ def kill_process(proc, timeout=5, signal_=None, output=None): If alive, kills the process. First call ``terminate()`` or pass ``signal_`` if specified to terminate for up to time specified in timeout parameter. - If process hangs then call ``kill()``. :param proc: process to kill @@ -38,23 +37,16 @@ def kill_process(proc, timeout=5, signal_=None, output=None): :type timeout: ``int`` :param output: Optional file like object for writing logs. :type output: ``file`` + :return: Exit code of process + :rtype: ``int`` or ``NoneType`` """ _log = functools.partial(_log_proc, output=output) retcode = proc.poll() - if retcode is not None: return retcode - parent = psutil.Process(proc.pid) - for child in parent.children(recursive=True): - try: - child.send_signal(signal.SIGTERM) - except Exception as exc: - _log( - msg='While terminating child proc - {}'.format(exc), - warn=True - ) + child_procs = psutil.Process(proc.pid).children(recursive=True) if signal_ is not None: proc.send_signal(signal_) @@ -81,9 +73,72 @@ def kill_process(proc, timeout=5, signal_=None, output=None): warn=True ) + _, alive = psutil.wait_procs(child_procs, timeout=timeout) + for p in alive: + try: + p.kill() + except psutil.NoSuchProcess: + pass # already dead + except Exception as exc: + _log( + msg='While terminating child process - {}'.format(exc), + warn=True + ) + return proc.returncode +def kill_process_psutil(proc, timeout=5, signal_=None, output=None): + """ + If alive, kills the process (an instance of ``psutil.Process``). + Try killing the child process at first and then killing itself. + First call ``terminate()`` or pass ``signal_`` if specified + to terminate for up to time specified in timeout parameter. + If process hangs then call ``kill()``. + + :param proc: process to kill + :type proc: ``psutil.Process`` + :param timeout: timeout in seconds, defaults to 5 seconds + :type timeout: ``int`` + :param output: Optional file like object for writing logs. + :type output: ``file`` + :return: List of processes which are still alive + :rtype: ``list`` or ``psutil.Process`` + """ + _log = functools.partial(_log_proc, output=output) + + all_procs = proc.children(recursive=True) + [proc] + + try: + if signal_ is not None: + proc.send_signal(signal_) + else: + proc.terminate() + except psutil.NoSuchProcess: + pass # already dead + except Exception as exc: + _log( + msg='While terminating process - {}'.format(exc), + warn=True + ) + _, alive = psutil.wait_procs(all_procs, timeout=timeout) + + if len(alive) > 0: + for p in alive: + try: + p.kill() + except psutil.NoSuchProcess: + pass # already dead + except Exception as exc: + _log( + msg='Could not kill process - {}'.format(exc), + warn=True + ) + _, alive = psutil.wait_procs(alive, timeout=timeout) + + return alive + + DEFAULT_CLOSE_FDS = platform.system() != 'Windows'
Fix 12377 For Debian issue.
@@ -13,11 +13,12 @@ from .core import file_reader, file_writer # Register read and write methods into Astropy: # determine if it is 1) installed and 2) the correct version (v5.0+) try: + import astropy from astropy.utils.introspection import minversion except ImportError: ASTROPY_GE_5 = False else: - ASTROPY_GE_5 = minversion("astropy", "5.0") + ASTROPY_GE_5 = minversion(astropy, "5.0") if ASTROPY_GE_5: # Astropy is installed and v5.0+ so we import the following modules
Update Ads1115.py Use port variable
@@ -12,7 +12,7 @@ if ('virtual' in globals() and virtual): # This section is to be used if you use the i2c pins of the Arduino arduino = Runtime.start("Arduino","Arduino") -arduino.connect("COM3") +arduino.connect(port) # Sleep so that the Arduino can be initialized sleep(4) ads1115.attach(arduino,"1","0x48")
Fixed travis error: - Committed wrong ingest.py file.
@@ -38,6 +38,7 @@ def find_diff(input_type, output_type, index, time_size, **query): tiles_in = workflow.list_cells(product=input_type.name, **query) tiles_out = workflow.list_cells(product=output_type.name, **query) + #TODO(csiro) Remove duplicates based on dataset_id / time. Could contain duplicates. tasks = [{'tile': tile, 'tile_index': key} for key, tile in tiles_in.items() if key not in tiles_out] new_tasks = [] @@ -197,7 +198,10 @@ def create_task_list(index, output_type, year, source_type, config): query['x'] = Range(bounds['left'], bounds['right']) query['y'] = Range(bounds['bottom'], bounds['top']) + time_size = 1 + if 'time' in config['storage']['tile_size']: time_size = config['storage']['tile_size']['time'] + tasks = find_diff(source_type, output_type, index, time_size, **query) _LOG.info('%s tasks discovered', len(tasks))
update shell_plus to not save ipython history when using Jupyter xref:
@@ -204,7 +204,7 @@ class Command(BaseCommand): return {'django_extensions': ks} - def run_notebookapp(self, app_init, options, use_kernel_specs=True): + def run_notebookapp(self, app_init, options, use_kernel_specs=True, history=True): no_browser = options['no_browser'] if self.extra_args: @@ -235,6 +235,10 @@ class Command(BaseCommand): if not use_kernel_specs: notebook_arguments.extend(ipython_arguments) + # disable history if not already configured in some other way + if not history and not any(arg.startswith('--HistoryManager') for arg in ipython_arguments): + ipython_arguments.append('--HistoryManager.enabled=False') + if not callable(app_init): app = app_init warnings.warn('Initialize should be a callable not an app instance', DeprecationWarning) @@ -322,7 +326,7 @@ class Command(BaseCommand): return app def run_jupyterlab(): - self.run_notebookapp(app_init, options) + self.run_notebookapp(app_init, options, history=False) return run_jupyterlab @shell_runner(flags=['--plain'], name='plain Python')
swarming: improve logging in poll Print out the bot id earlier to help diagnose failures, especially with dimensions longer than 1500 bytes, which causes a BadValueError exception.
@@ -318,7 +318,7 @@ class _BotBaseHandler(_BotApiHandler): leased_indefinitely = None machine_type = None if bot_id: - logging.debug('Fetching bot info and settings') + logging.debug('Fetching bot info and settings for bot id: %s', bot_id) bot_info, bot_settings = ndb.get_multi([ bot_management.get_info_key(bot_id), bot_management.get_settings_key(bot_id)]) @@ -329,7 +329,6 @@ class _BotBaseHandler(_BotApiHandler): # Make sure bot self-reported ID matches the authentication token. Raises # auth.AuthorizationError if not. - logging.debug('Fetching bot group config') bot_group_cfg = bot_auth.validate_bot_id_and_fetch_config( bot_id, machine_type)
Use Function to implement fork. Summary: Pull Request resolved: This ensures normal optimization passes run for forked functions. Test Plan: Imported from OSS
@@ -371,8 +371,8 @@ struct CodeImpl { std::vector<IValue> constant_table_; std::vector<Operation> operator_table_; std::vector<Function*> function_table_; + std::vector<std::unique_ptr<GraphFunction>> forked_functions_; std::vector<TypePtr> type_table_; - std::vector<Code> code_table_; std::vector<std::function<void(std::vector<IValue>&)>> profile_function_table_; @@ -772,9 +772,11 @@ struct CodeImpl { void emitFork(Node* node) { emitLoadInputs(node->inputs()); - code_table_.emplace_back( - Code(node->g(attr::Subgraph), "<forked function>")); - insertInstruction(FORK, code_table_.size() - 1, node->inputs().size()); + std::unique_ptr<GraphFunction> forked_fn(new GraphFunction( + "<forked function>", node->g(attr::Subgraph), nullptr)); + forked_functions_.emplace_back(std::move(forked_fn)); + function_table_.emplace_back(forked_functions_.back().get()); + insertInstruction(FORK, function_table_.size() - 1, node->inputs().size()); } void emitWarn(Node* node) { @@ -1314,8 +1316,11 @@ struct InterpreterStateImpl : c10::intrusive_ptr_target { } break; case FORK: { // Move inputs to a separate stack + Function* forked_fn = af.functions[inst.X]; InterpreterState forked_interpreter( - frames.back().function->code_table_.at(inst.X)); + forked_fn->get_executor() + .getPlanFor(stack, GraphExecutor::getDefaultNumBailOuts()) + .code); InterpreterContinuation continuation( forked_interpreter, Stack(stack.end() - inst.N, stack.end()),
Enable elasticsearch in setup-elastic.sh By enabling elasticsearch in setup-elastic.sh we provide more support for persistent infra
@@ -59,6 +59,11 @@ else systemctl restart elasticsearch fi +if ! systemctl is-enabled --quiet elasticsearch; then + echo "[+] Enabling Elasticsearch" + systemctl enable elasticsearch +fi + if ! systemctl is-active --quiet elasticsearch; then echo "[!] Failed to start Elasticsearch!" && exit 3 fi
update ANSYSCDBMeshIO.read() to determine the true number of fields; fixes reading files with wrong nblock/eblock information. Also update make_format().
@@ -9,8 +9,8 @@ from sfepy.base.base import (complex_types, dict_from_keys_init, assert_, is_derived_class, ordered_iteritems, insert_static_method, output, get_default, get_default_attr, Struct, basestr) -from sfepy.base.ioutils \ - import skip_read_line, read_token, read_array, read_list, pt, enc, dec +from sfepy.base.ioutils import (skip_read_line, look_ahead_line, read_token, + read_array, read_list, pt, enc, dec) import os.path as op import six from six.moves import range @@ -2425,7 +2425,7 @@ class ANSYSCDBMeshIO(MeshIO): return ok @staticmethod - def make_format(format): + def make_format(format, nchar=1000): idx = []; dtype = []; start = 0; @@ -2439,6 +2439,8 @@ class ANSYSCDBMeshIO(MeshIO): aux = ret[2].partition('.') step = int(aux[0]) for j in range(int(ret[0])): + if (start + step) > nchar: + break idx.append((start, start+step)) start += step dtype.append(ret[1]) @@ -2476,12 +2478,11 @@ class ANSYSCDBMeshIO(MeshIO): if (kw == 'nblock'): # Solid keyword -> 3, otherwise 1 is the starting coors index. ic = 3 if len(row) == 3 else 1 - n_c = int(row[1]) fmt = fd.readline() fmt = fmt.strip()[1:-1].split(',') - idx, dtype = self.make_format(fmt) - idx = idx[:n_c] - nchar = idx[-1][1] + 1 + row = look_ahead_line(fd) + nchar = len(row) + idx, dtype = self.make_format(fmt, nchar) ii0, ii1 = idx[0] while True: row = fd.readline() @@ -2498,16 +2499,15 @@ class ANSYSCDBMeshIO(MeshIO): if (len(row) <= 2) or row[2].strip().lower() != 'solid': continue - n_c = int(row[1]) fmt = fd.readline() fmt = [fmt.strip()[1:-1]] - idx, dtype = self.make_format(fmt) - idx = idx[:n_c] + row = look_ahead_line(fd) + nchar = len(row) + idx, dtype = self.make_format(fmt, nchar) imi0, imi1 = idx[0] # Material id. inn0, inn1 = idx[8] # Number of nodes in line. ien0, ien1 = idx[10] # Element number. - nchar = idx[-1][1] + 1 ic0 = 11 while True: row = fd.readline()
Bug: Regenerate session_secret if it can't be used with oauth-proxy. The session_secret generated by 3.10 is 200 bytes, while oauth-proxy can only use a 16, 24 or 32 byte session_secret.
copy: content: "{{ 32 | lib_utils_oo_random_word }}" dest: "{{ generated_certs_dir }}/session_secret" - when: - - not session_secret_file.stat.exists + when: not session_secret_file.stat.exists or session_secret_file.stat.size > 50 # gen oauth_secret if necessary - name: Generate oauth secret
Limit guard in can_add_batch if no summary After a summary has been created, no more batches can be added, so the guard around queue and block sizes should only be used when there is no summary.
@@ -113,7 +113,8 @@ impl CandidateBlock { } pub fn can_add_batch(&self) -> bool { - self.max_batches == 0 || self.pending_batches.len() < self.max_batches + self.summary.is_none() + && (self.max_batches == 0 || self.pending_batches.len() < self.max_batches) } fn check_batch_dependencies_add_batch(&mut self, batch: &Batch) -> bool { @@ -469,7 +470,11 @@ impl CandidateBlock { builder .getattr(py, "block_header") .expect("BlockBuilder has no attribute 'block_header'") - .setattr(py, "consensus", cpython::PyBytes::new(py, consensus_data.as_slice())) + .setattr( + py, + "consensus", + cpython::PyBytes::new(py, consensus_data.as_slice()), + ) .expect("BlockHeader has no attribute 'consensus'"); self.sign_block(builder);
Accept underscore, as well as dash, separated dates The underscore separated format (e.g 2018_10) is used elsewhere in the system and it simplifies things if we can accept the same format here.
@@ -3,9 +3,9 @@ DEFAULT_NUM_MONTHS = 60 def generate_dates(end_str, months=None): """ - Given an end date as a string in YYYY-MM form, return a list of N - consecutive months as strings in YYYY-MM-01 form, with that month as the - final member + Given an end date as a string in YYYY-MM form (or the underscore separated + equivalent), return a list of N consecutive months as strings in YYYY-MM-01 + form, with that month as the final member """ if months is None: months = DEFAULT_NUM_MONTHS @@ -20,10 +20,10 @@ def generate_dates(end_str, months=None): def parse_date(date_str): """ - Given a date string in YYYY-MM form, return a pair of (year, month) - integers + Given a date string in YYYY-MM form (or the underscore separated + equivalent), return a pair of (year, month) integers """ - year_str, month_str = date_str.split('-')[:2] + year_str, month_str = date_str.replace('_', '-').split('-')[:2] assert len(year_str) == 4 assert len(month_str) == 2 return int(year_str), int(month_str)
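A quick usage check of both accepted formats; `parse_date` is restated minimally here so the snippet runs on its own:

```python
def parse_date(date_str):
    # underscore-separated input is normalised to the dash form before splitting
    year_str, month_str = date_str.replace('_', '-').split('-')[:2]
    return int(year_str), int(month_str)

assert parse_date('2018-10') == (2018, 10)
assert parse_date('2018_10') == (2018, 10)
print("both separators accepted")
```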