update ContactTerm to return sparse vector and matrix with active DOFs

new .call_function(), .eval_real()
update .function(), .get_fargs()

TODO: return sparse Gc directly from ccontres
@@ -2,6 +2,7 @@ import numpy as nm from sfepy.base.base import Struct from sfepy.terms.terms import Term +from sfepy.terms.extmods import terms class ContactInfo(Struct): """ @@ -23,13 +24,31 @@ class ContactTerm(Term): self.ci = None - @staticmethod - def function(out, geo, fmode): - out_qp = nm.zeros((out.shape[0], geo.n_qp) + out.shape[2:], - dtype=out.dtype) - status = geo.integrate(out, nm.ascontiguousarray(out_qp)) + def call_function(self, fargs): + try: + out, status = self.function(*fargs) + + except (RuntimeError, ValueError): + terms.errclear() + raise + + if status: + terms.errclear() + raise ValueError('term evaluation failed! (%s)' % self.name) + + return out, status - return status + def eval_real(self, shape, fargs, mode='eval', term_mode=None, + diff_var=None, **kwargs): + out, status = self.call_function(fargs) + if mode != 'weak': + raise ValueError('unsupported evaluation mode! (%s)' % mode) + + return out, status + + @staticmethod + def function(out_cc): + return out_cc, 0 def get_fargs(self, epss, virtual, state, mode=None, term_mode=None, diff_var=None, **kwargs): @@ -145,10 +164,17 @@ class ContactTerm(Term): Gc = nm.zeros(neq, dtype=nm.float64) activeGPs = GPs[:, 2*nsd+3] + + if diff_var is None: + max_num = 1 keyContactDetection = 1 + keyAssembleKc = 0 + + else: + max_num = 4 * (nsd * nsn)**2 * ngp * GPs.shape[0] + keyContactDetection = 0 keyAssembleKc = 1 - max_num = 4 * nsd * nsn * ngp * GPs.shape[0] print 'max_num:', max_num vals = nm.empty(max_num, dtype=nm.float64) rows = nm.empty(max_num, dtype=nm.int32) @@ -165,12 +191,24 @@ class ContactTerm(Term): print Gc.mean(), num print GPs print 'true num:', num - from sfepy.base.base import debug; debug() if diff_var is None: - fmode = 0 + from sfepy.discrete.variables import create_adof_conn + rows = nm.unique(create_adof_conn(nm.arange(len(Gc)), sd.econn, + nsd, 0)) + Gc = Gc[rows] + + eq = state.eq_map.eq + erows = eq[rows] + active = (erows >= 0) + out_cc = (Gc[active], erows[active]) else: - fmode = 1 + vals, rows, cols = vals[:num], rows[:num], cols[:num] + eq = state.eq_map.eq + + erows, ecols = eq[rows], eq[cols] + active = (erows >= 0) & (ecols >= 0) + out_cc = (vals[active], erows[active], ecols[active]) - return geo, fmode + return out_cc,
set halt level to none/5 for docutils parse function

This results in all errors being accumulated and not only one

fixes
@@ -219,7 +219,7 @@ def check( # noqa: CCR001 writer=writer, source_path=filename, settings_overrides={ - "halt_level": report_level, + "halt_level": 5, "report_level": report_level, "warning_stream": string_io, },
CodeSnippets: don't send snippets if the original message was deleted

Fixes BOT-13B
@@ -4,8 +4,8 @@ import textwrap from typing import Any from urllib.parse import quote_plus +import discord from aiohttp import ClientResponseError -from discord import Message from discord.ext.commands import Cog from bot.bot import Bot @@ -240,7 +240,7 @@ class CodeSnippets(Cog): return '\n'.join(map(lambda x: x[1], sorted(all_snippets))) @Cog.listener() - async def on_message(self, message: Message) -> None: + async def on_message(self, message: discord.Message) -> None: """Checks if the message has a snippet link, removes the embed, then sends the snippet contents.""" if message.author.bot: return @@ -249,7 +249,11 @@ class CodeSnippets(Cog): destination = message.channel if 0 < len(message_to_send) <= 2000 and message_to_send.count('\n') <= 15: + try: await message.edit(suppress=True) + except discord.NotFound: + # Don't send snippets if the original message was deleted. + return if len(message_to_send) > 1000 and message.channel.id != Channels.bot_commands: # Redirects to #bot-commands if the snippet contents are too long
timezone: Fix {set/get}_zone for legacy NILinuxRT

On legacy versions of NILinuxRT, /etc/localtime is a symlink to /etc/natinst/share/localtime. On these systems, {set/get}_zone must work directly on /etc/natinst/share/localtime, instead of the default file.

Conflicts:
	salt/modules/timezone.py
@@ -98,7 +98,7 @@ def _get_zone_sysconfig(): def _get_zone_etc_localtime(): - tzfile = '/etc/localtime' + tzfile = _get_localtime_path() tzdir = '/usr/share/zoneinfo/' tzdir_len = len(tzdir) try: @@ -281,8 +281,9 @@ def set_zone(timezone): if not os.path.exists(zonepath) and 'AIX' not in __grains__['os_family']: return 'Zone does not exist: {0}'.format(zonepath) - if os.path.exists('/etc/localtime'): - os.unlink('/etc/localtime') + tzfile = _get_localtime_path() + if os.path.exists(tzfile): + os.unlink(tzfile) if 'Solaris' in __grains__['os_family']: __salt__['file.sed']( @@ -300,7 +301,7 @@ def set_zone(timezone): __salt__['cmd.retcode'](cmd, python_shell=False) return False else: - os.symlink(zonepath, '/etc/localtime') + os.symlink(zonepath, tzfile) if 'RedHat' in __grains__['os_family']: __salt__['file.sed']( @@ -345,10 +346,10 @@ def zone_compare(timezone): return timezone == get_zone() if 'FreeBSD' in __grains__['os_family']: - if not os.path.isfile(_get_etc_localtime_path()): + if not os.path.isfile(_get_localtime_path()): return timezone == get_zone() - tzfile = _get_etc_localtime_path() + tzfile = _get_localtime_path() zonepath = _get_zone_file(timezone) try: return filecmp.cmp(tzfile, zonepath, shallow=False) @@ -364,7 +365,9 @@ def zone_compare(timezone): raise -def _get_etc_localtime_path(): +def _get_localtime_path(): + if 'nilrt' in __grains__['lsb_distrib_id']: + return '/etc/natinst/share/localtime' return '/etc/localtime' @@ -529,8 +532,9 @@ def set_hwclock(clock): 'Zone \'{0}\' does not exist'.format(zonepath) ) - os.unlink('/etc/localtime') - os.symlink(zonepath, '/etc/localtime') + tzfile = _get_localtime_path() + os.unlink(tzfile) + os.symlink(zonepath, tzfile) if 'Arch' in __grains__['os_family']: cmd = ['timezonectl', 'set-local-rtc',
Update super-admin-downgrade-from-super-content-import-permission.feature Some changes in the UI texts.
@@ -10,7 +10,7 @@ Background: And I uncheck the *Make super admin* checkbox Then I see that all checkboxes are unchecked and active And I see that the *Save changes* button is active - When I check the *Can import and export content channels* checkbox under *Device permissions* + When I check the *Can manage content on this device* checkbox under *Device permissions* And I see that *Save changes* is still active When I click *Save changes* Then I remain on this page
fix typo in `torch.sum` documentation

Summary: Notice that an extra colon was added to `:attr:`, so in , `dim` shows up as ":attr::_dim_". This patch fixes the issue.

Pull Request resolved:
@@ -4487,7 +4487,7 @@ Example:: .. function:: sum(input, dim, keepdim=False, dtype=None) -> Tensor Returns the sum of each row of the :attr:`input` tensor in the given -dimension :attr:`dim`. If :attr::`dim` is a list of dimensions, +dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, reduce over all of them. If :attr:`keepdim` is ``True``, the output tensor is of the same size
Startup script uses COUCHDB_ARGS_FILE for vm.args path

This seems to be a discrepancy from the start, see original pull requests:

* (code)
* (docs)

Note also the script seems unhappy with relative paths, so I've also snuck a wording change alongside the main fix here.
@@ -66,10 +66,10 @@ is the same as setting the ``ERL_FLAGS`` environment variable. If there is a need to use different ``vm.args`` or ``sys.config`` files, for example, in different locations to the ones provided by CouchDB, or you don't want to edit the original files, the default locations may be changed by -setting the COUCHDB_VM_ARGS_FILE or COUCHDB_SYSCONFIG_FILE environment +setting the COUCHDB_ARGS_FILE or COUCHDB_SYSCONFIG_FILE environment variables:: - export COUCHDB_VM_ARGS_FILE="/path/to/my/vm.args" + export COUCHDB_ARGS_FILE="/path/to/my/vm.args" export COUCHDB_SYSCONFIG_FILE="/path/to/my/sys.config" Parameter names and values
Fix geom_text when used with other stats

The problem was that, in the draw method, the input dataframe can have other columns computed by stats. It is best not to reuse it to hold the parameters to be plotted by matplotlib.
from __future__ import (absolute_import, division, print_function, unicode_literals) +import pandas as pd from matplotlib.text import Text from ..utils import to_rgba, suppress @@ -72,17 +73,15 @@ class geom_text(geom): def setup_data(self, data): parse = self.params['parse'] - format_string = self.params['format_string'] + fmt = self.params['format_string'] # format - if format_string: - data['label'] = [ - format_string.format(l) for l in data['label']] + if fmt: + data['label'] = [fmt.format(l) for l in data['label']] # Parse latex if parse: - data['label'] = [ - '${}$'.format(l) for l in data['label']] + data['label'] = ['${}$'.format(l) for l in data['label']] return data @@ -93,19 +92,19 @@ class geom_text(geom): # Bind color and alpha color = to_rgba(data['color'], data['alpha']) - # Put all ax.text parameters in dataframe so - # that each row represents a text instance - data.rename(columns={'label': 's', - 'angle': 'rotation', - 'lineheight': 'linespacing'}, - inplace=True) - data['color'] = color - data['horizontalalignment'] = params['hjust'] - data['verticalalignment'] = params['vjust'] - data['family'] = params['family'] - data['fontweight'] = params['fontweight'] - data['zorder'] = params['zorder'] - data['clip_on'] = True + # Create a dataframe for the plotting data required + # by ax.text + df = data[['x', 'y', 'size']].copy() + df['s'] = data['label'] + df['rotation'] = data['angle'] + df['linespacing'] = data['lineheight'] + df['color'] = color + df['horizontalalignment'] = params['hjust'] + df['verticalalignment'] = params['vjust'] + df['family'] = params['family'] + df['fontweight'] = params['fontweight'] + df['zorder'] = params['zorder'] + df['clip_on'] = True # 'boxstyle' indicates geom_label so we need an MPL bbox draw_label = 'boxstyle' in params @@ -113,7 +112,7 @@ class geom_text(geom): fill = to_rgba(data.pop('fill'), data['alpha']) if isinstance(fill, tuple): fill = [list(fill)] * len(data['x']) - data['facecolor'] = fill + df['facecolor'] = fill if params['boxstyle'] in ('round', 'round4'): boxstyle = '{},pad={},rounding_size={}'.format( @@ -132,21 +131,11 @@ class geom_text(geom): bbox = {'linewidth': params['label_size'], 'boxstyle': boxstyle} else: - with suppress(KeyError): - del data['fill'] bbox = {} - # Unwanted - del data['PANEL'] - del data['group'] - del data['alpha'] - for key in ('xmin', 'xmax', 'ymin', 'ymax'): - with suppress(KeyError): - del data[key] - # For labels add a bbox for i in range(len(data)): - kw = data.iloc[i].to_dict() + kw = df.iloc[i].to_dict() if draw_label: kw['bbox'] = bbox kw['bbox']['edgecolor'] = kw['color']
mesos-dns: bump to

Last commit is

commit
Author: kevin xu
Date:   Tue Mar 28 17:25:44 2017 +0800

    fix typo
"single_source" : { "kind": "git", "git": "https://github.com/mesosphere/mesos-dns.git", - "ref": "bf08ab7017207b7d7924462cfbaf7b952155a53e", + "ref": "af6ebc7a70f722b219f79f433b242e22667cfd4c", "ref_origin": "master" }, "username": "dcos_mesos_dns"
Raise `read_delim` exception with different msgs Separate `len(msg_bytes) == 0` and `msg_bytes[-1:] != b"\n"`, to raise `ParseError` with different messages.
@@ -73,8 +73,12 @@ def encode_delim(msg: bytes) -> bytes: async def read_delim(reader: Reader) -> bytes: msg_bytes = await read_varint_prefixed_bytes(reader) - if len(msg_bytes) == 0 or msg_bytes[-1:] != b"\n": - raise ParseError(f'msg_bytes is not delimited by b"\\n": msg_bytes={msg_bytes}') + if len(msg_bytes) == 0: + raise ParseError(f"`len(msg_bytes)` should not be 0") + if msg_bytes[-1:] != b"\n": + raise ParseError( + f'`msg_bytes` is not delimited by b"\\n": `msg_bytes`={msg_bytes}' + ) return msg_bytes[:-1]
removed Behance API

This URL is no longer accepting new clients
@@ -108,7 +108,6 @@ API | Description | Auth | HTTPS | CORS | API | Description | Auth | HTTPS | CORS | |---|---|---|---|---| | [Art Institute of Chicago](https://api.artic.edu/docs/) | Art | No | Yes | Yes | -| [Behance](https://www.behance.net/dev) | Design | `apiKey` | Yes | Unknown | | [ColourLovers](http://www.colourlovers.com/api) | Get various patterns, palettes and images | No | No | Unknown | | [Cooper Hewitt](https://collection.cooperhewitt.org/api) | Smithsonian Design Museum | `apiKey` | Yes | Unknown | | [Dribbble](http://developer.dribbble.com/v2/) | Design | `OAuth` | No | Unknown |
Use user key for genesis and proposal poet

This kubernetes poet file previously used the validator key for genesis and proposal.
@@ -115,13 +115,16 @@ items: echo 'waiting for poet.batch'; \ sleep 1; \ done && \ + if [ ! -e /root/.sawtooth/keys/my_key.priv ]; then \ + sawtooth keygen my_key; \ + fi && \ cp /poet-shared/poet.batch / && \ if [ ! -e config-genesis.batch ]; then \ - sawset genesis -k /etc/sawtooth/keys/validator.priv -o config-genesis.batch; \ + sawset genesis -k /root/.sawtooth/keys/my_key.priv -o config-genesis.batch; \ fi && \ if [ ! -e config.batch ]; then \ sawset proposal create \ - -k /etc/sawtooth/keys/validator.priv \ + -k /root/.sawtooth/keys/my_key.priv \ sawtooth.consensus.algorithm.name=PoET \ sawtooth.consensus.algorithm.version=0.1 \ sawtooth.poet.report_public_key_pem=\"$(cat /poet-shared/simulator_rk_pub.pem)\" \ @@ -137,9 +140,6 @@ items: if [ ! -e /var/lib/sawtooth/genesis.batch ]; then \ sawadm genesis config-genesis.batch config.batch poet.batch; \ fi && \ - if [ ! -e /root/.sawtooth/keys/my_key.priv ]; then \ - sawtooth keygen my_key; \ - fi && \ sawtooth-validator -vv \ --endpoint tcp://$SAWTOOTH_0_SERVICE_HOST:8800 \ --bind component:tcp://eth0:4004 \
Fixed a rare error of StreamContinuityManager

message: StreamContinuityManager disabled due to exception: 'NoneType' object is not subscriptable
@@ -96,6 +96,12 @@ class StreamContinuityManager(PlaybackActionManager): # otherwise Kodi reacts strangely if only one value of these is restored current_stream = self.current_streams['subtitle'] player_stream = player_state.get(STREAMS['subtitle']['current']) + if player_stream is None: + # I don't know the cause: + # Very rarely can happen that Kodi starts the playback with the subtitles enabled, + # but after some seconds subtitles become disabled, and 'currentsubtitle' of player_state data become 'None' + # Then _is_stream_value_equal() throw error. We do not handle it as a setting change from the user. + return is_sub_stream_equal = self._is_stream_value_equal(current_stream, player_stream) current_sub_enabled = self.current_streams['subtitleenabled']
Fix docs error Reference `django_q/conf.py`
@@ -92,7 +92,7 @@ retry ~~~~~ The number of seconds a broker will wait for a cluster to finish a task, before it's presented again. -Only works with brokers that support delivery receipts. Defaults to ``None``. +Only works with brokers that support delivery receipts. Defaults to 60. The value must be bigger than the time it takes to complete longest task, i.e. :ref:`timeout` must be less than retry value and all tasks must complete in less time than the selected retry time. If this does not hold, i.e. the retry value is less than timeout or less than it takes to finish a task,
Update LICENSE To 2019
The MIT License (MIT) -Copyright (c) 2013-2017 SimPEG Developers +Copyright (c) 2013-2019 SimPEG Developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in
Sync: only update in_guild field when a member leaves

The member and user update listeners should already be detecting and updating other fields, so by the time a user leaves, the rest of the fields should be up-to-date.

* Dedent condition which was indented too far
@@ -120,18 +120,8 @@ class Sync(Cog): @Cog.listener() async def on_member_remove(self, member: Member) -> None: - """Updates the user information when a member leaves the guild.""" - await self.bot.api_client.put( - f'bot/users/{member.id}', - json={ - 'avatar_hash': member.avatar, - 'discriminator': int(member.discriminator), - 'id': member.id, - 'in_guild': False, - 'name': member.name, - 'roles': sorted(role.id for role in member.roles) - } - ) + """Set the in_guild field to False when a member leaves the guild.""" + await self.patch_user(member.id, updated_information={"in_guild": False}) @Cog.listener() async def on_member_update(self, before: Member, after: Member) -> None:
New peer nodes for Feathercoin

* Change of hard coded peer servers for Feathercoin

The domain of the peer server hard coded in coins.py for Feathercoin has expired. Two new peers added.
@@ -1950,7 +1950,9 @@ class Feathercoin(Coin): RPC_PORT = 9337 REORG_LIMIT = 2000 PEERS = [ - 'electrumx-ch-1.feathercoin.ch s t', + 'electrumx-gb-1.feathercoin.network s t', + 'electrumx-gb-2.feathercoin.network s t', + 'electrumx-de-1.feathercoin.network s t', ]
Fix, "hasattr" didn't raise exceptions.

* The C-API "PyObject_HasAttr" that was used never raises an exception, but of course the built-in should, so inline the "hasattr" built-in code.
@@ -586,14 +586,53 @@ NUITKA_MAY_BE_UNUSED static PyObject *BUILTIN_HASATTR( PyObject *source, PyObjec CHECK_OBJECT( source ); CHECK_OBJECT( attr_name ); - int res = PyObject_HasAttr( source, attr_name ); +#if PYTHON_VERSION < 300 + + if ( PyUnicode_Check( attr_name ) ) + { + attr_name = _PyUnicode_AsDefaultEncodedString( attr_name, NULL ); + + if (unlikely( attr_name == NULL )) + { + return NULL; + } + } - if (unlikely( res == -1 )) + if (unlikely( !PyString_Check( attr_name ) )) { + PyErr_Format( + PyExc_TypeError, + "hasattr(): attribute name must be string" + ); + + return NULL; + } +#else + if (unlikely( !PyUnicode_Check( attr_name ) )) + { + PyErr_Format( + PyExc_TypeError, + "hasattr(): attribute name must be string" + ); + + return NULL; + } +#endif + + PyObject *value = PyObject_GetAttr( source, attr_name ); + + if ( value == NULL ) + { + if ( PyErr_ExceptionMatches( PyExc_AttributeError ) ) + { + CLEAR_ERROR_OCCURRED(); + return Py_False; + } + return NULL; } - return BOOL_FROM(res == 1); + return Py_True; } #if PYTHON_VERSION < 300
Separating out the process of rendering from determining paths. Also updating imports to not conflict with argument and variable names.
@@ -24,9 +24,9 @@ from . import locales from . import messages from . import podcache from . import podspec -from . import routes -from . import static -from . import storage +from . import routes as grow_routes +from . import static as grow_static +from . import storage as grow_storage from . import tags @@ -52,7 +52,7 @@ class Pod(object): and isinstance(other, Pod) and self.root == other.root) - def __init__(self, root, storage=storage.auto, env=None, load_extensions=True): + def __init__(self, root, storage=grow_storage.auto, env=None, load_extensions=True): self._yaml = utils.SENTINEL self.storage = storage self.root = (root if self.storage.is_cloud_storage @@ -61,7 +61,7 @@ class Pod(object): else environment.Env(environment.EnvConfig(host='localhost'))) self.locales = locales.Locales(pod=self) self.catalogs = catalog_holder.Catalogs(pod=self) - self.routes = routes.Routes(pod=self) + self.routes = grow_routes.Routes(pod=self) self._podcache = None self._disabled = set() @@ -219,12 +219,20 @@ class Pod(object): return output def export(self, suffix=None, append_slashes=False): - """Builds the pod, returning a mapping of paths to content.""" - output = {} + """Builds the pod, returning a mapping of paths to content based on pod routes.""" routes = self.get_routes() paths = [] for items in routes.get_locales_to_paths().values(): paths += items + output = self.export_paths(paths, suffix, append_slashes) + error_controller = routes.match_error('/404.html') + if error_controller: + output['/404.html'] = error_controller.render({}) + return output + + def export_paths(self, paths, suffix=None, append_slashes=False): + """Builds the pod, returning a mapping of paths to content.""" + output = {} text = 'Building: %(value)d/{} (in %(elapsed)s)' widgets = [progressbar.FormatLabel(text.format(len(paths)))] bar = progressbar.ProgressBar(widgets=widgets, maxval=len(paths)) @@ -250,9 +258,6 @@ class Pod(object): self.logger.error('Error building: {}'.format(controller)) raise bar.update(bar.value + 1) - error_controller = routes.match_error('/404.html') - if error_controller: - output['/404.html'] = error_controller.render({}) bar.finish() return output @@ -411,14 +416,14 @@ class Pod(object): if controller.KIND == messages.Kind.STATIC: serving_path = controller.match_pod_path(pod_path) if serving_path: - return static.StaticFile(pod_path, serving_path, locale=locale, + return grow_static.StaticFile(pod_path, serving_path, locale=locale, pod=self, controller=controller, fingerprinted=controller.fingerprinted, localization=controller.localization) text = ('Either no file exists at "{}" or the "static_dirs" setting was ' 'not configured for this path in {}.'.format( pod_path, self.FILE_PODSPEC)) - raise static.BadStaticFileError(text) + raise grow_static.BadStaticFileError(text) def get_podspec(self): return self.podspec
Update README.md Correct some typos
@@ -108,11 +108,11 @@ We define a set of running arguments in SpeechBrain, these arguments can be set - `nonfinite_patience`: 3, Number of times to ignore non-finite losses before stopping. - `progressbar`: default True, Whether to display a progressbar when training. -All these args allow you to run a Multigpu experiment (using Data_parallel or Distributed_Data_parallel in signle/multiple machines), use Automatic Mixed Precision Training, use Just In Time (JIT) compiler over your module, do gradient clipping, handling non-finite values and show a progress bar during training. +All these args allow one to run a multigpu experiment (using Data_parallel or Distributed_Data_parallel in single/multiple machines), use Automatic Mixed Precision Training, use Just In Time (JIT) compiler over your module, do gradient clipping, handling non-finite values and show a progress bar during training. Important: - The command line args will always override the hparams file args. -- Data Parallel (`data_parallel_backend`) and Distributed Data Parallel (DDP) can't be activated in same time. +- Data Parallel (`data_parallel_backend`) and Distributed Data Parallel (DDP) can't be activated at the same time. ### MultiGPU training using Data Parallel The common pattern for using MultiGPU training over a single machine with Data Parallel: @@ -125,7 +125,7 @@ The common pattern for using MultiGPU training over a single machine with Data P Important: the batch size for each GPU process will be: `batch_size / data_parallel_count`. So you should consider changing the batch_size value. ### MultiGPU training using Distributed Data Parallel -For using DDP, you should consider using `torch.distributed.launch` for setting the subprocess with the right Unix variables `local_rank` and `rank`. The `local_rank` variable allow to set the right `device` arg for each DDP subprocess, the `rank` variable (which is uniq for each subprocess) will be used for registring the subprocess rank to the DDP group. In that way, we can manage multigpu training over multiple machines. +For using DDP, you should consider using `torch.distributed.launch` for setting the subprocess with the right Unix variables `local_rank` and `rank`. The `local_rank` variable allows setting the right `device` arg for each DDP subprocess, the `rank` variable (which is unique for each subprocess) will be used for registering the subprocess rank to the DDP group. In that way, we can manage multigpu training over multiple machines. The common pattern for using MultiGPU training with DDP (consider you have 2 servers with 2 GPU: ```
Don't add LD_LIBRARY_PATH default for android auto

* Don't add LD_LIBRARY_PATH default for android auto

Adding a default value for LD_LIBRARY_PATH caused fuzzer libraries that were already packaged with the targets to not be used properly.

* Update libfuzzer.py
@@ -1247,6 +1247,9 @@ class AndroidLibFuzzerRunner(new_process.UnicodeProcessRunner, LibFuzzerCommon): # Add directory containing libclang_rt.ubsan_standalone-aarch64-android.so # to LD_LIBRARY_PATH. + ld_library_path = '' + if not android.settings.is_automotive(): + # TODO(MHA3): Remove this auto check. ld_library_path = android.sanitizer.get_ld_library_path_for_sanitizers() if ld_library_path: default_args.append('LD_LIBRARY_PATH=' + ld_library_path)
test: now test_parse_from_package uses a more unique name for the imported test package. Hopefully it won't collide with anything.
@@ -9,7 +9,7 @@ def test_parse_from_package(session, tmpdir): with tmpdir.as_cwd() as old_dir: sys.path.append(str(tmpdir)) - task_dir = tmpdir.mkdir("tasks") + task_dir = tmpdir.mkdir("funcs_test_parse_pkg") # This should be unique so other test imports won't interfere task_dir.join("__init__.py") task_dir.join("mytask.py").write(dedent(""" def do_stuff_1(): @@ -17,12 +17,12 @@ def test_parse_from_package(session, tmpdir): def do_stuff_2(): pass """)) - task = parse_task({"class": "FuncTask", "func": "tasks.mytask:do_stuff_1"}) + task = parse_task({"class": "FuncTask", "func": "funcs_test_parse_pkg.mytask:do_stuff_1"}) assert inspect.isfunction(task.func) assert "do_stuff_1" == task.func.__name__ - assert "tasks.mytask:do_stuff_1" == task.name + assert "funcs_test_parse_pkg.mytask:do_stuff_1" == task.name - task = parse_task({"class": "FuncTask", "func": "tasks.mytask.do_stuff_2"}) + task = parse_task({"class": "FuncTask", "func": "funcs_test_parse_pkg.mytask.do_stuff_2"}) assert inspect.isfunction(task.func) assert "do_stuff_2" == task.func.__name__ - assert "tasks.mytask:do_stuff_2" == task.name + assert "funcs_test_parse_pkg.mytask:do_stuff_2" == task.name
Proper function parameter default Change `protocol_version` default to `None` because `protocol_version` is determined below and has the same default. ZD
@@ -194,7 +194,7 @@ def _get_ssl_opts(): def _connect(contact_points=None, port=None, cql_user=None, cql_pass=None, - protocol_version=4): + protocol_version=None): ''' Connect to a Cassandra cluster.
fix: added null check for locals in frappe.get_meta

Since the locals object is not populated for the website, frappe.get_meta would throw an exception. This would break the grid row in web forms.
@@ -8,6 +8,7 @@ frappe.provide('frappe.meta.doctypes'); frappe.provide("frappe.meta.precision_map"); frappe.get_meta = function(doctype) { + if(Object.keys(locals).length === 0 && locals.constructor === Object) return null return locals["DocType"][doctype]; }
Update Cross version tests on spark to include allowlist suite. Update Cross version tests on spark to include allowlist suite.
@@ -296,4 +296,4 @@ spark: maximum: "3.1.1" run: | # TODO: Add datasource autologging tests - pytest --verbose tests/spark_autologging/ml/test_pyspark_ml_autologging.py --large + find tests/spark_autologging/ml -name 'test*.py' | xargs -L 1 pytest --verbose --large
Fix linting error. Suppress mypy. Suppress mypy error. Suppress mypy error.
@@ -9,6 +9,8 @@ from functools import update_wrapper from operator import attrgetter from threading import Lock from threading import Thread +from typing import Any +from typing import TYPE_CHECKING import click from werkzeug.utils import import_string @@ -36,7 +38,12 @@ else: # We technically have importlib.metadata on 3.8+, # but the API changed in 3.10, so use the backport # for consistency. - import importlib_metadata as metadata # type: ignore + if TYPE_CHECKING: + metadata: Any + else: + # we do this to avoid a version dependent mypy error + # because importlib_metadata is not installed in python3.10+ + import importlib_metadata as metadata class NoAppException(click.UsageError):
ValuePlug: Added support for Houdini 17.5

Compiler options for Houdini 17.5 make it complain that the const qualifier is already in the declaration and not necessary in the definition.
@@ -99,7 +99,7 @@ struct HashCacheKey { } - const bool operator == ( const HashCacheKey &other ) const + bool operator == ( const HashCacheKey &other ) const { return other.plug == plug && other.contextHash == contextHash; }
bug: AWG start order

* fix AWG start order bug for 5014

# Conflicts:
#	pycqed/measurement/waveform_control/pulsar.py

* comment on the logic of starting multiple AWGs
@@ -31,7 +31,8 @@ class Pulsar(Instrument): A meta-instrument responsible for all communication with the AWGs. Contains information about all the available awg-channels in the setup. Starting, stopping and programming and changing the parameters of the AWGs - should be done through Pulsar. Supports Tektronix AWG5014 and ZI UHFLI. + should be done through Pulsar. Supports Tektronix AWG5014 and partially + ZI UHFLI. Args: default_AWG: Name of the AWG that new channels get defined on if no @@ -180,7 +181,10 @@ class Pulsar(Instrument): def start(self): """ - Start the active AWGs. + Start the active AWGs. If multiple AWGs are used in a setup where the + slave AWGs are triggered by the master AWG, then the slave AWGs must be + running and waiting for trigger when the master AWG is started to + ensure synchronous playback. """ if self.master_AWG() is None: for AWG in self.used_AWGs(): @@ -189,7 +193,18 @@ class Pulsar(Instrument): for AWG in self.used_AWGs(): if AWG != self.master_AWG(): self._start_AWG(AWG) - time.sleep(0.2) # wait 0.2 second for all other awg-s to start + tstart = time.time() + for AWG in self.used_AWGs(): + if AWG != self.master_AWG(): + good = False + while time.time() - tstart < 10: + if self._is_AWG_running(AWG): + good = True + break + else: + time.sleep(0.1) + if not good: + raise Exception('AWG {} did not start in 10s'.format(AWG)) self._start_AWG(self.master_AWG()) def stop(self): @@ -532,6 +547,15 @@ setTrigger(0); else: raise ValueError('Unsupported AWG type: {}'.format(type(obj))) + def _is_AWG_running(self, AWG): + obj = self.AWG_obj(AWG=AWG) + if isinstance(obj, Tektronix_AWG5014): + return obj.get_state() != 'Idle' + elif isinstance(obj, UHFQC): + raise NotImplementedError() + else: + raise ValueError('Unsupported AWG type: {}'.format(type(obj))) + def _set_default_AWG(self, AWG): self.AWG = self.AWG_obj(AWG=AWG)
Added LectServe to Calendar

LectServe is a Protestant lectionary featuring the Revised Common Lectionary and the ACNA Lectionary
@@ -97,6 +97,7 @@ For information on contributing to this project, please see the [contributing gu | Church Calendar | Catholic liturgical calendar | No | No | [Go!](http://calapi.inadiutorium.cz/) | | Date and Time | Global Date and Time | No | No | [Go!](http://www.timeanddate.com/services/api/) | | Holidays | Free API for obtaining information about holidays. | No | No | [Go!](http://holidayapi.com/) | +| LectServe | Protestant liturgical calendar | No | No | [Go!](http://www.lectserve.com) | | Non-Working Days | Database of ICS files for non working days | No | Yes | [Go!](https://github.com/gadael/icsdb) | ### Cloud Storage & File Sharing
Updates match_aov_pattern() logic to handle empty regex

Using `is not None` to simplify code and handle empty regex cases.
@@ -19,9 +19,6 @@ def match_aov_pattern(host_name, aov_patterns, render_file_name): bool: Review state for rendered file (render_file_name). """ aov_pattern = aov_patterns.get(host_name, []) - if aov_pattern: - if re.match(aov_pattern, render_file_name): - preview = True - return preview - else: + if not aov_pattern: return False + return re.match(aov_pattern, render_file_name) is not None
unpin metasv

pinning causes installation issues
@@ -94,7 +94,7 @@ bio_nextgen: - manta;env=python2 - maxentscan - mbuffer - - metasv=0.4.0;env=python2 + - metasv;env=python2 - minimap2 - mintmap - mirdeep2=2.0.0.7
FAQ: add processing time of DDL

* FAQ: add processing time of DDL

Via:

* FAQ: address comments
* FAQ: update wording
@@ -386,13 +386,23 @@ The offline node usually indicates the TiKV node. You can determine whether the The lease parameter (`--lease=60`) is set from the command line when starting a TiDB server. The value of the lease parameter impacts the Database Schema Changes (DDL) speed of the current session. In the testing environments, you can set the value to 1s for to speed up the testing cycle. But in the production environments, it is recommended to set the value to minutes (for example, 60) to ensure the DDL safety. +#### What is the processing time of a DDL operation? + +The processing time is different for different scenarios. Generally, you can consider the following three scenarios: + +1. The `Add Index` operation with a relatively small number of rows in the corresponding data table: about 3s +2. The `Add Index` operation with a relatively large number of rows in the corresponding data table: the processing time depends on the specific number of rows and the QPS at that time (the `Add Index` operation has a lower priority than ordinary SQL operations) +3. Other DDL operations: about 1s + +Besides, if the TiDB server instance that receives the DDL request is the same TiDB server instance that the DDL owner is in, the first and third scenarios above might cost dozens to hundreds of milliseconds. + #### Why it is very slow to run DDL statements sometimes? Possible reasons: - If you run multiple DDL statements together, the last few DDL statements might run slowly. This is because the DDL statements are executed serially in the TiDB cluster. - After you start the cluster successfully, the first DDL operation may take a longer time to run, usually around 30s. This is because the TiDB cluster is electing the leader that processes DDL statements. -- In rolling updates or shutdown updates, the processing time of DDL statements in the first ten minutes after starting TiDB is affected by the server stop sequence (stopping PD -> TiDB), and the condition where TiDB does not clean up the registration data in time because TiDB is stopped using the `kill -9` command. When you run DDL statements during this period, for the state change of each DDL, you need to wait for 2 * lease (lease = 10s). +- The processing time of DDL statements in the first ten minutes after starting TiDB would be much longer than the normal case if you meet the following conditions: 1) TiDB cannot communicate with PD as usual when you are stopping TiDB (including the case of power failure); 2) TiDB fails to clean up the registration data from PD in time because TiDB is stopped by the `kill -9` command. If you run DDL statements during this period, for the state change of each DDL, you need to wait for 2 * lease (lease = 45s). - If a communication issue occurs between a TiDB server and a PD server in the cluster, the TiDB server cannot get or update the version information from the PD server in time. In this case, you need to wait for 2 * lease for the state processing of each DDL. #### Can I use S3 as the backend storage engine in TiDB?
Fix bug in get_between_ngrams Closes
@@ -40,7 +40,7 @@ def get_between_ngrams(c, attrib="words", n_min=1, n_max=1, lower=True): ): yield ngram else: # span0.get_word_start_index() > span1.get_word_start_index() - for ngram in get_left_ngrams( + for ngram in get_right_ngrams( span1, window=distance - 1, attrib=attrib,
dplay: add name of the episode in the filename fixes:
@@ -62,11 +62,12 @@ class Dplay(Service): show = match.group(1) season = jsondata["data"]["attributes"]["seasonNumber"] episode = jsondata["data"]["attributes"]["episodeNumber"] + name = jsondata["data"]["attributes"]["name"] if is_py2: show = filenamify(show).encode("latin1") else: show = filenamify(show) - return filenamify("{0}.s{1:02d}e{2:02d}".format(show, int(season), int(episode))) + return filenamify("{0}.s{1:02d}e{2:02d}.{3}".format(show, int(season), int(episode), name)) def find_all_episodes(self, options): parse = urlparse(self.url)
Add libncurses5-dev to apt install line

We need libncurses5-dev installed, otherwise the built python won't be able to build in support for curses, which is what we need for populate.py
@@ -15,7 +15,7 @@ echo "Updating apt, preparing to install libssl-dev, gettext, portaudio19-dev an # install dependencies sudo apt-get update # libssl-dev required to get the python _ssl module working -sudo apt-get install libssl-dev gettext portaudio19-dev libasound2-dev -y +sudo apt-get install libssl-dev gettext libncurses5-dev portaudio19-dev libasound2-dev -y # installing python 2.7.13 echo 'Installing python 3.5.3 to ~/.naomi/local'
Update tune_relay_vta.py to support single board

Support single pynq board run; the change is credited to https://github.com/i24361's change, fixes the save fail issues, and the changes are discussed in
@@ -340,18 +340,6 @@ def register_vta_tuning_tasks(): def tune_and_evaluate(tuning_opt): - if env.TARGET != "sim": - # Get remote from fleet node - remote = autotvm.measure.request_remote( - env.TARGET, tracker_host, tracker_port, timeout=10000 - ) - # Reconfigure the JIT runtime and FPGA. - vta.reconfig_runtime(remote) - vta.program_fpga(remote, bitstream=None) - else: - # In simulation mode, host the RPC server locally. - remote = rpc.LocalSession() - # Register VTA tuning tasks register_vta_tuning_tasks() @@ -407,6 +395,19 @@ def tune_and_evaluate(tuning_opt): print("Tuning...") tune_tasks(tasks, **tuning_opt) + # evaluate with tuning history + if env.TARGET != "sim": + # Get remote from fleet node + remote = autotvm.measure.request_remote( + env.TARGET, tracker_host, tracker_port, timeout=10000 + ) + # Reconfigure the JIT runtime and FPGA. + vta.reconfig_runtime(remote) + vta.program_fpga(remote, bitstream=None) + else: + # In simulation mode, host the RPC server locally. + remote = rpc.LocalSession() + # compile kernels with history best records with autotvm.tophub.context(target, extra_files=[log_file]): # Compile network @@ -425,9 +426,9 @@ def tune_and_evaluate(tuning_opt): # Export library print("Upload...") temp = utils.tempdir() - lib.save(temp.relpath("graphlib.o")) - remote.upload(temp.relpath("graphlib.o")) - lib = remote.load_module("graphlib.o") + lib.export_library(temp.relpath("graphlib.tar")) + remote.upload(temp.relpath("graphlib.tar")) + lib = remote.load_module("graphlib.tar") # Generate the graph runtime ctx = remote.ext_dev(0) if device == "vta" else remote.cpu(0)
[client] Update default value to be of the right type.

In optparse, if you use type='float', default=0, the default value type will be int. That's cute.
@@ -1176,7 +1176,7 @@ class TaskOutputStdoutOption(optparse.Option): def add_collect_options(parser): parser.server_group.add_option( - '-t', '--timeout', type='float', default=0, + '-t', '--timeout', type='float', default=0., help='Timeout to wait for result, set to -1 for no timeout and get ' 'current state; defaults to waiting until the task completes') parser.group_logging.add_option(
Simplify test so that it only concerns one epoch

Rather than testing multiple redundantly
@@ -200,20 +200,13 @@ def test_only_process_eth1_data_votes_per_period(sample_beacon_state_params, con @pytest.mark.parametrize( "total_balance," "current_epoch_boundary_attesting_balance," - "previous_epoch_boundary_attesting_balance," "expected,", ( ( - 1500 * GWEI_PER_ETH, 1000 * GWEI_PER_ETH, 1000 * GWEI_PER_ETH, (True, True), + 1500 * GWEI_PER_ETH, 1000 * GWEI_PER_ETH, True, ), ( - 1500 * GWEI_PER_ETH, 1000 * GWEI_PER_ETH, 999 * GWEI_PER_ETH, (True, False), - ), - ( - 1500 * GWEI_PER_ETH, 999 * GWEI_PER_ETH, 1000 * GWEI_PER_ETH, (False, True), - ), - ( - 1500 * GWEI_PER_ETH, 999 * GWEI_PER_ETH, 999 * GWEI_PER_ETH, (False, False), + 1500 * GWEI_PER_ETH, 999 * GWEI_PER_ETH, False, ), ) ) @@ -223,10 +216,8 @@ def test_is_epoch_justifiable( config, expected, total_balance, - previous_epoch_boundary_attesting_balance, current_epoch_boundary_attesting_balance): current_epoch = 5 - previous_epoch = 4 from eth2.beacon.state_machines.forks.serenity import epoch_processing @@ -236,8 +227,6 @@ def test_is_epoch_justifiable( def mock_get_epoch_boundary_attesting_balance(state, attestations, epoch, config): if epoch == current_epoch: return current_epoch_boundary_attesting_balance - elif epoch == previous_epoch: - return previous_epoch_boundary_attesting_balance else: raise Exception("ensure mock is matching on a specific epoch") @@ -270,21 +259,14 @@ def test_is_epoch_justifiable( mock_get_active_validator_indices, ) - current_epoch_justifiable = _is_epoch_justifiable( + epoch_justifiable = _is_epoch_justifiable( sample_state, sample_state.current_epoch_attestations, current_epoch, - config - ) - - previous_epoch_justifiable = _is_epoch_justifiable( - sample_state, - sample_state.previous_epoch_attestations, - previous_epoch, config, ) - assert (current_epoch_justifiable, previous_epoch_justifiable) == expected + assert epoch_justifiable == expected @pytest.mark.parametrize(
Add ScrapingAnt API to the Development category

I've added the ScrapingAnt API in the README.md file. It's a great API with free usage available.
@@ -286,6 +286,7 @@ API | Description | Auth | HTTPS | CORS | | [ReqRes](https://reqres.in/ ) | A hosted REST-API ready to respond to your AJAX requests | No | Yes | Unknown | | [Scraper.AI](https://docs.scraper.ai/#/) | Extract and monitor data from any website | `apiKey` | Yes | Unknown | | [ScraperApi](https://www.scraperapi.com) | Easily build scalable web scrapers | `apiKey` | Yes | Unknown | +| [ScrapingAnt](https://docs.scrapingant.com) | Headless Chrome scraping with a simple API | `apiKey` | Yes | Unknown | | [ScreenshotAPI.net](https://screenshotapi.net/) | Create pixel-perfect website screenshots | `apiKey` | Yes | Yes | | [SHOUTCLOUD](http://shoutcloud.io/) | ALL-CAPS AS A SERVICE | No | No | Unknown | | [StackExchange](https://api.stackexchange.com/) | Q&A forum for developers | `OAuth` | Yes | Unknown |
Don't reimplement signal size

no need to support super-old h5py
@@ -1245,9 +1245,9 @@ class _NXdataXYVScatterView(DataView): x_axis, y_axis = nxd.axes[-2:] if x_axis is None: - x_axis = numpy.arange(nxd.signal_size) + x_axis = numpy.arange(nxd.signal.size) if y_axis is None: - y_axis = numpy.arange(nxd.signal_size) + y_axis = numpy.arange(nxd.signal.size) x_label, y_label = nxd.axes_names[-2:] if x_label is not None:
refactor(minor): frappe.has_permission

The throw block was very clearly broken. Referencing frappe.throw inside __init__.py, rip. Added a drop-in replacement msgprint call
@@ -740,17 +740,26 @@ def has_permission(doctype=None, ptype="read", doc=None, user=None, verbose=Fals :param doc: [optional] Checks User permissions for given doc. :param user: [optional] Check for given user. Default: current user. :param parent_doctype: Required when checking permission for a child DocType (unless doc is specified).""" + import frappe.permissions + if not doctype and doc: doctype = doc.doctype - import frappe.permissions out = frappe.permissions.has_permission(doctype, ptype, doc=doc, verbose=verbose, user=user, raise_exception=throw, parent_doctype=parent_doctype) + if throw and not out: - if doc: - frappe.throw(_("No permission for {0}").format(doc.doctype + " " + doc.name)) - else: - frappe.throw(_("No permission for {0}").format(doctype)) + # mimics frappe.throw + document_label = f"{doc.doctype} {doc.name}" if doc else doctype + msgprint( + _("No permission for {0}").format(document_label), + raise_exception=ValidationError, + title=None, + indicator='red', + is_minimizable=None, + wide=None, + as_list=False + ) return out
fix_sub_duration

Only add this flag if subtitle streams are present
@@ -593,10 +593,12 @@ class MkvtoMp4: }, 'audio': audio_settings, 'subtitle': subtitle_settings, - 'preopts': ['-fix_sub_duration'], 'postopts': ['-threads', self.threads] } + if len(options['subtitle']) > 0: + option['preopts'] = ['-fix_sub_duration'] + # If using h264qsv, add the codec in front of the input for decoding if vcodec == "h264qsv" and info.video.codec.lower() == "h264" and self.qsv_decoder and (info.video.video_level / 10) < 5: options['preopts'].extend(['-vcodec', 'h264_qsv'])
Fixed selecting using handles bug

Fixed enormously large cursor_x returned by get_cursor_from_xy when the touch is to the right of the last symbol of the line
@@ -1349,8 +1349,10 @@ class TextInput(FocusBehavior, Widget): ): cursor_x = i break + else: + cursor_x = len(lines[cursor_y]) - return int(cursor_x), int(cursor_y) + return cursor_x, cursor_y # # Selection control
[cleanup] Remove WikimediaSiteTestCase class

The wmf flag was never used
@@ -1137,14 +1137,7 @@ class DefaultDrySiteTestCase(DefaultSiteTestCase): dry = True -class WikimediaSiteTestCase(TestCase): - - """Test class uses only WMF sites.""" - - wmf = True - - -class WikimediaDefaultSiteTestCase(DefaultSiteTestCase, WikimediaSiteTestCase): +class WikimediaDefaultSiteTestCase(DefaultSiteTestCase): """Test class to run against a WMF site, preferring the default site."""
Bug Store FTL translations with trailing newline

That's because we need to serialize translations consistently with the python-fluent serializer. Otherwise translations get re-imported.
@@ -277,7 +277,7 @@ var Pontoon = (function (my) { translation = ' = ' + translation; } - return entity.key + translation; + return entity.key + translation + '\n'; },
[NV] - Curr. Hosp, Curr. ICU, and Curr. Vent

Quaternary and quinary screenshots for NV now pull from their Hospitalizations by County tab and hover over their respective metrics. For Curr. Hosp., the hover over only reveals the confirmed value. The suspected value may require a separate screenshot.
@@ -433,6 +433,16 @@ quaternary: message: clicking on 'Antibody Tests - Test Based'. + NV: + overseerScript: > + page.manualWait(); + await page.waitForDelay(10000); + page.mouse.move(920, 406); + page.mouse.click(920, 406); + await page.waitForDelay(8000); + page.done(); + message: hospitalizations for NV quaternary + PR: overseerScript: > await page.waitForNavigation({waitUntil:"domcontentloaded"}); @@ -471,3 +481,12 @@ quinary: page.click("#ember772"); page.done(); message: click button for PR confirmed Deaths ("Muertes Confirmades") + + NV: + overseerScript: > + page.manualWait(); + await page.waitForDelay(10000); + page.mouse.move(926, 618); + await page.waitForDelay(10000); + page.done(); + message: hover over ICU and ventilators for NV quinary
pkg_implementation_spec_ada.mako: minor reformatting TN:
@@ -526,6 +526,7 @@ package ${ada_lib_name}.Analysis.Implementation is type ${root_node_value_type} is abstract tagged record Parent : ${root_node_type_name} := null; + -- Reference to the parent node, or null if this is the root one Unit : Analysis_Unit := No_Analysis_Unit; -- Reference to the analysis unit that owns this node
Fix a bad docstring in provisioning blocks module

The docstring title described the opposite callback behavior of what actually happens.

TrivialFix
@@ -80,7 +80,7 @@ def add_provisioning_component(context, object_id, object_type, entity): @db_api.retry_if_session_inactive() def remove_provisioning_component(context, object_id, object_type, entity, standard_attr_id=None): - """Removes a provisioning block for an object with triggering a callback. + """Remove a provisioning block for an object without triggering a callback. Removes a provisioning block without triggering a callback. A user of this module should call this when a block is no longer correct. If the block has
Raise floor version of botocore Needed for the ``ResourceInUseException``.
@@ -8,7 +8,7 @@ with open('README.rst') as readme_file: install_requires = [ 'click>=6.6,<7.0', - 'botocore>=1.5.40,<2.0.0', + 'botocore>=1.10.48,<2.0.0', 'typing==3.6.4', 'six>=1.10.0,<2.0.0', 'pip>=9,<11',
Change "target" to "file" The open_file command expects "file".
@@ -90,7 +90,7 @@ TEMPLATES = dict( { "caption": "README", "command": "open_file", "args": { - "target": "\${packages}/$1/README.md" + "file": "\${packages}/$1/README.md" } }, { "caption": "-" },
Add linecache module to skipped modules for pytest plugin

see

fixes the problem under Python 3, but not under Python 2
@@ -8,11 +8,14 @@ def my_fakefs_test(fs): fs.create_file('/var/data/xx1.txt') assert os.path.exists('/var/data/xx1.txt') """ +import linecache + import py import pytest from pyfakefs.fake_filesystem_unittest import Patcher Patcher.SKIPMODULES.add(py) # Ignore pytest components when faking filesystem +Patcher.SKIPMODULES.add(linecache) # Seems to be used by pytest internally @pytest.fixture
Added lock param to the issuance

Now a token can be issued, locked and transferred in one tx
"value":">QQ??If" }, "753000":{ - "value":">QQ???If" + "value":">QQ????If" } }, "testnet":{ }, "2288000":{ "value":">QQ???If" + }, + "2342000":{ + "value":">QQ????If" } } }, "value":26 }, "753000":{ - "value":27 + "value":28 } }, "testnet":{ }, "2288000":{ "value":27 + }, + "2342000":{ + "value":28 } } }, "value":">QQ?B" }, "753000":{ - "value":">QQ??B" + "value":">QQ???B" } }, "testnet":{ }, "2288000":{ "value":">QQ??B" + }, + "2342000":{ + "value":">QQ???B" } } }, "value":18 }, "753000":{ - "value":19 + "value":20 } }, "testnet":{ }, "2288000":{ "value":19 + }, + "2342000":{ + "value":20 } } }
project: Ignore failure to remove the sample hooks

Removing the sample hooks is just clean up, so if repo cannot remove a sample hook that should not cause it to fail.
@@ -2554,7 +2554,10 @@ class Project(object): # Delete sample hooks. They're noise. for hook in glob.glob(os.path.join(hooks, '*.sample')): + try: platform_utils.remove(hook, missing_ok=True) + except PermissionError: + pass for stock_hook in _ProjectHooks(): name = os.path.basename(stock_hook)
Update __init__.py update version for nccl & mpi uint8
# file 'LICENSE.txt', which is part of this source code package. # *************************************************************** -__version__ = '1.2.3.103' +__version__ = '1.2.3.104' from jittor_utils import lock with lock.lock_scope(): ori_int = int
Update setup.py to use marathon >= 0.9.2

Jenkins doesn't seem to be respecting requirements.txt, it's still using 0.9.0
@@ -46,7 +46,7 @@ setup( 'isodate >= 0.5.0', 'jsonschema[format]', 'kazoo >= 2.0.0', - 'marathon >= 0.9.0', + 'marathon >= 0.9.2', 'progressbar2 >= 3.10.0', 'pyramid >= 1.8', 'pymesos >= 0.2.0',
Updated Skype for Business information

* Updated Skype for Business information

Included the link to the plugin that is developed by Mattermost itself instead of kosgrz.

* Update video-and-audio-calling.rst
@@ -34,13 +34,13 @@ BigBlueButton - Supports a self-hosted on-prem solution, and a vendor-hosted cloud solution. - Source code and docs available at: https://github.com/blindsidenetworks/mattermost-plugin-bigbluebutton. -Skype for Business -~~~~~~~~~~~~~~~~~~~~ +Skype for Business (Beta) +~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Start and join voice calls, video calls and use screensharing with your team members, developed by kosgrz. +- Start and join voice calls, video calls and use screensharing with your team members, developed by kosgrz, and maintained by Mattermost. - Clicking a video icon in a Mattermost channel invites team members to join a Skype for Business call, hosted using the credentials of the user who initiated the call. - Supports a vendor-hosted cloud solution. -- Source code and docs available at: https://github.com/kosgrz/mattermost-plugin-skype4business. +- Source code and docs available at: https://github.com/mattermost/mattermost-plugin-skype4business WebRTC ~~~~~~~~~~~~~~~~~~~~
Introduce decorator and use on collect_inactive_awws
@@ -179,6 +179,15 @@ SQL_FUNCTION_PATHS = [ ] +# Tasks that are only to be run on ICDS_ENVS should be marked +# with @only_icds_periodic_task rather than @periodic_task +if settings.SERVER_ENVIRONMENT in settings.ICDS_ENVS: + only_icds_periodic_task = periodic_task +else: + def only_icds_periodic_task(**kwargs): + return lambda fn: fn + + @periodic_task(run_every=crontab(minute=0, hour=18), acks_late=True, queue='icds_aggregation_queue') def run_move_ucr_data_into_aggregation_tables_task(): @@ -987,15 +996,10 @@ def _get_value(data, field): return getattr(data, field) or default -# This task causes issues on the india environment -# so it's limited to only ICDS_ENVS -if settings.SERVER_ENVIRONMENT in settings.ICDS_ENVS: - @periodic_task(run_every=crontab(minute=30, hour=18), acks_late=True, queue='icds_aggregation_queue') +# This task caused memory spikes once a day on the india env +# before it was switched to icds-only (June 2019) +@only_icds_periodic_task(run_every=crontab(minute=30, hour=18), acks_late=True, queue='icds_aggregation_queue') def collect_inactive_awws(): - _collect_inactive_awws() - - -def _collect_inactive_awws(): celery_task_logger.info("Started updating the Inactive AWW") filename = "inactive_awws_%s.csv" % date.today().strftime('%Y-%m-%d') last_sync = IcdsFile.objects.filter(data_type='inactive_awws').order_by('-file_added').first()
Added local logging handler and using it instead of global

Fixes
@@ -21,6 +21,8 @@ from abc import ABCMeta import logging import warnings import re +import sys + from ..compat import add_metaclass from ..exceptions import XMLSchemaTypeError, XMLSchemaURLError, XMLSchemaKeyError, \ @@ -56,7 +58,10 @@ from .wildcards import XsdAnyElement, XsdAnyAttribute, Xsd11AnyElement, \ from .globals_ import XsdGlobals logger = logging.getLogger('xmlschema') -logging.basicConfig(format='[%(levelname)s] %(message)s') +logging_formater = logging.Formatter('[%(levelname)s] %(message)s') +logging_handler = logging.StreamHandler(sys.stderr) +logging_handler.setFormatter(logging_formater) +logger.addHandler(logging_handler) XSD_VERSION_PATTERN = re.compile(r'^\d+\.\d+$')
Fix oc group get Fixes
@@ -1522,7 +1522,7 @@ class OCGroup(OpenShiftCLI): result = self._get(self.kind, self.config.name) if result['returncode'] == 0: self.group = Group(content=result['results'][0]) - elif 'groups \"{}\" not found'.format(self.config.name) in result['stderr']: + elif 'groups.user.openshift.io \"{}\" not found'.format(self.config.name) in result['stderr']: result['returncode'] = 0 result['results'] = [{}]
2.10.4 Automatically generated by python-semantic-release
@@ -9,7 +9,7 @@ https://community.home-assistant.io/t/echo-devices-alexa-as-media-player-testers """ from datetime import timedelta -__version__ = "2.10.3" +__version__ = "2.10.4" PROJECT_URL = "https://github.com/custom-components/alexa_media_player/" ISSUE_URL = "{}issues".format(PROJECT_URL)
Cleanup, avoid code duplication

* This is most probably a remainder of previous re-factorings that introduced a single method to define the entries.
@@ -77,50 +77,19 @@ def getMetapathLoaderBodyCode(other_modules): metapath_module_decls = [] for other_module in other_modules: - if other_module.isUncompiledPythonModule(): - code_data = other_module.getByteCode() - is_package = other_module.isUncompiledPythonPackage() - - flags = ["NUITKA_BYTECODE_FLAG"] - if is_package: - flags.append("NUITKA_PACKAGE_FLAG") - - metapath_loader_inittab.append( - template_metapath_loader_bytecode_module_entry - % { - "module_name": other_module.getFullName(), - "bytecode": stream_data.getStreamDataOffset(code_data), - "size": len(code_data), - "flags": " | ".join(flags), - } - ) - else: metapath_loader_inittab.append( getModuleMetapathLoaderEntryCode(module=other_module) ) if other_module.isCompiledPythonModule(): metapath_module_decls.append( - "extern PyObject *modulecode_%(module_identifier)s(char const *);" + "extern PyObject *modulecode_%(module_identifier)s(PyObject *);" % {"module_identifier": other_module.getCodeName()} ) for uncompiled_module in getUncompiledNonTechnicalModules(): - code_data = uncompiled_module.getByteCode() - is_package = uncompiled_module.isUncompiledPythonPackage() - - flags = ["NUITKA_BYTECODE_FLAG"] - if is_package: - flags.append("NUITKA_PACKAGE_FLAG") - metapath_loader_inittab.append( - template_metapath_loader_bytecode_module_entry - % { - "module_name": uncompiled_module.getFullName(), - "bytecode": stream_data.getStreamDataOffset(code_data), - "size": len(code_data), - "flags": " | ".join(flags), - } + getModuleMetapathLoaderEntryCode(module=uncompiled_module) ) return template_metapath_loader_body % {
In addition to the fix, I propose a solution for the meta class selection of a model element to determine the object processor with (hopefully) minimal performance impact...
@@ -564,20 +564,41 @@ def parse_tree_to_objgraph(parser, parse_tree, file_name=None, if result is not None: setattr(model_obj, metaattr.name, result) - # find obj_proc of rule found in grammar - # (this obj_processor dominates of a processor for the current - # meta class of the object being processed) - obj_processor = metamodel.obj_processors.get( - metaclass_of_grammar_rule.__name__, None) - - # if not found and if rule in grammar differs from current rule of obj - if obj_processor is None and current_metaclass_of_obj is not None: - obj_processor = metamodel.obj_processors.get( + # return value of obj_processor + return_value_grammar = None + return_value_obj = None + + # call obj_proc of the current meta_class + if current_metaclass_of_obj is not None and \ + current_metaclass_of_obj is not metaclass_of_grammar_rule: + obj_processor_obj = metamodel.obj_processors.get( current_metaclass_of_obj.__name__, None) + if obj_processor_obj: + return_value_obj = obj_processor_obj(model_obj) - # if an object processor is found, call it. - if obj_processor: - return obj_processor(model_obj) + # call obj_proc of rule found in grammar + obj_processor_grammar = metamodel.obj_processors.get( + metaclass_of_grammar_rule.__name__, None) + if obj_processor_grammar: + return_value_grammar = obj_processor_grammar(model_obj) + + # both obj_processors are called, if two different processors + # are defined for the object metaclass and the grammar metaclass: + # e.g. + # Base: Special1|Special2; + # RuleCurrentlyChecked: att_to_be_checked=[Base] + # with object processors defined for Base, Special1, and Special2. + # + # Both processors are called, but for the return value the + # obj_processor corresponding to the object (e.g. of type Special1) + # dominates over the obj_processor of the grammar rule (Base). + # + # The order they are called is: first object (e.g., Special1), then + # the grammar based metaclass object processor (e.g., Base). + if return_value_obj: + return return_value_obj + else: + return return_value_grammar # or None model = process_node(parse_tree) # Register filename of the model for later use (e.g. imports/scoping).
dmap: Add missing tags aelb and casa Relates to
@@ -22,6 +22,7 @@ def _read_unknown(data, start, length): # These are the tags that we know about so far _TAGS = { + "aelb": DmapTag(read_bool, "com.apple.itunes.like-button"), "aels": DmapTag(read_uint, "com.apple.itunes.liked-state"), "aeFP": DmapTag(read_uint, "com.apple.itunes.req-fplay"), "aeGs": DmapTag(read_bool, "com.apple.itunes.can-be-genius-seed"), @@ -100,6 +101,7 @@ _TAGS = { "caks": DmapTag(read_uint, "unknown tag"), "caov": DmapTag(read_uint, "unknown tag"), "capl": DmapTag(read_bytes, "unknown tag"), + "casa": DmapTag(read_uint, "unknown tag"), "casc": DmapTag(read_uint, "unknown tag"), "cass": DmapTag(read_uint, "unknown tag"), "ceQA": DmapTag(read_uint, "unknown tag"),
Correct the instance migration link. 1. Update the instance migration link. 2. Remove the unnecessary install-guide link: "configure and install Ceilometer by following the documentation below" with a single reference is enough, so remove the other link.
@@ -366,7 +366,7 @@ Configure Nova compute Please check your hypervisor configuration to correctly handle `instance migration`_. -.. _`instance migration`: http://docs.openstack.org/admin-guide/compute-live-migration-usage.html +.. _`instance migration`: https://docs.openstack.org/nova/latest/admin/migration.html Configure Measurements ====================== @@ -374,7 +374,6 @@ Configure Measurements You can configure and install Ceilometer by following the documentation below : #. https://docs.openstack.org/ceilometer/latest -#. http://docs.openstack.org/kilo/install-guide/install/apt/content/ceilometer-nova.html The built-in strategy 'basic_consolidation' provided by watcher requires "**compute.node.cpu.percent**" and "**cpu_util**" measurements to be collected
IndexedData: Allow `Ellipsis` operator in low-level indexing expressions This allows us to use neat short-cuts such as `a.indexed[t + 1, ...]` to substitute the time dimension in low-level notation.
@@ -602,6 +602,30 @@ class IndexedData(IndexedBase): obj.function = self.function return obj + def __getitem__(self, idx): + """Allow the use of `Ellipsis` to derive default indices from + the original `SymbolicData` (`sympy.Function`) symbol in indexed + notation. This provides notational short-cuts, such as: + + In > a = TimeData(name='a', shape=(2, 3, 4)) + Out > a(t, x, y, z) + + In > a.indexed[time, ...] + Out > a[time, x, y, z] + """ + if idx is Ellipsis: + # Argument is only a single ellipsis, indexify() + return self[self.function.indices] + elif Ellipsis in idx: + # Partial replacement with defaults from parten function + i = idx.index(Ellipsis) + j = len(self.function.indices) - len(idx) + 1 + idx_ell = list(self.function.indices[i:i+j]) + indices = list(idx[:i]) + idx_ell + list(idx[i+1:]) + return self[indices] + else: + return super(IndexedData, self).__getitem__(idx) + class EmptyIndexed(Symbol):
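For illustration, the Ellipsis expansion arithmetic used by `__getitem__` above can be reduced to plain Python lists; this sketch runs without sympy or the surrounding package, and the index names are placeholders.

```python
# How '...' is replaced by the missing default indices.
def expand_ellipsis(idx, default_indices):
    if idx is Ellipsis:
        # a bare '...' simply means "all default indices"
        return list(default_indices)
    idx = list(idx)
    if Ellipsis in idx:
        i = idx.index(Ellipsis)
        j = len(default_indices) - len(idx) + 1      # how many defaults '...' stands for
        return idx[:i] + list(default_indices[i:i + j]) + idx[i + 1:]
    return idx

defaults = ['t', 'x', 'y', 'z']
print(expand_ellipsis(Ellipsis, defaults))             # ['t', 'x', 'y', 'z']
print(expand_ellipsis(('t + 1', Ellipsis), defaults))  # ['t + 1', 'x', 'y', 'z']
```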
Fix bugs introduced by merge from dev. - Somehow I lost the **kwargs in Nested _deserialize. - Added **kwargs to Pluck(Nested) _deserialize.
@@ -470,9 +470,9 @@ class Nested(Field): raise ValidationError(exc.messages, data=data, valid_data=exc.valid_data) return valid_data - def _deserialize(self, value, attr, data): + def _deserialize(self, value, attr, data, **kwargs): self._test_collection(value) - return self._load(value, data) + return self._load(value, data, **kwargs) class Pluck(Nested): @@ -504,13 +504,13 @@ class Pluck(Nested): return utils.pluck(ret, key=self._field_data_key) return ret[self._field_data_key] - def _deserialize(self, value, attr, data): + def _deserialize(self, value, attr, data, **kwargs): self._test_collection(value) if self.many: value = [{self._field_data_key: v} for v in value] else: value = {self._field_data_key: value} - return self._load(value, data) + return self._load(value, data, **kwargs) class List(Field):
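A hedged example of the pattern being restored here, assuming marshmallow 3's field API: custom fields accept `**kwargs` in `_deserialize` and forward them to the parent, so extra context supplied by the framework is not dropped. The `Upper` field is hypothetical and only for illustration.

```python
from marshmallow import Schema, fields

class Upper(fields.String):
    def _deserialize(self, value, attr, data, **kwargs):
        # delegate to the parent implementation, forwarding **kwargs untouched
        return super()._deserialize(value, attr, data, **kwargs).upper()

class UserSchema(Schema):
    name = Upper()

print(UserSchema().load({"name": "ada"}))  # {'name': 'ADA'}
```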
Side kick works; the robot gets to a nice position to get up to the standing stance. Not quite done
-time,0.5,1.5,2,3 -left_leg_motor_0,0,0,0,0 -left_leg_motor_1,0.1,0.1,0.1,0.1 -left_leg_motor_2,0.25,0.25,0.25,0.25 -left_leg_motor_3,-0.4,-0.4,-0.4,-0.4 -left_leg_motor_4,0.2,0.2,0.2,0.2 -left_leg_motor_5,-0.1,-0.1,-0.1,-0.1 -left_arm_motor_0,0,0,0,0 -left_arm_motor_1,2,2,2,2 -right_leg_motor_0,0,0,0,0 -right_leg_motor_1,0.1,0.1,1,1 -right_leg_motor_2,0.25,0.25,0.25,0.25 -right_leg_motor_3,-0.4,-0.4,-0.4,-0.4 -right_leg_motor_4,0.2,0.2,0.2,0.2 -right_leg_motor_5,-0.1,-0.1,-0.1,-0.1 -right_arm_motor_0,0,0,0,0 -right_arm_motor_1,2,2,2,2 -comment,stance,x,, +time,0.5,1.5,1.7,5,5.5,7 +left_leg_motor_0,0,0,0,0,0,0 +left_leg_motor_1,0.1,0.1,0.2,0.2,0.5,0.5 +left_leg_motor_2,0.25,0.25,0.25,0.25,0.3,0.3 +left_leg_motor_3,-0.4,-0.4,-0.4,-0.4,-0.45,-0.45 +left_leg_motor_4,0.2,0.2,0.2,0.2,0.3,0.3 +left_leg_motor_5,-0.1,-0.1,-0.2,-0.2,-0.2,-0.2 +left_arm_motor_0,0,0,0,0,0,0 +left_arm_motor_1,2,2,2,2,2,2 +right_leg_motor_0,0,0,0,0,0,0 +right_leg_motor_1,0.1,0.1,0.5,0.5,-0.1,-0.1 +right_leg_motor_2,0.25,0.25,0.25,0.25,0.45,0.45 +right_leg_motor_3,-0.4,-0.4,-0.4,-0.4,-1,-1 +right_leg_motor_4,0.2,0.2,0.2,0.2,0.5,0.5 +right_leg_motor_5,-0.1,-0.1,-0.1,-0.1,0.1,0.1 +right_arm_motor_0,0,0,0,0,0,0 +right_arm_motor_1,2,2,2,2,2,2 +comment,stance,x,kick,,prepare feet to get up,
Add links to the NumPy Testing Guidelines. [skip actions] [skip travis] [skip azp]
@@ -7,7 +7,10 @@ are not explicitly stated in the existing guidelines and standards, including * `PEP-8 <https://www.python.org/dev/peps/pep-0008>`_ Style Guide for Python Code * `PEP-257 <https://www.python.org/dev/peps/pep-0257>`_ Docstring Conventions -* `NumPy docstring standard <https://numpydoc.readthedocs.io/en/latest/format.html>`_ +* `NumPy docstring standard + <https://numpydoc.readthedocs.io/en/latest/format.html>`_ +* NumPy `Testing Guidelines + <https://docs.scipy.org/doc/numpy/reference/testing.html>`_ Some of these are trivial, and might not seem worth discussing, but in many cases, the issue has come up in a pull request review in either the SciPy @@ -54,6 +57,8 @@ functions:: instead of this function for more consistent floating point comparisons. +For more information about writing unit tests, see `Testing Guidelines +<https://docs.scipy.org/doc/numpy/reference/testing.html>`_. Testing that expected exceptions are raised ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Update plot_defaults.py prevent defaults from overwriting user 'ylim' kwarg
@@ -112,6 +112,7 @@ def matelem_vs_paramvals(specdata): def dressed_spectrum(sweep, **kwargs): """Plot defaults for sweep_plotting.dressed_spectrum""" + if 'ylim' not in kwargs: kwargs['ymax'] = kwargs.get('ymax') or min(15, (np.max(sweep.dressed_specdata.energy_table) - np.min(sweep.dressed_specdata.energy_table))) kwargs['xlabel'] = kwargs.get('xlabel') or sweep.param_name
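A generic sketch of the guard introduced above (the function and default values are illustrative, not the package's API): defaults are only filled in when the caller has not already pinned the y-range via `ylim`.

```python
def apply_plot_defaults(**kwargs):
    # only invent a y-range default when the caller has not fixed one via ylim
    if 'ylim' not in kwargs:
        kwargs['ymax'] = kwargs.get('ymax') or 15
    kwargs['xlabel'] = kwargs.get('xlabel') or 'parameter'
    return kwargs

print(apply_plot_defaults())             # fills in ymax and xlabel defaults
print(apply_plot_defaults(ylim=(0, 5)))  # leaves the caller's y-range alone
```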
Rename variable according to the Cocoa OptionContainer; fix import
from toga.interface import OptionContainer as OptionContainerInterface -from src.android.toga_android.widgets.base import WidgetMixin +from ..container import Container from ..libs import WinForms +from .base import WidgetMixin from System import Drawing @@ -12,18 +13,22 @@ class TogaOptionContainer(WinForms.TabControl): class OptionContainer(OptionContainerInterface, WidgetMixin): + _CONTAINER_CLASS = Container + def __init__(self, id=None, style=None, content=None): super(OptionContainer, self).__init__(id=id, style=style, content=content) self._create() def create(self): - self._container = self # TODO Why? + #self._container = self # TODO Why? self._impl = TogaOptionContainer(self) def _add_content(self, label, container, widget): - tabPage1 = WinForms.TabPage() - tabPage1.Text = label - tabPage1.Size = Drawing.Size(256, 214) # TODO test - tabPage1.TabIndex = 0 # TODO remove? - self._impl.Controls.Add(tabPage1) - tabPage1.Controls.Add(container._impl) + item = WinForms.TabPage() + item.Text = label + item.Size = Drawing.Size(256, 214) # TODO test + item.TabIndex = 0 # TODO remove? + # TODO Expansion? + + item.Controls.Add(container._impl) + self._impl.Controls.Add(item)
Update installation.md Added missing \ in line 70
@@ -67,7 +67,7 @@ docker run -d \ --restart=unless-stopped \ --mount type=tmpfs,target=/tmp/cache,tmpfs-size=1000000000 \ --device /dev/bus/usb:/dev/bus/usb \ - --device /dev/dri/renderD128 + --device /dev/dri/renderD128 \ -v <path_to_directory_for_media>:/media/frigate \ -v <path_to_config_file>:/config/config.yml:ro \ -v /etc/localtime:/etc/localtime:ro \
DOC: Added missing example to stats.mstats.variation DOC: add example to stats.mstats.variation
@@ -2157,6 +2157,21 @@ def variation(a, axis=0): ----- For more details about `variation`, see `stats.variation`. + Examples + -------- + >>> from scipy.stats.mstats import variation + >>> a = np.array([2,8,4]) + >>> variation(a) + 0.5345224838248487 + >>> b = np.array([2,8,3,4]) + >>> c = np.ma.masked_array(b, mask=[0,0,1,0]) + >>> variation(c) + 0.5345224838248487 + + In the example above, it can be seen that this works the same as + `stats.variation` except 'stats.mstats.variation' ignores masked + array elements. + """ a, axis = _chk_asarray(a, axis) return a.std(axis)/a.mean(axis)
Update tdvt.py type hints. Add more detail to the CL test summary.
@@ -18,7 +18,7 @@ import threading import time import zipfile from pathlib import Path -from typing import List, Tuple, Union +from typing import List, Optional, Tuple, Union from .config_gen.datasource_list import print_ds, print_configurations, print_logical_configurations from .config_gen.tdvtconfig import TdvtInvocation @@ -243,6 +243,8 @@ def enqueue_failed_tests(run_file: Path, root_directory, args, rt: RunTimeTestCo all_tdvt_test_configs = {} all_test_pairs = [] failed_tests = tests['failed_tests'] + skipped_tests = tests['skipped_tests'] + failed_and_skipped_tests = {**failed_tests, **skipped_tests} # Go through the failed tests and group the ones that can be run together in a FileTestSet. for f in failed_tests: test_file_path = f['test_file'] @@ -524,7 +526,7 @@ def test_runner(all_tests, test_queue, max_threads): return failed_tests, skipped_tests, disabled_tests, total_tests -def run_tests_impl(tests: List[TestSet], max_threads, args): +def run_tests_impl(tests: List[TestSet], max_threads, args) -> Optional[Tuple[int, int, int, int]]: if not tests: print("No tests found. Check arguments.") sys.exit() @@ -604,13 +606,17 @@ def run_tests_impl(tests: List[TestSet], max_threads, args): skipped_tests += skipped_smoke_tests disabled_tests += disabled_smoke_tests total_tests += total_smoke_tests + total_tests_run = total_tests - disabled_tests - skipped_tests + total_passed_tests = total_tests_run - failed_tests print('\n') print("Total time: " + str(time.time() - start_time)) + print("Total passed tests: {}".format(total_passed_tests)) print("Total failed tests: " + str(failed_tests)) print("Total disabled tests: " + str(disabled_tests)) print("Total skipped tests: " + str(skipped_tests)) - print("Total tests ran: " + str(total_tests)) + print("Total tests: " + str(total_tests)) + print("Total tests run: " + str(total_tests_run)) return failed_tests, skipped_tests, disabled_tests, total_tests
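A worked example of the new summary arithmetic, with made-up numbers:

```python
total_tests, failed_tests, disabled_tests, skipped_tests = 120, 5, 10, 3

total_tests_run = total_tests - disabled_tests - skipped_tests   # 107
total_passed_tests = total_tests_run - failed_tests              # 102

print("Total tests:", total_tests)
print("Total tests run:", total_tests_run)
print("Total passed tests:", total_passed_tests)
```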
Fix isort Summary: builds failing - isort 5, released today, is not compatible w/ current pylint Test Plan: buildkite Reviewers: schrockn, max, alangenfeld, sashank
@@ -2,7 +2,7 @@ black==19.10b0; python_version >= '3.6' coverage==4.5.4 # 5.0 release on 12/19 broke everything flake8>=3.7.8 grpcio-tools==1.30.0 -isort>=4.3.21 +isort<5,>=4.3.21 mock==3.0.5 pytest-mock==2.0.0 nbsphinx==0.4.2
Restores test_parquet in http Closes This test is not failing anymore. Parameterizes the test on the two engines, pyarrow and fastparquet. Pyarrow was not available when the test was originally written but has since become available.
@@ -171,17 +171,18 @@ def test_open_glob(dir_server): @pytest.mark.network [email protected](reason="https://github.com/dask/dask/issues/5042", strict=False) -def test_parquet(): [email protected]("engine", ("pyarrow", "fastparquet")) +def test_parquet(engine): pytest.importorskip("requests", minversion="2.21.0") dd = pytest.importorskip("dask.dataframe") - pytest.importorskip("fastparquet") # no pyarrow compatibility FS yet + pytest.importorskip(engine) df = dd.read_parquet( [ "https://github.com/Parquet/parquet-compatibility/raw/" "master/parquet-testdata/impala/1.1.1-NONE/" "nation.impala.parquet" - ] + ], + engine=engine, ).compute() assert df.n_nationkey.tolist() == list(range(25)) assert df.columns.tolist() == ["n_nationkey", "n_name", "n_regionkey", "n_comment"]
Fixed following on flowlist. A focus_follow reference was missed when options were cleaned up.
@@ -396,7 +396,7 @@ class FlowListBox(urwid.ListBox): ) elif key == "F": o = self.master.options - o.focus_follow = not o.focus_follow + o.console_focus_follow = not o.console_focus_follow elif key == "v": val = not self.master.options.console_order_reversed self.master.options.console_order_reversed = val
DOC: fix documentation for the typedescr argument of PyArray_AsCArray. The documentation did not match the PyArray_AsCArray API; also document that the call steals a reference to the PyArray_Descr argument.
@@ -2176,8 +2176,8 @@ Array Functions ^^^^^^^^^^^^^^^ .. c:function:: int PyArray_AsCArray( \ - PyObject** op, void* ptr, npy_intp* dims, int nd, int typenum, \ - int itemsize) + PyObject** op, void* ptr, npy_intp* dims, int nd, \ + PyArray_Descr* typedescr) Sometimes it is useful to access a multidimensional array as a C-style multi-dimensional array so that algorithms can be @@ -2207,14 +2207,11 @@ Array Functions The dimensionality of the array (1, 2, or 3). - :param typenum: + :param typedescr: - The expected data type of the array. - - :param itemsize: - - This argument is only needed when *typenum* represents a - flexible array. Otherwise it should be 0. + A :c:type:`PyArray_Descr` structure indicating the desired data-type + (including required byteorder). The call will steal a reference to + the parameter. .. note::
fix test regexp On OpenBSD the output is sh: <filename>: cannot execute - Is a directory
@@ -64,7 +64,7 @@ scons: \\*\\*\\* \\[%s\\] Error 1 """ cannot_execute = """\ -(sh: )*.+: cannot execute( \\[Is a directory\\])? +(sh: )*.+: cannot execute(( -)? \\[?Is a directory\\]?)? scons: \\*\\*\\* \\[%s\\] Error %s """
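A quick stand-alone check (not part of the SCons test suite; the path is a placeholder) that the widened pattern accepts both the bracketed form matched by the old expression and the OpenBSD-style message quoted above:

```python
import re

cannot_execute = r"(sh: )*.+: cannot execute(( -)? \[?Is a directory\]?)?"

old_style = "sh: /tmp/build: cannot execute [Is a directory]"
openbsd   = "sh: /tmp/build: cannot execute - Is a directory"

for line in (old_style, openbsd):
    assert re.fullmatch(cannot_execute, line), line
print("both forms match")
```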
Handle non-user bans when iterating banned participants Closes
@@ -233,6 +233,9 @@ class _ParticipantsIter(RequestIter): for participant in participants.participants: if isinstance(participant, types.ChannelParticipantBanned): + if not isinstance(participant.peer, types.PeerUser): + # May have the entire channel banned. See #3105. + continue user_id = participant.peer.user_id else: user_id = participant.user_id
Guard against extraneous axes in Hist.fill Fixes
@@ -875,7 +875,7 @@ class Hist(AccumulatorABC): Note ---- The reserved keyword ``weight``, if specified, will increment sum of weights - by the given column values, which must have the same dimension as all other + by the given column values, which must be broadcastable to the same dimension as all other columns. Upon first use, this will trigger the storage of the sum of squared weights. @@ -890,6 +890,9 @@ class Hist(AccumulatorABC): if not all(d.name in values for d in self._axes): missing = ", ".join(d.name for d in self._axes if d.name not in values) raise ValueError("Not all axes specified for %r. Missing: %s" % (self, missing)) + if not all(name in self._axes or name == 'weight' for name in values): + extra = ", ".join(name for name in values if not (name in self._axes or name == 'weight')) + raise ValueError("Unrecognized axes specified for %r. Extraneous: %s" % (self, extra)) if "weight" in values and self._sumw2 is None: self._init_sumw2()
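The validation added to `fill` can be illustrated with a stand-alone sketch (no coffea import; the axis names are made up): every known axis must be supplied, and nothing other than known axes plus the reserved `weight` keyword may be supplied.

```python
def check_fill_args(axes, **values):
    missing = [name for name in axes if name not in values]
    if missing:
        raise ValueError("Not all axes specified. Missing: " + ", ".join(missing))
    extra = [name for name in values if name not in axes and name != "weight"]
    if extra:
        raise ValueError("Unrecognized axes specified. Extraneous: " + ", ".join(extra))

check_fill_args(["pt", "eta"], pt=1.0, eta=0.5, weight=2.0)   # passes silently
try:
    check_fill_args(["pt", "eta"], pt=1.0, eta=0.5, phi=0.1)  # 'phi' is not an axis
except ValueError as err:
    print(err)
```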
Update speech_model_adaptation_beta_test.py chore: shorten the ids.
@@ -40,7 +40,8 @@ def test_model_adaptation_beta(custom_class_id, phrase_set_id, capsys): @pytest.fixture def custom_class_id(): - custom_class_id = f"customClassId{uuid.uuid4()}" + # The custom class id can't be too long + custom_class_id = f"customClassId{str(uuid.uuid4())[:8]}" yield custom_class_id # clean up resources CLASS_PARENT = ( @@ -51,7 +52,8 @@ def custom_class_id(): @pytest.fixture def phrase_set_id(): - phrase_set_id = f"phraseSetId{uuid.uuid4()}" + # The phrase set id can't be too long + phrase_set_id = f"phraseSetId{str(uuid.uuid4())[:8]}" yield phrase_set_id # clean up resources PHRASE_PARENT = (
[docs] mention IO manager docs in the migration guide Summary: Test Plan: NA Reviewers: sandyryza, cdecarolis
@@ -37,7 +37,8 @@ into any problems with new new scheduler. We have deprecated "intermediate storage". Loading inputs and storing outputs are now handled by "IO managers", which serve the same purpose as intermediate storages but offer you better -control over how inputs are loaded and outputs are handled. +control over how inputs are loaded and outputs are handled. Check out the +[IO Managers Overview](https://docs.dagster.io/0.10.0/overview/io-managers/io-managers) for more information. - We have deprecated the field `"storage"` and `"intermediate_storage"` on run config.
operations.regen: collapse repo pkgs to list before regen starts To avoid threading issues arising from scanning for matches inside various threads since the iterator isn't thread-safe.
@@ -32,8 +32,11 @@ def regen_repository(repo, observer, threads=1, pkg_attr='keywords', **kwargs): helpers.append(helper) return helper - # force usage of unfiltered repo to include pkgs with metadata issues - pkgs = repo.itermatch(packages.AlwaysTrue, pkg_filter=None) + # Force usage of unfiltered repo to include pkgs with metadata issues. + # Matches are collapsed directly to a list to avoid threading issues such + # as EBADF since the repo iterator isn't thread-safe. + pkgs = list(repo.itermatch(packages.AlwaysTrue, pkg_filter=None)) + def get_args(): return (_get_repo_helper(), observer)
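A small sketch of the motivation, under the assumption stated in the message that the repo iterator is not safe to share between threads: materialising the matches into a list in the main thread means the workers only ever see plain items. `scan_matches` and `regen` are stand-ins, not the pkgcore API.

```python
from concurrent.futures import ThreadPoolExecutor

def scan_matches():                # stand-in for repo.itermatch(...)
    for i in range(10):
        yield "pkg-%d" % i

pkgs = list(scan_matches())        # collapse once, in the main thread

def regen(pkg):                    # stand-in for the per-package helper
    return pkg.upper()

with ThreadPoolExecutor(max_workers=4) as pool:
    print(list(pool.map(regen, pkgs)))
```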
Fix tests due to changes in the loading mechanism: notification methods need to be fetched now.
@@ -19,6 +19,7 @@ from .templatetags import inventree_extras import part.settings from common.models import InvenTreeSetting, NotificationEntry, NotificationMessage +from common.notifications import storage class TemplateTagTest(TestCase): @@ -494,6 +495,9 @@ class BaseNotificationIntegrationTest(TestCase): self.part = Part.objects.get(name='R_2K2_0805') def _notification_run(self): + # reload notification methods + storage.collect() + # There should be no notification runs self.assertEqual(NotificationEntry.objects.all().count(), 0)
remove requests package install in tests We're only interested in testing if the virtual environment can be successfully created.
@@ -59,7 +59,7 @@ def test_venv_file_with_name(PipenvInstance, pypi): if 'PIPENV_VENV_IN_PROJECT' in os.environ: del os.environ['PIPENV_VENV_IN_PROJECT'] - c = p.pipenv('install requests') + c = p.pipenv('install') assert c.return_code == 0 venv_loc = Path(p.pipenv('--venv').out.strip()) @@ -80,7 +80,7 @@ def test_venv_file_with_path(PipenvInstance, pypi): with open(file_path, "w") as f: f.write(venv_path.name) - c = p.pipenv("install requests") + c = p.pipenv("install") assert c.return_code == 0 venv_loc = Path(p.pipenv('--venv').out.strip())
Update the changes made to _request method Forgot to include 'return'
@@ -47,7 +47,7 @@ def _request(blink, url='http://google.com', data=None, headers=None, (response.json()['code'], response.json()['message'])) else: headers = _attempt_reauthorization(blink) - _request(blink, url=url, data=data, headers=headers, + return _request(blink, url=url, data=data, headers=headers, reqtype=reqtype, stream=stream, json_resp=json_resp, is_retry=True)
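A tiny, library-free illustration of the bug class fixed here: when a function retries by calling itself, the recursive call's result must be returned, otherwise the original caller receives None.

```python
def request(attempt=1, is_retry=False):
    if not is_retry:
        # without 'return' the retried result would be discarded and the
        # caller would get None; returning it propagates the response
        return request(attempt + 1, is_retry=True)
    return {"status": "ok", "attempt": attempt}

print(request())  # {'status': 'ok', 'attempt': 2}
```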
SSYNC: log body of errors in SSYNC subrequests. It helps an operator understand why an SSYNC replication is not progressing on a specific partition.
@@ -478,8 +478,8 @@ class Receiver(object): successes += 1 else: self.app.logger.warning( - 'ssync subrequest failed with %s: %s %s' % - (resp.status_int, method, subreq.path)) + 'ssync subrequest failed with %s: %s %s (%s)' % + (resp.status_int, method, subreq.path, resp.body)) failures += 1 if failures >= self.app.replication_failure_threshold and ( not successes or
LandBOSSE second integration Set all lift heights to be equal to the hub height.
@@ -661,12 +661,17 @@ class LandBOSSE_API(om.ExplicitComponent): # Make the hub hub_mass_kg = inputs['hub_mass'][0] hub = input_components[input_components['Component'].str.startswith('Hub')].iloc[0].copy() + hub['Lift height m'] = hub_height_meters if hub_mass_kg != use_default_component_data: hub['Mass tonne'] = hub_mass_kg / kg_per_tonne output_components_list.append(hub) # Make blades blade = input_components[input_components['Component'].str.startswith('Blade')].iloc[0].copy() + + # There is always a hub height, so use that as the lift height + blade['Lift height m'] = hub_height_meters + if inputs['blade_drag_coefficient'][0] != use_default_component_data: blade['Coeff drag'] = inputs['blade_drag_coefficient'][0] @@ -677,7 +682,7 @@ class LandBOSSE_API(om.ExplicitComponent): blade['Cycle time installation hrs'] = inputs['blade_install_cycle_time'][0] if inputs['blade_offload_hook_height'][0] != use_default_component_data: - blade['Offload hook height m'] = inputs['blade_offload_hook_height'][0] + blade['Offload hook height m'] = hub_height_meters if inputs['blade_offload_cycle_time'][0] != use_default_component_data: blade['Offload cycle time hrs'] = inputs['blade_offload_cycle_time'] @@ -702,7 +707,7 @@ class LandBOSSE_API(om.ExplicitComponent): # Make tower sections tower_mass_tonnes = inputs['tower_mass'][0] / kg_per_tonne tower_section_length_m = inputs['tower_section_length_m'][0] - tower_height_m = inputs['hub_height_meters'][0] - inputs['foundation_height'][0] + tower_height_m = hub_height_meters - inputs['foundation_height'][0] complete_tower_sections = int(tower_height_m // tower_section_length_m) incomplete_tower_section_m = tower_height_m % tower_section_length_m tower_sections = [tower_section_length_m] * complete_tower_sections
Update README.rst Fix logo spacing that was accidentally committed to main. :/
@@ -28,7 +28,7 @@ on existing infrastructure including clouds, clusters, and supercomputers. .. image:: docs/img/funcx-logo.png :target: https://www.funcx.org - :width: 400 + :width: 200 Website: https://www.funcx.org
CI: Remove multiple hashFiles instances in a single step. hashFiles supports passing multiple filenames, and using this feature results in much cleaner keys. Fixes:
@@ -69,7 +69,7 @@ jobs: uses: actions/cache@v3 with: path: /srv/zulip-npm-cache - key: v1-yarn-deps-${{ matrix.os }}-${{ hashFiles('package.json') }}-${{ hashFiles('yarn.lock') }} + key: v1-yarn-deps-${{ matrix.os }}-${{ hashFiles('package.json', 'yarn.lock') }} restore-keys: v1-yarn-deps-${{ matrix.os }} - name: Restore python cache @@ -83,7 +83,7 @@ jobs: uses: actions/cache@v3 with: path: /srv/zulip-emoji-cache - key: v1-emoji-${{ matrix.os }}-${{ hashFiles('tools/setup/emoji/emoji_map.json') }}-${{ hashFiles('tools/setup/emoji/build_emoji') }}-${{ hashFiles('tools/setup/emoji/emoji_setup_utils.py') }}-${{ hashFiles('tools/setup/emoji/emoji_names.py') }}-${{ hashFiles('package.json') }} + key: v1-emoji-${{ matrix.os }}-${{ hashFiles('tools/setup/emoji/emoji_map.json', 'tools/setup/emoji/build_emoji', 'tools/setup/emoji/emoji_setup_utils.py', 'tools/setup/emoji/emoji_names.py', 'package.json') }} restore-keys: v1-emoji-${{ matrix.os }} - name: Install dependencies
Add output_padding argument for ConvTranspose2D. Sometimes it is necessary in order to make ConvTranspose2D recover the original input size of the corresponding Conv2D.
@@ -1277,6 +1277,7 @@ class ConvTranspose2D(nn.Module): activation=torch.relu_, strides=1, padding=0, + output_padding=0, use_bias=None, use_bn=False, kernel_initializer=None, @@ -1295,6 +1296,9 @@ class ConvTranspose2D(nn.Module): activation (torch.nn.functional): strides (int or tuple): padding (int or tuple): + output_padding (int or tuple): Additional size added to one side of + each dimension in the output shape. Default: 0. See pytorch + documentation for more detail. use_bias (bool|None): If None, will use ``not use_bn`` use_bn (bool): whether use batch normalization kernel_initializer (Callable): initializer for the conv_trans layer. @@ -1315,6 +1319,7 @@ class ConvTranspose2D(nn.Module): kernel_size, stride=strides, padding=padding, + output_padding=output_padding, bias=use_bias) if kernel_initializer is None: variance_scaling_init(
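A minimal check of the use case described in the message, using plain `torch.nn` rather than the wrapper above: with stride 2 an 8x8 input shrinks to 4x4, and the transposed convolution needs `output_padding=1` to map 4x4 back to exactly 8x8.

```python
import torch
import torch.nn as nn

x = torch.randn(1, 1, 8, 8)
down = nn.Conv2d(1, 1, kernel_size=3, stride=2, padding=1)
up = nn.ConvTranspose2d(1, 1, kernel_size=3, stride=2, padding=1, output_padding=1)

y = down(x)
print(y.shape)      # torch.Size([1, 1, 4, 4])
print(up(y).shape)  # torch.Size([1, 1, 8, 8]) -- matches the original input size
```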
Adds SPARSE DATA print stmt and commented-out logl-term checks. (added to core.py, used in debugging)
@@ -1207,6 +1207,7 @@ def do_mc2gst(dataset, startModel, circuitsToUse, firsts = _np.array(firsts, 'i') indicesOfCircuitsWithOmittedData = _np.array(indicesOfCircuitsWithOmittedData, 'i') dprobs_omitted_rowsum = _np.empty((len(firsts), vec_gs_len), 'd') + printer.log("SPARSE DATA: %d of %d rows have sparse data" % (len(firsts), len(circuitsToUse))) else: firsts = None # no omitted probs @@ -2537,6 +2538,11 @@ def _do_mlgst_base(dataset, startModel, circuitsToUse, # set 0 * log(0) terms explicitly to zero since numpy doesn't know this limiting behavior #freqTerm[cntVecMx == 0] = 0.0 + #CHECK OBJECTIVE FN + #max_logL_terms = _tools.logl_max_terms(mdl, dataset, dsCircuitsToUse, + # poissonPicture, opLabelAliases, evaltree_cache) + #print("DIFF1 = ",abs(_np.sum(max_logL_terms) - _np.sum(freqTerm))) + min_p = minProbClip a = radius # parameterizes "roundness" of f == 0 terms @@ -2613,6 +2619,12 @@ def _do_mlgst_base(dataset, startModel, circuitsToUse, _np.where(omitted_probs >= a, omitted_probs, (-1.0 / (3 * a**2)) * omitted_probs**3 + omitted_probs**2 / a + a / 3.0) + #CHECK OBJECTIVE FN + #logL_terms = _tools.logl_terms(mdl, dataset, circuitsToUse, + # min_p, probClipInterval, a, poissonPicture, False, + # opLabelAliases, evaltree_cache) # v = maxL - L so L + v - maxL should be 0 + #print("DIFF2 = ",_np.sum(logL_terms), _np.sum(v), _np.sum(freqTerm), abs(_np.sum(logL_terms) + _np.sum(v)-_np.sum(freqTerm))) + v = _np.sqrt(v) v.shape = [KM] # reshape ensuring no copy is needed if cptp_penalty_factor != 0:
Update unscanned_table_summary.sql added type to number of nodes, changed storage size calculations
@@ -16,20 +16,29 @@ truely unscanned over longer periods of time. History: 2016-01-18 chriz-bigdata created +2020-11-17 marynap chnaged storage calculation **********************************************************************************************/ + WITH - nodes AS (SELECT COUNT(DISTINCT node) nodenum FROM stv_slices), - slices AS (SELECT COUNT(DISTINCT slice) slices FROM stv_slices s WHERE node=0), - disks AS (SELECT COUNT(p.owner) disks FROM stv_partitions p WHERE p.owner=0), - storage AS ( - SELECT - nodes.nodenum * (CASE - WHEN slices.slices = 32 THEN 2.56 - WHEN slices.slices = 16 THEN 16.00 - WHEN disks.disks > 2 THEN 2 - ELSE 0.16 END) AS total_storage - FROM - nodes, slices, disks), + nodes AS (SELECT COUNT(DISTINCT node) nodenum FROM stv_slices WHERE TYPE = 'D'), + storage as (SELECT nodes.nodenum * ( CASE + WHEN capacity IN (381407, 190633, 361859) + THEN 160 / 1024 + WHEN capacity IN (380319, 760956) + THEN 2.56 + WHEN capacity IN (1906314, 952455) + THEN 2 + WHEN capacity = 945026 + THEN 16 + WHEN capacity = 3339176 + THEN 64 + ELSE NULL + END::float ) AS total_storage + FROM stv_partitions, nodes + WHERE part_begin = 0 + AND failed = 0 + group by 1 + ), table_scans AS ( SELECT database, @@ -67,4 +76,3 @@ SELECT num_unscanned_tables || ' unscanned tables @ ' || size_unscanned_tables || 'TB / ' || total_storage || 'TB (' || ROUND(100*(size_unscanned_tables::float/total_storage::float),1) || '%)' AS unscanned_table_storage FROM scan_aggs; -
Unlink the BIT-Vehicle dataset Its webpage has been down for a couple of weeks now, and it's breaking our link checking job. We'll have to remove the link, at least for now.
@@ -25,7 +25,7 @@ the "Barrier" use case. Average Precision (AP) is defined as an area under the [precision/recall](https://en.wikipedia.org/wiki/Precision_and_recall) -curve. Validation dataset is [BIT-Vehicle](http://iitlab.bit.edu.cn/mcislab/vehicledb/). +curve. Validation dataset is BIT-Vehicle. ## Performance
Modify incorrect registry test cases Registry object has no attributes 'cpuset' and 'cpu_policy'
@@ -26,9 +26,7 @@ class TestRegistryObject(base.DbTestCase): def setUp(self): super(TestRegistryObject, self).setUp() - self.fake_cpuset = utils.get_cpuset_dict() - self.fake_registry = utils.get_test_registry( - cpuset=self.fake_cpuset, cpu_policy='dedicated') + self.fake_registry = utils.get_test_registry() def test_get_by_uuid(self): uuid = self.fake_registry['uuid']
Update kubernetes authentication example to use the current API call rather than the deprecated one.
@@ -9,4 +9,4 @@ Authentication # Kubernetes (from k8s pod) f = open('/var/run/secrets/kubernetes.io/serviceaccount/token') jwt = f.read() - client.auth_kubernetes("example", jwt) + client.auth.kubernetes.login("example", jwt)
fix code example for pyspark_resource Test Plan: bk Reviewers: nate, max
@@ -34,8 +34,19 @@ def pyspark_resource(init_context): Example: - .. literalinclude:: ../../../../../examples/basic_pyspark/repo.py - :language: python + .. code-block:: python + @solid(required_resource_keys={"pyspark"}) + def my_solid(context): + spark_session = context.pyspark.spark_session + dataframe = spark_session.read.json("examples/src/main/resources/people.json") + + my_pyspark_resource = pyspark_resource.configured( + {"spark_conf": {"spark.executor.memory": "2g"}} + ) + + @pipeline(mode_defs=[ModeDefinition(resource_defs={"pyspark"})]) + def my_pipeline(): + my_solid() """ return PySparkResource(init_context.resource_config["spark_conf"])
Fix datasourceproviders to deserialize as text * Fix 2970 issue with datasources where the DatasourceProvider output was being deserialized as raw instead of text
@@ -1076,4 +1076,4 @@ def serialize_datasource_provider(obj, root): @deserializer(DatasourceProvider) def deserialize_datasource_provider(_type, data, root): - return SerializedRawOutputProvider(data["relative_path"], root) + return SerializedOutputProvider(data["relative_path"], root)
WordPress exclusion profile improvements: the rule should skip over the WP rules in phase:2 when inactive; add more wp_http_referer FP exclusions; nav-menus should also be excluded on edit; add more exclusions to nav-menus.
@@ -270,7 +270,7 @@ SecRule REQUEST_FILENAME "!@contains /wp-admin/" \ SecRule REQUEST_FILENAME "!@contains /wp-admin/" \ "id:9002401,\ - phase:1,\ + phase:2,\ pass,\ t:none,\ nolog,\ @@ -411,21 +411,23 @@ SecAction \ nolog,\ ctl:ruleRemoveTargetById=920230;ARGS:_wp_http_referer,\ ctl:ruleRemoveTargetById=931130;ARGS:_wp_http_referer,\ - ctl:ruleRemoveTargetById=932200;ARGS:_wp_http_referer,\ ctl:ruleRemoveTargetById=932150;ARGS:_wp_http_referer,\ + ctl:ruleRemoveTargetById=932200;ARGS:_wp_http_referer,\ ctl:ruleRemoveTargetById=941100;ARGS:_wp_http_referer,\ ctl:ruleRemoveTargetById=942130;ARGS:_wp_http_referer,\ ctl:ruleRemoveTargetById=942200;ARGS:_wp_http_referer,\ + ctl:ruleRemoveTargetById=942230;ARGS:_wp_http_referer,\ ctl:ruleRemoveTargetById=942260;ARGS:_wp_http_referer,\ ctl:ruleRemoveTargetById=942431;ARGS:_wp_http_referer,\ ctl:ruleRemoveTargetById=942440;ARGS:_wp_http_referer,\ ctl:ruleRemoveTargetById=920230;ARGS:wp_http_referer,\ ctl:ruleRemoveTargetById=931130;ARGS:wp_http_referer,\ - ctl:ruleRemoveTargetById=932200;ARGS:wp_http_referer,\ ctl:ruleRemoveTargetById=932150;ARGS:wp_http_referer,\ + ctl:ruleRemoveTargetById=932200;ARGS:wp_http_referer,\ ctl:ruleRemoveTargetById=941100;ARGS:wp_http_referer,\ ctl:ruleRemoveTargetById=942130;ARGS:wp_http_referer,\ ctl:ruleRemoveTargetById=942200;ARGS:wp_http_referer,\ + ctl:ruleRemoveTargetById=942230;ARGS:wp_http_referer,\ ctl:ruleRemoveTargetById=942260;ARGS:wp_http_referer,\ ctl:ruleRemoveTargetById=942431;ARGS:wp_http_referer,\ ver:'OWASP_CRS/3.3.0'" @@ -491,12 +493,15 @@ SecRule REQUEST_FILENAME "@endsWith /wp-admin/nav-menus.php" \ nolog,\ ver:'OWASP_CRS/3.3.0',\ chain" - SecRule ARGS:action "@streq update" \ + SecRule ARGS:action "@rx ^(?:update|edit)$" \ "t:none,\ chain" SecRule &ARGS:action "@eq 1" \ "t:none,\ + ctl:ruleRemoveTargetById=932200;ARGS,\ + ctl:ruleRemoveTargetById=932150;ARGS,\ ctl:ruleRemoveTargetById=942460;ARGS:menu-name,\ + ctl:ruleRemoveTargetById=932200;ARGS:nav-menu-data,\ ctl:ruleRemoveTargetById=941330;ARGS:nav-menu-data,\ ctl:ruleRemoveTargetById=941340;ARGS:nav-menu-data,\ ctl:ruleRemoveTargetById=942200;ARGS:nav-menu-data,\
Update mkvtomp4.py fix case sensitivity issues
@@ -845,7 +845,7 @@ class MkvtoMp4: x, lang = os.path.splitext(subname) while '.forced' in lang or '.default' in lang or lang.replace('.', "").isdigit(): x, lang = os.path.splitext(x) - lang = lang[1:] + lang = lang[1:].lower() # Using bablefish to convert a 2 language code to a 3 language code if len(lang) == 2: try: