message | diff
README: fix URLs to point to the right project When I copied and pasted here I failed to fix the project name.
pyrax ===== -.. image:: https://img.shields.io/pypi/v/jira.svg - :target: https://pypi.python.org/pypi/jira/ +.. image:: https://img.shields.io/pypi/v/pyrax.svg + :target: https://pypi.python.org/pypi/pyrax/ -.. image:: https://travis-ci.com/pycontribs/jira.svg?branch=master - :target: https://travis-ci.com/pycontribs/jira +.. image:: https://travis-ci.com/pycontribs/pyrax.svg?branch=master + :target: https://travis-ci.com/pycontribs/pyrax Python SDK for OpenStack/Rackspace APIs
Add RandomForest to the baseline Random Forest is included because it's the simplest example of a framework being executed in a subprocess with its own virtual environment.
@@ -83,6 +83,7 @@ jobs: if: needs.detect_changes.outputs.skip_baseline == 0 strategy: matrix: + framework: [constantpredictor, randomforest] task: [iris, kc2, cholesterol] fail-fast: false steps: @@ -110,7 +111,7 @@ jobs: - name: Run constantpredictor on openml iris run: | source venv/bin/activate - python runbenchmark.py constantpredictor -t ${{ matrix.task }} -f 0 -e + python runbenchmark.py ${{ matrix.framework }} -t ${{ matrix.task }} -f 0 -e run_frameworks: name: ${{ matrix.framework }}/${{ matrix.task }}
Update v_connection_summary.sql Fixed duration calculation for 'Connection Lost' sessions
@@ -16,7 +16,7 @@ trim(a.dbname) as dbname, trim(c.application_name) as app_name, trim(b.authmethod) as authmethod, case when d.duration > 0 then (d.duration/1000000)/86400||' days '||((d.duration/1000000)%86400)/3600||'hrs ' -||((d.duration/1000000)%3600)/60||'mins '||(d.duration/1000000%60)||'secs' else datediff(s,a.recordtime,getdate())/86400||' days '||(datediff(s,a.recordtime,getdate())%86400)/3600||'hrs ' +||((d.duration/1000000)%3600)/60||'mins '||(d.duration/1000000%60)||'secs' when f.process is null then null else datediff(s,a.recordtime,getdate())/86400||' days '||(datediff(s,a.recordtime,getdate())%86400)/3600||'hrs ' ||(datediff(s,a.recordtime,getdate())%3600)/60||'mins '||(datediff(s,a.recordtime,getdate())%60)||'secs' end as duration, b.mtu, trim(b.sslversion) as sslversion,
util: python: Add within_method() helper function Checks if a caller is being called from a given method of a given object.
@@ -3,6 +3,7 @@ Python specific helper functions """ import types import pathlib +import inspect import importlib from typing import Optional, Callable, Union, Tuple, Iterator @@ -80,3 +81,97 @@ def modules( continue # Import module yield import_name, importlib.import_module(import_name) + + +# See comment at beginning of within_method() +IN_IPYTHON = False +CHECKED_IN_IPYTHON = False +IPYTHON_INSPECT_PATCHED = False + + +def within_method(obj: object, method_name: str, max_depth: int = -1) -> bool: + """ + Return True if a caller is being called from a given method of a given + object. + + Parameters + ---------- + obj : object + Check if we are within a method of this object. + method_name : str + Check if we are within a method by this name. + max_depth : int, optional (-1) + Stop checking stack frames after we have checked this many. + + Returns + ------- + within : boolean + True if the calling function is being called from within the method + given bound to the object given. + + Examples + -------- + + >>> from dffml import within_method + >>> + >>> class FirstClass: + ... def feedface(self): + ... print(within_method(self, "__init__", max_depth=3)) + ... + >>> first = FirstClass() + >>> first .feedface() + False + >>> + >>> class SecondClass(FirstClass): + ... def __init__(self): + ... self.feedface() + ... + >>> second = SecondClass() + True + >>> + >>> class ThirdClass(SecondClass): + ... def __init__(self): + ... self.deadbeef() + ... + ... def deadbeef(self): + ... self.feedface() + ... + >>> third = ThirdClass() + False + """ + # HACK Fix for if we are running in IPython notebook. Sometimes it doesn't + # patch inspect.findsource as is intended + # References: + # - https://github.com/ipython/ipython/issues/1456 + # - https://github.com/ipython/ipython/commit/298fdab5025745cd25f7f48147d8bc4c65be9d4a#diff-3a77d00d5690f670e9ac680f06b8ffe7ca902c6d325673f32e719d8e55b11ae3R209 + global IN_IPYTHON + global CHECKED_IN_IPYTHON + global IPYTHON_INSPECT_PATCHED + if not CHECKED_IN_IPYTHON: + try: + get_ipython() + IN_IPYTHON = True + except: + pass + CHECKED_IN_IPYTHON = True + if IN_IPYTHON and not IPYTHON_INSPECT_PATCHED: + import IPython.core.ultratb + + inspect.findsource = IPython.core.ultratb.findsource + IPYTHON_INSPECT_PATCHED = False + # Grab stack frames + try: + frames = inspect.stack() + except ImportError: + # HACK ImportError Fix for lazy_import rasing on autosklearn/smac: emcee + return True + for i, frame_info in enumerate(frames): + if max_depth != -1 and i >= max_depth: + break + if ( + frame_info.function == method_name + and "self" in frame_info.frame.f_locals + and frame_info.frame.f_locals["self"] is obj + ): + return True + return False
stream_edit.js: Add `rerender_subscribers_list()` function. This function can be used for updating the subscribers list correctly when a subscriber is added or removed.
@@ -23,6 +23,15 @@ function get_email_of_subscribers(subscribers) { return emails; } +function rerender_subscribers_list(sub) { + var emails = get_email_of_subscribers(sub.subscribers); + var subscribers_list = list_render.get("stream_subscribers/" + sub.stream_id); + + // Changing the data clears the rendered list and the list needs to be re-rendered. + subscribers_list.data(emails); + subscribers_list.render(); +} + exports.collapse = function (sub) { // I am not sure whether this code is really correct; it was extracted // from subs.update_settings_for_unsubscribed() and possibly pre-dates
Set keepalived branch to master The latest keepalived role tag (7.0.0) is too old to have incorporated fixes outlined in that are required for a functioning multi-node keepalived/haproxy setup.
- name: keepalived scm: git src: https://github.com/evrardjp/ansible-keepalived - version: 7.0.0 - trackbranch: None + version: 460fc120b8263bcafc996a3859c9c005fb434447 + trackbranch: master shallow_since: '2022-05-03' - name: lxc_container_create scm: git
Remove undefined parameter from docstring It looks like the ability to pass the `config` parameter was removed but the docstring explaining what it did stayed
@@ -47,14 +47,7 @@ class ElastAlerter(object): """ The main ElastAlert runner. This class holds all state about active rules, controls when queries are run, and passes information between rules and alerts. - :param args: An argparse arguments instance. Should contain debug and start - - :param conf: The configuration dictionary. At the top level, this - contains global options, and under 'rules', contains all state relating - to rules and alerts. In each rule in conf['rules'], the RuleType and Alerter - instances live under 'type' and 'alerts', respectively. The conf dictionary - should not be passed directly from a configuration file, but must be populated - by config.py:load_rules instead. """ + :param args: An argparse arguments instance. Should contain debug and start""" thread_data = threading.local()
add container_types.make_dict, c.f. no tests (since there are no tests for make_list or make_tuple either, though we probably should add some)
@@ -110,6 +110,15 @@ def dict_untake(x, idx, template): dict_untake.defvjp(lambda g, ans, vs, gvs, x, idx, template : dict_take(g, idx)) dict_untake.defvjp_is_zero(argnums=(1, 2)) +def make_dict(pairs): + keys, vals = zip(*pairs) + return _make_dict(make_list(*keys), make_list(*vals)) +@primitive +def _make_dict(keys, vals): + return dict(zip(keys, vals)) +_make_dict.defvjp(lambda g, ans, vs, gvs, keys, vals: [g[key] for key in keys], + argnum=1) + class DictVSpace(VSpace): def __init__(self, value): self.shape = {k : vspace(v) for k, v in iteritems(value)}
Update morse_code_implementation.py * Update morse_code_implementation.py Added more characters to MORSE_CODE_DICT for a more complete dictionary. Split words with "/" instead of a space as is standard. Fixed bug when encrypting a message with a comma. * Fixed comment typo
# Python program to implement Morse Code Translator - # Dictionary representing the morse code chart MORSE_CODE_DICT = { "A": ".-", @@ -39,13 +38,22 @@ MORSE_CODE_DICT = { "8": "---..", "9": "----.", "0": "-----", + "&": ".-...", + "@": ".--.-.", + ":": "---...", ",": "--..--", ".": ".-.-.-", + "'": ".----.", + '"': ".-..-.", "?": "..--..", "/": "-..-.", + "=": "-...-", + "+": ".-.-.", "-": "-....-", "(": "-.--.", ")": "-.--.-", + # Exclamation mark is not in ITU-R recommendation + "!": "-.-.--", } @@ -53,42 +61,24 @@ def encrypt(message): cipher = "" for letter in message: if letter != " ": - cipher += MORSE_CODE_DICT[letter] + " " else: + cipher += "/ " - cipher += " " - - return cipher + # Remove trailing space added on line 64 + return cipher[:-1] def decrypt(message): - - message += " " - decipher = "" - citext = "" - for letter in message: - - if letter != " ": - - i = 0 - - citext += letter - - else: - - i += 1 - - if i == 2: - - decipher += " " - else: - + letters = message.split(" ") + for letter in letters: + if letter != "/": decipher += list(MORSE_CODE_DICT.keys())[ - list(MORSE_CODE_DICT.values()).index(citext) + list(MORSE_CODE_DICT.values()).index(letter) ] - citext = "" + else: + decipher += " " return decipher
Scons: Catch segfault and try to make a helpful report * With informative URLs added this will become even better, but for now it will do.
@@ -118,7 +118,7 @@ def _filterMsvcLinkOutput(env, module_mode, data, exit_code): # To work around Windows not supporting command lines of greater than 10K by # default: -def getWindowsSpawnFunction(env, module_mode, source_files): +def _getWindowsSpawnFunction(env, module_mode, source_files): def spawnWindowsCommand( sh, escape, cmd, args, os_env ): # pylint: disable=unused-argument @@ -322,23 +322,30 @@ def runSpawnMonitored(sh, cmd, args, env): return thread.getSpawnResult() -def getWrappedSpawnFunction(): - def spawnCommand(sh, escape, cmd, args, env): +def _getWrappedSpawnFunction(env): + def spawnCommand(sh, escape, cmd, args, _env): # signature needed towards Scons core, pylint: disable=unused-argument # Avoid using ccache on binary constants blob, not useful and not working # with old ccache. if '"__constants_data.o"' in args or '"__constants_data.os"' in args: - env = dict(env) - env["CCACHE_DISABLE"] = "1" + _env = dict(_env) + _env["CCACHE_DISABLE"] = "1" - result, exception = runSpawnMonitored(sh, cmd, args, env) + result, exception = runSpawnMonitored(sh, cmd, args, _env) if exception: closeSconsProgressBar() raise exception + # Segmentation fault should give a clear error. + if result == -11: + scons_logger.sysexit( + "Error, the C compiler '%s' crashed with segfault. Consider upgrading it or using --clang option." + % env.the_compiler + ) + return result return spawnCommand @@ -346,8 +353,8 @@ def getWrappedSpawnFunction(): def enableSpawnMonitoring(env, win_target, module_mode, source_files): if win_target: - env["SPAWN"] = getWindowsSpawnFunction( + env["SPAWN"] = _getWindowsSpawnFunction( env=env, module_mode=module_mode, source_files=source_files ) else: - env["SPAWN"] = getWrappedSpawnFunction() + env["SPAWN"] = _getWrappedSpawnFunction(env=env)
More visible telnet conch message Capture traceback when trying to import required twisted modules, print it in case telnet is enabled, and mention the settings variable that can be used to suppress the message. Thanks
@@ -6,6 +6,7 @@ See documentation in docs/topics/telnetconsole.rst import pprint import logging +import traceback from twisted.internet import protocol try: @@ -13,6 +14,7 @@ try: from twisted.conch.insults import insults TWISTED_CONCH_AVAILABLE = True except (ImportError, SyntaxError): + _TWISTED_CONCH_TRACEBACK = traceback.format_exc() TWISTED_CONCH_AVAILABLE = False from scrapy.exceptions import NotConfigured @@ -40,8 +42,9 @@ class TelnetConsole(protocol.ServerFactory): if not crawler.settings.getbool('TELNETCONSOLE_ENABLED'): raise NotConfigured if not TWISTED_CONCH_AVAILABLE: - raise NotConfigured('TelnetConsole not enabled: failed to import ' - 'required twisted modules.') + raise NotConfigured( + 'TELNETCONSOLE_ENABLED setting is True but required twisted ' + 'modules failed to import:\n' + _TWISTED_CONCH_TRACEBACK) self.crawler = crawler self.noisy = False self.portrange = [int(x) for x in crawler.settings.getlist('TELNETCONSOLE_PORT')]
fw/output: Implement retrieving "augmentations" for `JobDatabaseOutput`s Enable retrieving augmentations on a per-job basis when using a Postgres database backend.
@@ -1010,6 +1010,7 @@ class RunDatabaseOutput(DatabaseOutput, RunOutputCommon): jobs = self._read_db(columns, tables, conditions) for job in jobs: + job['augmentations'] = self._get_job_augmentations(job['oid']) job['workload_parameters'] = workload_params.pop(job['oid'], {}) job['runtime_parameters'] = runtime_params.pop(job['oid'], {}) job.pop('oid') @@ -1173,6 +1174,15 @@ class RunDatabaseOutput(DatabaseOutput, RunOutputCommon): logger.debug('Failed to deserialize job_oid:{}-"{}":"{}"'.format(job_oid, k, v)) return parm_dict + def _get_job_augmentations(self, job_oid): + columns = ['jobs_augs.augmentation_oid', 'augmentations.name', + 'augmentations.oid', 'jobs_augs.job_oid'] + tables = ['jobs_augs', 'augmentations'] + conditions = ['jobs_augs.job_oid = \'{}\''.format(job_oid), + 'jobs_augs.augmentation_oid = augmentations.oid'] + augmentations = self._read_db(columns, tables, conditions) + return [aug['name'] for aug in augmentations] + def _list_runs(self): columns = ['runs.run_uuid', 'runs.run_name', 'runs.project', 'runs.project_stage', 'runs.status', 'runs.start_time', 'runs.end_time'] @@ -1224,3 +1234,11 @@ class JobDatabaseOutput(DatabaseOutput): def __str__(self): return '{}-{}-{}'.format(self.id, self.label, self.iteration) + + @property + def augmentations(self): + job_augs = set([]) + if self.spec: + for aug in self.spec.augmentations: + job_augs.add(aug) + return list(job_augs)
Handle weird GKE Kubernetes versions This commit handles weird non-standard versions that Kubernetes clusters running in GKE return, like "1.14+". Fix
@@ -92,6 +92,26 @@ def kube_version_json(): return json.loads(stdout) +def strip_version(ver: str): + """ + strip_version is needed to strip a major/minor version of non-standard symbols. For example, when working with GKE, + `kubectl version` returns a minor version like '14+', which is not semver or any standard version, for that matter. + So we handle exceptions like that here. + :param ver: version string + :return: stripped version + """ + + try: + return int(ver) + except ValueError as e: + # GKE returns weird versions with '+' in the end + if ver[-1] == '+': + return int(ver[:-1]) + + # If we still have not taken care of this, raise the error + raise ValueError(e) + + def kube_server_version(version_json=None): if not version_json: version_json = kube_version_json() @@ -99,8 +119,8 @@ def kube_server_version(version_json=None): server_json = version_json.get('serverVersion', {}) if server_json: - server_major = server_json.get('major', None) - server_minor = server_json.get('minor', None) + server_major = strip_version(server_json.get('major', None)) + server_minor = strip_version(server_json.get('minor', None)) return f"{server_major}.{server_minor}" else: @@ -114,8 +134,8 @@ def kube_client_version(version_json=None): client_json = version_json.get('clientVersion', {}) if client_json: - client_major = client_json.get('major', None) - client_minor = client_json.get('minor', None) + client_major = strip_version(client_json.get('major', None)) + client_minor = strip_version(client_json.get('minor', None)) return f"{client_major}.{client_minor}" else:
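A standalone sketch of the strip_version() behaviour described above, for illustration only (the logic follows the diff; the example calls and values are hypothetical):

def strip_version(ver: str) -> int:
    # Strip non-standard suffixes such as GKE's '14+' from a major/minor version component.
    try:
        return int(ver)
    except ValueError:
        if ver.endswith('+'):
            return int(ver[:-1])
        raise

print(strip_version("14"))   # -> 14
print(strip_version("14+"))  # -> 14 (GKE-style minor version)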
Refactor transposition of data Use the fact that all possible classes (ndarray, ListOfImages, DatasetView) have a .transpose() method.
@@ -265,32 +265,24 @@ class StackView(qt.QMainWindow): """ assert self._stack is not None assert 0 <= self._perspective < 3 + + # ensure we have the stack encapsulated in an array like object + # having a transpose() method if isinstance(self._stack, numpy.ndarray): - if self._perspective == 0: self.__transposed_view = self._stack - if self._perspective == 1: - self.__transposed_view = numpy.rollaxis(self._stack, 1) - if self._perspective == 2: - self.__transposed_view = numpy.rollaxis(self._stack, 2) + elif h5py is not None and isinstance(self._stack, h5py.Dataset) or \ isinstance(self._stack, DatasetView): - if self._perspective == 0: - self.__transposed_view = self._stack - if self._perspective == 1: - self.__transposed_view = DatasetView(self._stack, - transposition=(1, 0, 2)) - if self._perspective == 2: - self.__transposed_view = DatasetView(self._stack, - transposition=(2, 0, 1)) + self.__transposed_view = DatasetView(self._stack) + elif isinstance(self._stack, ListOfImages): - if self._perspective == 0: - self.__transposed_view = self._stack + self.__transposed_view = ListOfImages(self._stack) + + # transpose the array like object if necessary if self._perspective == 1: - self.__transposed_view = ListOfImages(self._stack.images, - transposition=(1, 0, 2)) - if self._perspective == 2: - self.__transposed_view = ListOfImages(self._stack.images, - transposition=(2, 0, 1)) + self.__transposed_view = self.__transposed_view.transpose((1, 0, 2)) + elif self._perspective == 2: + self.__transposed_view = self.__transposed_view.transpose((2, 0, 1)) self._browser.setRange(0, self.__transposed_view.shape[0] - 1) self._browser.setValue(0)
MAINT: Cast x to float explicitly in CubicHermiteSpline It fixes some problems in pandas, not that their support for scipy interpolators is very good
@@ -26,9 +26,9 @@ def prepare_input(x, y, axis, dydx=None): """ x, y = map(np.asarray, (x, y)) - if np.issubdtype(x.dtype, np.complexfloating): raise ValueError("`x` must contain real values.") + x = x.astype(float) if np.issubdtype(y.dtype, np.complexfloating): dtype = complex
Restore --print-found option. Now --print-all and --print-found complement each other. The default remains that only the found are reported.
@@ -476,9 +476,13 @@ def main(): "On the other hand, this may cause a long delay to gather all results." ) parser.add_argument("--print-all", - action="store_true", dest="print_all", default=False, + action="store_true", dest="print_all", help="Output sites where the username was not found." ) + parser.add_argument("--print-found", + action="store_false", dest="print_all", default=False, + help="Output sites where the username was found." + ) parser.add_argument("--no-color", action="store_true", dest="no_color", default=False, help="Don't color terminal output"
Fix bug created in package install by trying to set consistent api versions across calls
@@ -68,6 +68,7 @@ class CreatePackageZipBuilder(BasePackageZipBuilder): self._write_package_xml(package_xml) class InstallPackageZipBuilder(BasePackageZipBuilder): + api_version = '33.0' def __init__(self, namespace, version): if not namespace: @@ -80,7 +81,7 @@ class InstallPackageZipBuilder(BasePackageZipBuilder): def _populate_zip(self): package_xml = INSTALLED_PACKAGE_PACKAGE_XML.format( namespace=self.namespace, - version=self.version, + version=self.api_version, ) self._write_package_xml(package_xml)
test the names of the curves against the stored list We were testing the wrong version of the curve string. With this change on python-cryptography 2.6.1 and openssl 1.1.1c, we drop from 26 xfailed to 14 xfailed tests.
@@ -254,8 +254,8 @@ class TestPGPKey_Management(object): if not alg.can_gen: pytest.xfail('Key algorithm {} not yet supported'.format(alg.name)) - if isinstance(size, EllipticCurveOID) and ((not size.can_gen) or size.name not in _openssl_get_supported_curves()): - pytest.xfail('Curve {} not yet supported'.format(size.name)) + if isinstance(size, EllipticCurveOID) and ((not size.can_gen) or size.curve.name not in _openssl_get_supported_curves()): + pytest.xfail('Curve {} not yet supported'.format(size.curve.name)) key = self.keys[pkspec] subkey = PGPKey.new(*skspec) @@ -465,8 +465,8 @@ class TestPGPKey_Management(object): if not alg.can_gen: pytest.xfail('Key algorithm {} not yet supported'.format(alg.name)) - if isinstance(size, EllipticCurveOID) and ((not size.can_gen) or size.name not in _openssl_get_supported_curves()): - pytest.xfail('Curve {} not yet supported'.format(size.name)) + if isinstance(size, EllipticCurveOID) and ((not size.can_gen) or size.curve.name not in _openssl_get_supported_curves()): + pytest.xfail('Curve {} not yet supported'.format(size.curve.name)) # revoke the subkey key = self.keys[pkspec]
Updated bug report template SVG and GUI framework information only required for TexText 0.11
@@ -54,7 +54,11 @@ If applicable and helpful, add screenshots to help explain your problem. - TexText version: - Inkscape version: - Operating system: [e.g. Windows 10, 1803, 32-bit] -- SVG-converter installed (pstoedit+ghostscript or pdf2svg): -- GUI framework installed (PyGTK, PyGTK+PyGTK-Sourceview, TkInter): - Windows only: LaTeX-distribution (MiKTeX, TeX-Live): - Windows only: TexText installed via batch script or installer file: + +**Additional information if you still use Inkscape 0.92.x / TexText 0.11** + +- SVG-converter installed (pstoedit+ghostscript or pdf2svg): +- GUI framework installed (PyGTK, PyGTK+PyGTK-Sourceview, TkInter): +
Update index.html link to FAQ added
<!--<![endif]--> -<p>Read more <a href="{% url 'about' %}">about the site</a>. And please read our <a href="{% url 'caution' %}">guidelines for using this data</a>.</p> +<p>Read more <a href="{% url 'about' %}">about the site</a> and see our <a href="{% url 'faq' %}">FAQs</a>. And please read our <a href="{% url 'caution' %}">guidelines for using this data</a>.</p> {% endblock %}
check H5public.h instead of H5pubconf.h - print hdf5 version number info but don't check min version (this code is too fragile)
@@ -27,21 +27,13 @@ else: def check_hdf5version(hdf5_includedir): try: - f = open(os.path.join(hdf5_includedir, 'H5pubconf-64.h'), **open_kwargs) - except IOError: - try: - f = open(os.path.join(hdf5_includedir, 'H5pubconf-32.h'), - **open_kwargs) - except IOError: - try: - f = open(os.path.join(hdf5_includedir, 'H5pubconf.h'), - **open_kwargs) + f = open(os.path.join(hdf5_includedir, 'H5public.h'), **open_kwargs) except IOError: return None hdf5_version = None for line in f: - if line.startswith('#define H5_VERSION'): - hdf5_version = line.split()[2] + if line.startswith('#define H5_VERS_INFO'): + hdf5_version = line.split('"')[1] return hdf5_version @@ -281,13 +273,6 @@ def _populate_hdf5_info(dirstosearch, inc_dirs, libs, lib_dirs): global HDF5_incdir, HDF5_dir, HDF5_libdir if HAS_PKG_CONFIG: - dep = subprocess.Popen(['pkg-config', '--modversion', 'hdf5'], - stdout=subprocess.PIPE).communicate()[0] - - hdf5_version = dep.decode().strip() - if hdf5_version < _HDF5_MIN_VERSION: - raise ValueError('HDF5 version >= {} is required'.format(_HDF5_MIN_VERSION)) - dep = subprocess.Popen(['pkg-config', '--cflags', 'hdf5'], stdout=subprocess.PIPE).communicate()[0] inc_dirs.extend([str(i[2:].decode()) for i in dep.split() if @@ -314,7 +299,8 @@ def _populate_hdf5_info(dirstosearch, inc_dirs, libs, lib_dirs): else: HDF5_dir = direc HDF5_incdir = os.path.join(direc, 'include') - sys.stdout.write('HDF5 found in %s\n' % HDF5_dir) + sys.stdout.write('%s found in %s\n' % + (hdf5_version,HDF5_dir)) break if HDF5_dir is None: raise ValueError('did not find HDF5 headers') @@ -324,6 +310,9 @@ def _populate_hdf5_info(dirstosearch, inc_dirs, libs, lib_dirs): hdf5_version = check_hdf5version(HDF5_incdir) if hdf5_version is None: raise ValueError('did not find HDF5 headers in %s' % HDF5_incdir) + else: + sys.stdout.write('%s found in %s\n' % + (hdf5_version,HDF5_dir)) if HDF5_libdir is None and HDF5_dir is not None: HDF5_libdir = os.path.join(HDF5_dir, 'lib')
fix(stock_board_concept_em.py): fix stock_board_concept_hist_em interface
@@ -230,7 +230,7 @@ def index_value_hist_funddb( if __name__ == "__main__": stock_zh_index_hist_csindex_df = stock_zh_index_hist_csindex( - symbol="000859", start_date="20220410", end_date="20220709" + symbol="000832", start_date="20221122", end_date="20221123" ) print(stock_zh_index_hist_csindex_df)
Fix FP in MySQL data leakage. Use re2 compatible range expression. Added data file for regexp-assemble.py
@@ -352,7 +352,7 @@ SecRule TX:sql_error_match "@eq 1" \ ver:'OWASP_CRS/3.4.0-dev',\ severity:'CRITICAL',\ chain" - SecRule RESPONSE_BODY "@rx (?i)(?:supplied argument is not a valid MySQL|Column count doesn't match value count at row|mysql_fetch_array\(\)|on MySQL result index|You have an error in your SQL syntax;|You have an error in your SQL syntax near|MySQL server version for the right syntax to use|\[MySQL\]\[ODBC|Column count doesn't match|Table '[^']+' doesn't exist|SQL syntax.*MySQL|Warning.*mysql_.*|valid MySQL result|MySqlClient\.|ERROR [0-9]{4} \([A-Z0-9]{5}\):)" \ + SecRule RESPONSE_BODY "@rx (?i)(?:MyS(?:QL server version for the right syntax to use|qlClient\.)|(?:supplied argument is not a valid |SQL syntax.*)MySQL|Column count doesn't match(?: value count at row)?|(?:Table '[^']+' doesn't exis|valid MySQL resul)t|You have an error in your SQL syntax(?: near|;)|Warning.{1,10}mysql_(?:[a-z_()]{1,26})?|ERROR [0-9]{4} \([A-Z0-9]{5}\):|mysql_fetch_array\(\)|on MySQL result index|\[MySQL\]\[ODBC)" \ "capture,\ setvar:'tx.outbound_anomaly_score_pl1=+%{tx.critical_anomaly_score}',\ setvar:'tx.sql_injection_score=+%{tx.critical_anomaly_score}'"
[sync] remove `SyncEngine._dir_snapshot_with_mignore` move functionality inline
@@ -1582,7 +1582,9 @@ class SyncEngine: changes = [] snapshot_time = time.time() - snapshot = self._dir_snapshot_with_mignore(self.dropbox_path) + snapshot = DirectorySnapshot( + self.dropbox_path, listdir=self._scandir_with_mignore + ) lowercase_snapshot_paths: Set[str] = set() # don't use iterator here but pre-fetch all entries @@ -3406,7 +3408,7 @@ class SyncEngine: # add created and deleted events of children as appropriate - snapshot = self._dir_snapshot_with_mignore(local_path) + snapshot = DirectorySnapshot(local_path, listdir=self._scandir_with_mignore) lowercase_snapshot_paths = {x.lower() for x in snapshot.paths} local_path_lower = local_path.lower() @@ -3475,12 +3477,6 @@ class SyncEngine: if not self._is_mignore_path(self.to_dbx_path(f.path), f.is_dir()) ] - def _dir_snapshot_with_mignore(self, path: str) -> DirectorySnapshot: - return DirectorySnapshot( - path, - listdir=self._scandir_with_mignore, - ) - # ====================================================================================== # Workers for upload, download and connection monitoring threads
mgr: improve/fix disabled modules check Follow up on "disabled_modules" is always a list; it's the items in the list that can be dicts in Mimic. Many ways to fix this, here's one.
- name: set _disabled_ceph_mgr_modules fact set_fact: - _disabled_ceph_mgr_modules: "{% if _ceph_mgr_modules | type_debug == 'list' %}[]{% elif _ceph_mgr_modules.disabled_modules | type_debug == 'dict' %}{{ _ceph_mgr_modules['disabled_modules'] }}{% else %}{{ _ceph_mgr_modules['disabled_modules'] | map(attribute='name') | list }}{% endif %}" + _disabled_ceph_mgr_modules: "{% if _ceph_mgr_modules.disabled_modules | length == 0 %}[]{% elif _ceph_mgr_modules.disabled_modules[0] | type_debug != 'dict' %}{{ _ceph_mgr_modules['disabled_modules'] }}{% else %}{{ _ceph_mgr_modules['disabled_modules'] | map(attribute='name') | list }}{% endif %}" when: - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
Change --make to --nomake and update the logic
@@ -9,7 +9,7 @@ Optional command line arguments: -v, --version : version, defaults to latest -d, --dir : install directory, defaults to '~/.cmdstanpy -s (--silent) : install with /VERYSILENT instead of /SILENT for RTools - -m --make : install mingw32-make (Windows RTools 4.0 only) + -m --nomake : don't install mingw32-make (Windows RTools 4.0 only) """ import argparse import contextlib @@ -42,7 +42,7 @@ def usage(): -v (--version) :CmdStan version -d (--dir) : install directory -s (--silent) : install with /VERYSILENT instead of /SILENT for RTools - -m (--make) : install mingw32-make (Windows RTools 4.0 only) + -m (--nomake) : don't install mingw32-make (Windows RTools 4.0 only) -h (--help) : this message """ ) @@ -281,7 +281,7 @@ def main(): parser.add_argument('--version', '-v') parser.add_argument('--dir', '-d') parser.add_argument('--silent', '-s', action='store_true') - parser.add_argument('--make', '-m', action='store_true') + parser.add_argument('--nomake', '-m', action='store_false') args = parser.parse_args(sys.argv[1:]) toolchain = get_toolchain_name() @@ -322,7 +322,7 @@ def main(): toolchain_loc, toolchain_version + EXTENSION, version, silent ) if ( - vars(args)['make'] + vars(args)['nomake'] is None and (platform.system() == 'Windows') and (version in ('4.0', '4', '40')) ):
Pontoon: Update Chinese (China) (zh-CN) localization of AMO Localization authors: passionforlife
@@ -4,7 +4,7 @@ msgstr "" "Project-Id-Version: AMO\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2019-07-02 08:24+0000\n" -"PO-Revision-Date: 2019-07-10 03:27+0000\n" +"PO-Revision-Date: 2019-07-10 03:34+0000\n" "Last-Translator: passionforlife <[email protected]>\n" "Language-Team: Chinese Simplified, China <[email protected]>\n" "Language: zh_CN\n"
Nix config defaults here; the default is specified in DatasourceConfigSchema
@@ -655,9 +655,6 @@ class BaseDataContext(object): runtime_environment={ "data_context": self }, - config_defaults={ - "module_name": "great_expectations.datasource" - } ) return datasource
CI: cache Python dependencies Reduces frequency of using pipenv to install dependencies in CI. Works by caching the entire Python directory. Only a full cache hit will skip the pipenv steps; a partial cache hit will still be followed by using pipenv to install from the pipfiles. * Disable pip cache
# https://aka.ms/yaml variables: + PIP_NO_CACHE_DIR: false PIPENV_HIDE_EMOJIS: 1 PIPENV_IGNORE_VIRTUALENVS: 1 PIPENV_NOSPIN: 1 @@ -12,7 +13,6 @@ jobs: vmImage: ubuntu-18.04 variables: - PIP_CACHE_DIR: ".cache/pip" PRE_COMMIT_HOME: $(Pipeline.Workspace)/pre-commit-cache BOT_API_KEY: foo BOT_SENTRY_DSN: blah @@ -29,11 +29,24 @@ jobs: versionSpec: '3.8.x' addToPath: true + - task: Cache@2 + displayName: 'Restore Python environment' + inputs: + key: python | $(Agent.OS) | "$(PythonVersion.pythonLocation)" | ./Pipfile | ./Pipfile.lock + restoreKeys: | + python | "$(PythonVersion.pythonLocation)" | ./Pipfile.lock + python | "$(PythonVersion.pythonLocation)" | ./Pipfile + python | "$(PythonVersion.pythonLocation)" + cacheHitVar: PY_ENV_RESTORED + path: $(PythonVersion.pythonLocation) + - script: pip install pipenv displayName: 'Install pipenv' + condition: and(succeeded(), ne(variables.PY_ENV_RESTORED, 'true')) - script: pipenv install --dev --deploy --system displayName: 'Install project using pipenv' + condition: and(succeeded(), ne(variables.PY_ENV_RESTORED, 'true')) # Create an executable shell script which replaces the original pipenv binary. # The shell script ignores the first argument and executes the rest of the args as a command.
remove unused import I'm a goofy goober
@@ -36,7 +36,6 @@ from corehq.apps.hqwebapp.doc_info import get_doc_info_by_id from corehq.apps.hqwebapp.templatetags.hq_shared_tags import pretty_doc_info from corehq.apps.linked_domain.applications import unlink_apps_in_domain from corehq.apps.linked_domain.const import ( - LINKED_MODELS, LINKED_MODELS_MAP, MODEL_APP, MODEL_FIXTURE,
[Data] Add codeowners to preprocessor tests Additional codeowners were added to ray/data/preprocessors package in This is a followup to add the same codeowners for preprocessors tests as well.
# Ray data. /python/ray/data/ @ericl @scv119 @clarkzinzow @jjyao @jianoaix @c21 /python/ray/data/preprocessors/ @clarkzinzow @jiaodong @Yard1 @bveeramani @matthewdeng @amogkam +/python/ray/data/tests/preprocessors/ @clarkzinzow @jiaodong @Yard1 @bveeramani @matthewdeng @amogkam /doc/source/data/ @ericl @scv119 @clarkzinzow @jjyao @jianoaix @maxpumperla @c21 @ray-project/ray-docs # Ray workflows.
[nixio] Avoid == comparison with quantities Comparing a Quantity using == currently raises a warning (via numpy) and will raise an error in the future. Perform an instance check before comparing.
@@ -1127,7 +1127,9 @@ class NixIO(BaseIO): values = create_quantity(values, units) if len(values) == 1: values = values[0] - if values == "" and prop.definition == EMPTYANNOTATION: + if (not isinstance(values, pq.Quantity) and + values == "" and + prop.definition == EMPTYANNOTATION): values = list() neo_attrs[prop.name] = values neo_attrs["name"] = stringify(neo_attrs.get("neo_name"))
ebuild.processor: revert to single line ebd env export So non-file sending/sourcing works as expected when running phases where the tempdir isn't available.
@@ -691,7 +691,7 @@ class EbuildProcessor(object): # TODO: Move to using unprefixed lines to avoid leaking internal # variables to spawned commands once we use builtins for all commands # currently using pkgcore-ebuild-helper. - return '\n'.join(f"export {x}" for x in data) + return f"export {' '.join(data)}" def send_env(self, env_dict, async_req=False, tmpdir=None): """Transfer the ebuild's desired env (env_dict) to the running daemon.
rm technologies from test_heating_and_cooling Speeds up these tests and avoids a timeout in Julia, which currently throws errors (a separate issue will be raised)
@@ -13,7 +13,8 @@ from reo.src.wind import WindSAMSDK, combine_wind_files post = {"Scenario": { - "timeout_seconds": 1, + "timeout_seconds": 600, + "optimality_tolerance": 1.0, "Site": { "latitude": 37.78, "longitude": -122.45, "Financial": { @@ -44,15 +45,6 @@ post = {"Scenario": { "chp_fuel_type": "natural_gas", "chp_fuel_blended_monthly_rates_us_dollars_per_mmbtu": [11.0]*12 }, - "CHP": { - "prime_mover": "recip_engine", - "min_kw": 10, - "max_kw": 5250, - "installed_cost_us_dollars_per_kw": 1700, - "om_cost_us_dollars_per_kw": 10, - "om_cost_us_dollars_per_kwh": 0.05, - "min_turn_down_pct": 0.32 - }, "Storage": { "max_kwh": 0, "max_kw": 0, @@ -74,7 +66,7 @@ post = {"Scenario": { }, "ColdTES": { "min_gal": 0, - "max_gal": 50000, + "max_gal": 0, "installed_cost_us_dollars_per_gal": 3, "thermal_decay_rate_fraction": 0.004, "om_cost_us_dollars_per_gal": 0, @@ -82,24 +74,11 @@ post = {"Scenario": { }, "HotTES": { "min_gal": 0, - "max_gal": 50000, + "max_gal": 0, "installed_cost_us_dollars_per_gal": 3, "thermal_decay_rate_fraction": 0.004, "om_cost_us_dollars_per_gal": 0, "internal_efficiency_pct": 0.97, - }, - "AbsorptionChiller": { - "min_ton": 0, - "max_ton": 5000, - "chiller_cop": 0.7, - "installed_cost_us_dollars_per_ton": 2000, - "om_cost_us_dollars_per_ton_per_year": 2, - }, - "PV": { - "min_kw": 0, - "max_kw": 0, - "installed_cost_us_dollars_per_kw": 1700.0, - "om_cost_us_dollars_per_kw": 16, } }}}
Don't raise when deleting a non-existent image in docker When deleting an image, we check whether 404 is in the exception. If the image doesn't exist in docker, we can continue to delete the image in the DB.
@@ -38,6 +38,8 @@ class DockerDriver(driver.ContainerImageDriver): with docker_utils.docker_client() as docker: try: docker.remove_image(img_id) + except errors.ImageNotFound: + return except errors.APIError as api_error: raise exception.ZunException(str(api_error)) except Exception as e:
doc/build_plugin_docs: Only load the required plugins When updating the pluginloader, only load the modules we want to document rather than loading all available plugins and then filtering.
@@ -25,7 +25,7 @@ from wa.utils.doc import (strip_inlined_text, get_rst_from_plugin, get_params_rst, underline, line_break) from wa.utils.misc import capitalize -GENERATE_FOR_PLUGIN = ['workload', 'instrument', 'output_processor'] +GENERATE_FOR_PACKAGES = ['wa.workloads', 'wa.instruments', 'wa.output_processors'] def insert_contents_table(title='', depth=1): """ @@ -42,13 +42,11 @@ def insert_contents_table(title='', depth=1): def generate_plugin_documentation(source_dir, outdir, ignore_paths): pluginloader.clear() - pluginloader.update(paths=[source_dir], ignore_paths=ignore_paths) + pluginloader.update(packages=GENERATE_FOR_PACKAGES) if not os.path.exists(outdir): os.mkdir(outdir) for ext_type in pluginloader.kinds: - if not ext_type in GENERATE_FOR_PLUGIN: - continue outfile = os.path.join(outdir, '{}s.rst'.format(ext_type)) with open(outfile, 'w') as wfh: wfh.write('.. _{}s:\n\n'.format(ext_type.replace('_', '-')))
Fix latex formula error about *normal Summary: issue: the latex for the normal distribution should be `\mathcal{N}(\text{mean}, \text{std}^2)` Pull Request resolved:
@@ -94,7 +94,7 @@ def uniform_(tensor, a=0., b=1.): def normal_(tensor, mean=0., std=1.): # type: (Tensor, float, float) -> Tensor r"""Fills the input Tensor with values drawn from the normal - distribution :math:`\mathcal{N}(\text{mean}, \text{std})`. + distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`. Args: tensor: an n-dimensional `torch.Tensor` @@ -262,7 +262,7 @@ def xavier_normal_(tensor, gain=1.): described in `Understanding the difficulty of training deep feedforward neural networks` - Glorot, X. & Bengio, Y. (2010), using a normal distribution. The resulting tensor will have values sampled from - :math:`\mathcal{N}(0, \text{std})` where + :math:`\mathcal{N}(0, \text{std}^2)` where .. math:: \text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan\_in} + \text{fan\_out}}} @@ -333,7 +333,7 @@ def kaiming_normal_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'): described in `Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification` - He, K. et al. (2015), using a normal distribution. The resulting tensor will have values sampled from - :math:`\mathcal{N}(0, \text{std})` where + :math:`\mathcal{N}(0, \text{std}^2)` where .. math:: \text{std} = \sqrt{\frac{2}{(1 + a^2) \times \text{fan\_in}}}
Langkit_Support.Diagnostics: refactor To_Pretty_String TN:
@@ -8,14 +8,10 @@ package body Langkit_Support.Diagnostics is function To_Pretty_String (D : Diagnostic) return String is Sloc : constant Source_Location := Start_Sloc (D.Sloc_Range); - Line : constant String := Sloc.Line'Img; - Column : constant String := Sloc.Column'Img; Sloc_Prefix : constant String := (if Sloc = No_Source_Location then "" - else (Line (Line'First + 1 .. Line'Last) & - ":" & Column (Column'First + 1 .. Column'Last) - & ": ")); + else Image (Sloc) & ": "); begin return Sloc_Prefix & Image (To_Wide_Wide_String (D.Message)); end To_Pretty_String;
Turn off strict parsing for pls playlist files Some PLS files contain one 'Version' key for each file in the playlist. This ultimately has no impact on how mopidy parses files in the playlist, and therefore it should be as tolerant as possible of real-world playlist files.
@@ -77,7 +77,7 @@ def parse_extm3u(data): def parse_pls(data): # TODO: convert non URIs to file URIs. try: - cp = configparser.RawConfigParser() + cp = configparser.RawConfigParser(strict=False) cp.read_string(data.decode()) except configparser.Error: return
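As a minimal, self-contained illustration of the reasoning above (the PLS content is made up): with strict=False, configparser tolerates the repeated 'Version' key that some players write, where the default strict mode would raise DuplicateOptionError.

import configparser

data = '''[playlist]
File1=http://example.com/a.mp3
Version=2
File2=http://example.com/b.mp3
Version=2
NumberOfEntries=2
'''

cp = configparser.RawConfigParser(strict=False)  # strict=True (the default) would raise DuplicateOptionError here
cp.read_string(data)
print(cp.get("playlist", "File1"))  # -> http://example.com/a.mp3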
Fixes all URLs in the Site cog. This changes URLs for stuff like FAQ, rules, and the Asking Good Questions page to fit the Django format.
@@ -9,7 +9,7 @@ from bot.pagination import LinePaginator log = logging.getLogger(__name__) -INFO_URL = f"{URLs.site_schema}{URLs.site}/info" +PAGES_URL = f"{URLs.site_schema}{URLs.site}/pages" class Site: @@ -46,7 +46,7 @@ class Site: async def site_resources(self, ctx: Context): """Info about the site's Resources page.""" - url = f"{INFO_URL}/resources" + url = f"{PAGES_URL}/resources" embed = Embed(title="Resources") embed.set_footer(text=url) @@ -63,9 +63,9 @@ class Site: async def site_help(self, ctx: Context): """Info about the site's Getting Help page.""" - url = f"{INFO_URL}/help" + url = f"{PAGES_URL}/asking-good-questions" - embed = Embed(title="Getting Help") + embed = Embed(title="Asking Good Questions") embed.set_footer(text=url) embed.colour = Colour.blurple() embed.description = ( @@ -80,7 +80,7 @@ class Site: async def site_faq(self, ctx: Context): """Info about the site's FAQ page.""" - url = f"{INFO_URL}/faq" + url = f"{PAGES_URL}/frequently-asked-questions" embed = Embed(title="FAQ") embed.set_footer(text=url) @@ -105,7 +105,7 @@ class Site: **`rules`:** The rules a user wants to get. """ rules_embed = Embed(title='Rules', color=Colour.blurple()) - rules_embed.url = f"{URLs.site_schema}{URLs.site}/about/rules" + rules_embed.url = f"{PAGES_URL}/rules" if not rules: # Rules were not submitted. Return the default description.
change order of event callbacks in trainer call the event callback after saving the model for this epoch to make it possible to copy the model from the callback to somewhere else.
@@ -426,7 +426,6 @@ class KrakenTrainer(object): self.stopper.update(eval_res['val_metric']) self.model.user_metadata['accuracy'].append((self.iterations, float(eval_res['val_metric']))) logger.info('Saving to {}_{}'.format(self.filename_prefix, self.stopper.epoch)) - event_callback(epoch=self.stopper.epoch, **eval_res) # fill one_channel_mode after 1 iteration over training data set im_mode = self.train_set.dataset.im_mode if im_mode in ['1', 'L']: @@ -436,6 +435,7 @@ class KrakenTrainer(object): self.model.save_model('{}_{}.mlmodel'.format(self.filename_prefix, self.stopper.epoch)) except Exception as e: logger.error('Saving model failed: {}'.format(str(e))) + event_callback(epoch=self.stopper.epoch, **eval_res) @classmethod def recognition_train_gen(cls,
[doc] Use python2 release for older mw versions mw 1.14 support is still available and will not be dropped before sunset of Python 2 support.
@@ -2754,8 +2754,9 @@ class APISite(BaseSite): warn('\n' + fill('Support of MediaWiki {version} will be dropped. ' 'It is recommended to use MediaWiki 1.19 or above. ' - 'You may use Pywikibot stable release 3.0.20200111 ' - 'for older MediaWiki versions. ' + 'You may use every Pywikibot 3.0.X release from ' + 'pypi index or the "python2" release from the ' + 'repository for older MediaWiki versions. ' 'See T245350 for further information.' .format(version=version)), FutureWarning)
Removed unused code By chance I stumbled upon this code that isn't used anywhere apparently, so I propose to remove it.
@@ -58,11 +58,6 @@ class Instrument(object): self.SCPI = includeSCPI self.adapter = adapter - class Object(object): - pass - - self.get = Object() - self.isShutdown = False log.info("Initializing %s." % self.name)
sendmail: check if koji_task_owner name exists *CLOUDBLD-2089 Also, change log.exception into log.info to prevent unwanted tracebacks
@@ -330,6 +330,9 @@ class SendMailPlugin(ExitPlugin): else: if not self.email_domain: raise RuntimeError("Empty email_domain specified") + elif not obj.get('name'): + raise RuntimeError("Koji task owner name is missing") + else: return '@'.join([obj['name'], self.email_domain]) def _get_koji_submitter(self): @@ -386,7 +389,7 @@ class SendMailPlugin(ExitPlugin): try: koji_task_owner_email = self._get_koji_submitter() except Exception: - self.log.exception("Failed to include a task submitter") + self.log.info("Failed to include a task submitter") else: receivers_list.append(koji_task_owner_email)
Remove useless translation markers These strings are used in an english default text and thus translating them doesn't make much sense. fixes
@@ -29,7 +29,6 @@ from indico.modules.events.contributions.models.fields import ContributionFieldV from indico.modules.events.models.persons import EventPerson from indico.modules.events.tracks.models.principals import TrackPrincipal from indico.modules.events.tracks.models.tracks import Track -from indico.util.i18n import _ from indico.util.spreadsheets import unique_col from indico.web.flask.templating import get_template_module @@ -121,9 +120,9 @@ def create_mock_abstract(event): kibble = User(full_name="Tom Kibble", first_name="Tom", last_name="Kibble", title="Prof.") higgs = User(full_name="Peter Higgs", first_name="Peter", last_name="Higgs", title="Prof.") - track = Track(title=_("Higgs Fields")) - session = Session(title=_("Higgs Fields Posters")) - contribution_type = ContributionType(name=_("Poster")) + track = Track(title="Higgs Fields") + session = Session(title="Higgs Fields Posters") + contribution_type = ContributionType(name="Poster") contribution = Contribution(title="Broken Symmetry and the Mass of Gauge Vector Mesons", track=track, session=session,
modify the annotations of delete_snapshot The annotations of delete_snapshot were written as backup.
@@ -239,11 +239,11 @@ class BlockStorage(service.UnifiedService): @service.should_be_overridden def delete_snapshot(self, snapshot): - """Delete the given backup. + """Delete the given snapshot. - Returns when the backup is actually deleted. + Returns when the snapshot is actually deleted. - :param backup: backup instance + :param snapshot: snapshot instance """ self._impl.delete_snapshot(snapshot)
Ensure image files are closed after opening Not sure I managed to trigger `image_as_rtf()` properly, but the other worked without a hitch and it seems a pretty benign change.
@@ -828,7 +828,7 @@ def image_as_rtf(match, question=None): if not os.path.isfile(page_file['fullpath']): server.fg_make_png_for_pdf_path(file_info['path'] + '.pdf', 'page') if os.path.isfile(page_file['fullpath']): - im = PIL.Image.open(page_file['fullpath']) + with PIL.Image.open(page_file['fullpath']) as im: page_file['width'], page_file['height'] = im.size output += rtf_image(page_file, width, False) else: @@ -987,7 +987,7 @@ def image_url(file_reference, alt_text, width, emoji=False, question=None, exter layout_width = attributes['width'] layout_height = attributes['height'] else: - im = PIL.Image.open(file_info['fullpath']) + with PIL.Image.open(file_info['fullpath']) as im: layout_width, layout_height = im.size return '<img ' + alt_text + 'class="daicon daimageref' + extra_class + '" width=' + str(layout_width) + ' height=' + str(layout_height) + ' style="' + width_string + '; height: auto;" src="' + the_url + '"/>' except:
plotting: ensure autoscaling See sympy/sympy#19088 and matplotlib/matplotlib#17004
@@ -942,6 +942,10 @@ def process_series(self): if parent.ylabel: self.ax.set_ylabel(parent.ylabel, position=(0, 1)) + if not isinstance(self.ax, Axes3D): + self.ax.autoscale_view(scalex=self.ax.get_autoscalex_on(), + scaley=self.ax.get_autoscaley_on()) + def show(self): self.process_series() # TODO after fixing https://github.com/ipython/ipython/issues/1255
utils/travis-script.sh: minor refactoring TN:
@@ -18,11 +18,5 @@ gprbuild -v --no-auto-path \ | tee TESTSUITE_OUT -# Exit with an error if there is a FAILED line in -# TESTSUITE_OUT. -if grep "FAILED " TESTSUITE_OUT; then - exit 1 -else - exit 0 -fi - +# Exit with an error if there is a FAILED line in TESTSUITE_OUT +! grep "FAILED " TESTSUITE_OUT > /dev/null
lib: avoid changing process.config PR-URL: Refs:
@@ -96,7 +96,7 @@ function configure (gyp, argv, callback) { log.verbose('build/' + configFilename, 'creating config file') - var config = process.config || {} + var config = Object.assign({}, process.config) var defaults = config.target_defaults var variables = config.variables
bugfix in grad.sacasscf: get_veff -> get_jk The veff-like derivatives in grad.sacasscf are of the form vj - vk/2 whether the underlying SCF is RHF or ROHF. Using get_veff causes errors in the latter case.
@@ -118,7 +118,9 @@ def Lorb_dot_dgorb_dx (Lorb, mc, mo_coeff=None, ci=None, atmlst=None, mf_grad=No dme0 = (gfock+gfock.T)/2 # This transpose is for the overlap matrix later on aapa = vj = vk = vhf_c = vhf_a = None - vhf1c, vhf1a, vhf1cL, vhf1aL = mf_grad.get_veff(mol, (dm_core, dm_cas, dmL_core, dmL_cas)) + vj, vk = mf_grad.get_jk (mol, (dm_core, dm_cas, dmL_core, dmL_cas)) + vhf1c, vhf1a, vhf1cL, vhf1aL = vj - vk * 0.5 + #vhf1c, vhf1a, vhf1cL, vhf1aL = mf_grad.get_veff(mol, (dm_core, dm_cas, dmL_core, dmL_cas)) hcore_deriv = mf_grad.hcore_generator(mol) s1 = mf_grad.get_ovlp(mol) @@ -260,7 +262,9 @@ def Lci_dot_dgci_dx (Lci, weights, mc, mo_coeff=None, ci=None, atmlst=None, mf_g dme0 = reduce(np.dot, (mo_coeff, (gfock+gfock.T)*.5, mo_coeff.T)) aapa = vj = vk = vhf_c = vhf_a = h1 = gfock = None - vhf1c, vhf1a = mf_grad.get_veff(mol, (dm_core, dm_cas)) + vj, vk = mf_grad.get_jk (mol, (dm_core, dm_cas)) + vhf1c, vhf1a = vj - vk * 0.5 + #vhf1c, vhf1a = mf_grad.get_veff(mol, (dm_core, dm_cas)) hcore_deriv = mf_grad.hcore_generator(mol) s1 = mf_grad.get_ovlp(mol)
Update documentation build examples to be generator agnostic Now that the default CMake generator used by `build.sh` is Ninja we should provide generator agnostic build instructions. Authors: - Robert Maynard (https://github.com/robertmaynard) Approvers: - Dante Gama Dessavre (https://github.com/dantegd) URL:
@@ -58,12 +58,12 @@ Current cmake offers the following configuration options: After running CMake in a `build` directory, if the `BUILD_*` options were not turned `OFF`, the following targets can be built: ```bash -$ make -j # Build libcuml++ and all tests -$ make -j sg_benchmark # Build c++ cuml single gpu benchmark -$ make -j cuml++ # Build libcuml++ -$ make -j ml # Build ml_test algorithm tests binary -$ make -j ml_mg # Build ml_mg_test multi GPU algorithms tests binary -$ make -j prims # Build prims_test ML primitive unit tests binary +$ cmake --build . -j # Build libcuml++ and all tests +$ cmake --build . -j --target sg_benchmark # Build c++ cuml single gpu benchmark +$ cmake --build . -j --target cuml++ # Build libcuml++ +$ cmake --build . -j --target ml # Build ml_test algorithm tests binary +$ cmake --build . -j --target ml_mg # Build ml_mg_test multi GPU algorithms tests binary +$ cmake --build . -j --target prims # Build prims_test ML primitive unit tests binary ``` ### Third Party Modules
Update simulator.py Fix invalid torso tracker return values in simulator.py
@@ -1205,7 +1205,7 @@ class Simulator: tracker_data = self.renderer.vrsys.getDataForVRTracker(tracker_serial_number) # Set is_valid to false, and assume the user will check for invalid data if not tracker_data: - return [False, None, None] + return [False, [0,0,0], [0,0,0,0]] is_valid, translation, rotation = tracker_data return [is_valid, translation, rotation]
fix exp fam. formula Summary: Pull Request resolved:
@@ -9,7 +9,7 @@ class ExponentialFamily(Distribution): .. math:: - p_{F}(x; \theta) = \exp(\langle t(x), \theta\rangle) - F(\theta) + k(x)) + p_{F}(x; \theta) = \exp(\langle t(x), \theta\rangle - F(\theta) + k(x)) where :math:`\theta` denotes the natural parameters, :math:`t(x)` denotes the sufficient statistic, :math:`F(\theta)` is the log normalizer function for a given family and :math:`k(x)` is the carrier
package.json: Move difflib to devDependencies. We introduced this a couple of commits ago (in for use in the tests only. Putting it here avoids pulling in a new dependency in production which we don't use there.
"@types/webpack": "3.0.13", "blueimp-md5": "2.10.0", "clipboard": "1.5.16", - "difflib": "0.2.4", "emoji-datasource": "3.0.0", "emoji-datasource-apple": "3.0.0", "emoji-datasource-emojione": "3.0.0", "devDependencies": { "casperjs": "casperjs/casperjs", "cssstyle": "0.2.29", + "difflib": "0.2.4", "eslint": "3.9.1", "htmlparser2": "3.8.3", "istanbul": "0.4.5",
left sidebar: Fix gaps between hover areas. A somewhat recent refactoring of the left sidebar had introduced a gap between the hover areas that looked off; this fixes this with a slight rearrangement with where the 1px of space between elements lives. Fixes
@@ -93,15 +93,15 @@ li.show-more-topics a { margin-bottom: 4px; } -.narrows_panel li { - margin: 1px 0px; +.narrows_panel li a { + margin-top: 1px; } .narrows_panel li a:hover { text-decoration: none; } -#stream_filters li { +#stream_filters li a { padding: 1px 0px; }
Update README_en.md add link to SlowFast_FasterRCNN_en.md
@@ -63,7 +63,7 @@ PaddleVideo is a video model development kit produced by [PaddlePaddle Official] <td colspan="5" style="font-weight:bold;">Spatio-temporal motion detection method</td> </tr> <tr> - <td><a href="slowfast.md">SlowFast+Fast R-CNN</a> + <td><a href="docs/en/model_zoo/detection/SlowFast_FasterRCNN_en.md">SlowFast+Fast R-CNN</a> <td></td> <td></td> <td></td>
fix: Doc layout Add breadcrumbs Overridable page_toc block
{% extends "templates/base.html" %} {%- from "templates/includes/navbar/navbar_items.html" import render_item -%} -{% macro page_content() %} -{%- block page_content -%}{%- endblock -%} -{% endmacro %} - {%- block head_include %} <link rel="stylesheet" href="/assets/frappe/css/hljs-night-owl.css"> {% endblock -%} {% block content %} -{% macro main_content() %} -<div class="page-content-wrapper"> - {% block page_container %} - <main> - <div class="page_content page-content doc-content"> - {{ page_content() }} - </div> - </main> - {% endblock %} -</div> -{% endmacro %} {% macro container_attributes() -%} id="page-{{ name or route | e }}" data-path="{{ pathname | e }}" @@ -99,13 +84,28 @@ id="page-{{ name or route | e }}" data-path="{{ pathname | e }}" </aside> </div> <div class="main-column doc-main col-12 col-lg-10 col-xl-8"> - {{ main_content() }} + <div class="page-content-wrapper"> + {% block page_container %} + <main> + <div class="page_content page-content doc-content"> + {%- if add_breadcrumbs -%} + {% include "templates/includes/breadcrumbs.html" %} + {%- endif -%} + {%- block page_content -%}{%- endblock -%} + </div> + </main> + {% endblock %} + </div> </div> <div class="page-toc col-sm-2 d-none d-xl-block"> + {% block page_toc %} + {% if page_toc_html %} <div> <h5>On this page</h5> {{ page_toc_html }} </div> + {% endif %} + {% endblock %} </div> </div> </div>
Coerce seconds argument to a floating point number. Celery does not coerce configuration values into the right type (See celery/celery#6696). This is a workaround. This bug will be fixed in Celery NextGen when we will refactor our configuration subsystem.
@@ -155,7 +155,7 @@ class Timer: return self._enter(eta, priority, entry) def enter_after(self, secs, entry, priority=0, time=monotonic): - return self.enter_at(entry, time() + secs, priority) + return self.enter_at(entry, time() + float(secs), priority) def _enter(self, eta, priority, entry, push=heapq.heappush): push(self._queue, scheduled(eta, priority, entry))
TYP,ENH: Add annotations for the new `ABCPolyBase.symbol` property Xref
@@ -9,6 +9,8 @@ class ABCPolyBase(abc.ABC): maxpower: ClassVar[int] coef: Any @property + def symbol(self) -> str: ... + @property @abc.abstractmethod def domain(self): ... @property @@ -21,7 +23,7 @@ def has_samecoef(self, other): ... def has_samedomain(self, other): ... def has_samewindow(self, other): ... def has_sametype(self, other): ... - def __init__(self, coef, domain=..., window=...): ... + def __init__(self, coef, domain=..., window=..., symbol: str = ...) -> None: ... def __format__(self, fmt_str): ... def __call__(self, arg): ... def __iter__(self): ...
simplify alias compare function
@@ -63,12 +63,11 @@ def _get_optimization_history_plot(study): return go.Figure(data=[], layout=layout) best_values = [float('inf')] if study.direction == StudyDirection.MINIMIZE else [-float('inf')] + comp = min if study.direction == StudyDirection.MINIMIZE else max for trial in trials: trial_value = trial.value - if study.direction == StudyDirection.MINIMIZE: - best_values.append(min(best_values[-1], trial_value)) - else: - best_values.append(max(best_values[-1], trial_value)) + assert trial_value is not None # For mypy + best_values.append(comp(best_values[-1], trial_value)) best_values.pop(0) traces = [ go.Scatter(x=[t.number for t in trials], y=[t.value for t in trials],
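A minimal sketch of the refactored running-best computation above, using made-up trial values, to show that aliasing min/max into comp reproduces the original branchy behaviour:

values = [5.0, 3.0, 4.0, 1.0]  # hypothetical per-trial objective values
minimize = True                # corresponds to StudyDirection.MINIMIZE

best_values = [float("inf")] if minimize else [-float("inf")]
comp = min if minimize else max
for v in values:
    best_values.append(comp(best_values[-1], v))
best_values.pop(0)
print(best_values)  # -> [5.0, 3.0, 3.0, 1.0]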
core: Fix action of 'z' on self-messages. When 'z' hotkey was used in the All private messages narrow, on self messages, it did not switch narrow to PM with oneself. We now handle this logic explicitly.
@@ -174,6 +174,8 @@ class Controller: emails = [recipient['email'] for recipient in button.message['display_recipient'] if recipient['email'] != self.model.client.email] + if not emails and len(button.message['display_recipient']) == 1: + emails = [self.model.user_email] user_emails = ', '.join(emails) user_ids = {user['id'] for user in button.message['display_recipient']}
Fix the path of calculate_rtf.py Given the suggestion in
@@ -1269,7 +1269,7 @@ if ! "${skip_eval}"; then _fs=$(python3 -c "import humanfriendly as h;print(h.parse_size('${fs}'))") _sample_shift=$(python3 -c "print(1 / ${_fs} * 1000)") # in ms ${_cmd} JOB=1 "${_logdir}"/calculate_rtf.log \ - ../../../utils/calculate_rtf.py \ + calculate_rtf.py \ --log-dir ${_logdir} \ --log-name "asr_inference" \ --input-shift ${_sample_shift} \
Use pchanges in highstate output module if changes key is empty and pchanges is present As per discussion in pchanges should be used for dry-run, it requires proper handling in the output module to correctly report changes to the user
@@ -221,6 +221,8 @@ def _format_host(host, data): tcolor = colors['GREEN'] orchestration = ret.get('__orchestration__', False) schanged, ctext = _format_changes(ret['changes'], orchestration) + if not ctext and 'pchanges' in ret: + schanged, ctext = _format_changes(ret['pchanges'], orchestration) nchanges += 1 if schanged else 0 # Skip this state if it was successful & diff output was requested
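The idea is to report pchanges (the predicted changes from a dry run) whenever the real changes text comes back empty; a reduced sketch with a hypothetical helper standing in for the real _format_changes.

```python
def format_changes(changes):
    # Stand-in for the real _format_changes(); returns (changed?, text).
    return bool(changes), str(changes) if changes else ""

def changes_text(ret):
    schanged, ctext = format_changes(ret["changes"])
    # Dry runs (test=True) leave 'changes' empty but may fill 'pchanges'.
    if not ctext and "pchanges" in ret:
        schanged, ctext = format_changes(ret["pchanges"])
    return schanged, ctext

print(changes_text({"changes": {}, "pchanges": {"mode": "0644"}}))
```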
0.6.7 Auto connecting and minor bugfixes
@@ -24,7 +24,6 @@ while True: content = res.content.decode().splitlines() #Read content and split into lines host = content[0] #Line 1 = pool address port = content[1] #Line 2 = pool port - print(host, port) debug = debug + "Received pool IP and port.\n" break else:
Remove print statement from has_transparency Closes
@@ -124,7 +124,6 @@ def has_transparency(colour: Union[ColorType, List[ColorType]]): return has_alpha(colour) elif isinstance(colour, list): - print([c for c in colour]) return any([has_transparency(c) for c in colour]) return False
Rectified two broken links Two links were broken because they used absolute URLs. Changed the URLs from absolute to relative, and the links now work correctly.
@@ -114,8 +114,8 @@ After adding Python to your Windows PATH, you should then be able to follow the ## Check #7 [Windows]: Do you need Build Tools for Visual Studio installed? -Starting with version [0.63](http://localhost:8000/changelog.html#version-0-63-0) (July 2020), Streamlit added [pyarrow](https://arrow.apache.org/docs/python/) as an install dependency -as part of the [Streamlit Components](http://localhost:8000/streamlit_components.html) feature release. Occasionally, when trying to install Streamlit from +Starting with version [0.63](../changelog.html#version-0-63-0) (July 2020), Streamlit added [pyarrow](https://arrow.apache.org/docs/python/) as an install dependency +as part of the [Streamlit Components](../streamlit_components.html) feature release. Occasionally, when trying to install Streamlit from PyPI, you may see errors such as the following: ```shell
Moved RedLockTest and Tanium to skipped Increased Phishing test - attachment timeout
}, { "playbookID": "Phishing test - attachment", - "timeout": 500, + "timeout": 600, "nightly": true, "integrations": [ "EWS Mail Sender", "TruSTAR Test": "The test runs even when not supposed to, which causes its quota to run out", "Tenable.io test": "Error 409 ISSUE OPENED", "Tenable.io Scan Test": "Error 409 ISSUE OPENED", - "FireEye HX Test": "Problem with file acquisition - need to contact FireEye " + "FireEye HX Test": "Problem with file acquisition - need to contact FireEye ", + "RedLockTest": "RedLock has API issues - opened an issue (15493)" }, "skipped_integrations": { "Jask": "Cannot access instance token not valid", "Google Resource Manager": "Cannot create projects because have reached alloted quota", "RSA NetWitness Endpoint": "Instance is down, waiting for devops to rebuild", "WildFire": "Quota is temporarily over due to circle ci issues", - "Freshdesk": "Trial account expired" + "Freshdesk": "Trial account expired", + "Tanium": "Instance is not stable (issue 15497)" }, "nigthly_integrations": [ "Lastline",
Update imports for relu6 removal in Keras 2.2.2 Keras 2.2.2 no longer has keras_applications.mobilenet.relu6, resulting in an ImportError if code is run with this or later versions. This commit checks for the appropriate Keras version and provides a dummy replacement for relu6.
@@ -7,7 +7,10 @@ from distutils.version import StrictVersion as _StrictVersion if _keras.__version__ >= _StrictVersion('2.2.0'): from keras.layers import DepthwiseConv2D + if _keras.__version__ <= _StrictVersion('2.2.1'): from keras_applications.mobilenet import relu6 + else: + relu6 = lambda x: _keras.activations.relu(x, max_value=6.0) else: from keras.applications.mobilenet import DepthwiseConv2D, relu6
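The shim works because relu6 is simply ReLU capped at 6; a framework-free sketch of that equivalence, with a plain-Python stand-in rather than the actual Keras function.

```python
def relu(x, max_value=None):
    # Plain-Python stand-in for a framework ReLU activation.
    y = max(x, 0.0)
    return y if max_value is None else min(y, max_value)

relu6 = lambda x: relu(x, max_value=6.0)

for x in (-2.0, 3.5, 7.5):
    print(x, "->", relu6(x))   # 0.0, 3.5, 6.0
```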
Fixed bug causing "MAC verified OK" message Fix for verbose stderr output from the openssl command, which is now captured instead of printed.
@@ -88,7 +88,8 @@ class CryptUtil(object): first_proc = subprocess.Popen(first_cmd, stdout=subprocess.PIPE) - second_proc = subprocess.Popen(second_cmd, stdin=first_proc.stdout, stdout=subprocess.PIPE) + second_proc = subprocess.Popen(second_cmd, stdin=first_proc.stdout, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) first_proc.stdout.close() # see https://docs.python.org/2/library/subprocess.html#replacing-shell-pipeline stdout, stderr = second_proc.communicate()
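The stray message disappears because openssl writes it to stderr, which leaks to the terminal unless the second process's stderr is also piped; a generic two-stage pipeline sketch using placeholder POSIX commands (echo/tr, not the real openssl invocations).

```python
import subprocess

# Placeholder commands standing in for the two chained openssl invocations.
first_cmd = ["echo", "secret payload"]
second_cmd = ["tr", "a-z", "A-Z"]

first_proc = subprocess.Popen(first_cmd, stdout=subprocess.PIPE)
second_proc = subprocess.Popen(second_cmd, stdin=first_proc.stdout,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
first_proc.stdout.close()  # allow SIGPIPE to reach the first process
stdout, stderr = second_proc.communicate()
print(stdout.decode().strip())  # stderr chatter is captured, not printed
```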
check if DD is initialized Datadog may not be initialized; this can happen in unit tests
@@ -105,10 +105,13 @@ class DatadogMetrics(HqMetrics): def _create_event(self, title: str, text: str, alert_type: str = ALERT_INFO, tags: dict = None, aggregation_key: str = None): + if datadog_initialized(): api.Event.create( title=title, text=text, tags=tags, alert_type=alert_type, aggregation_key=aggregation_key, ) + else: + datadog_logger.debug('Metrics event: (%s) %s\n%s\n%s', alert_type, title, text, tags) def _datadog_record(fn, name, value, tags=None): @@ -116,3 +119,7 @@ def _datadog_record(fn, name, value, tags=None): fn(name, value, tags=tags) except Exception: datadog_logger.exception('Unable to record Datadog stats') + + +def datadog_initialized(): + return api._api_key and api._application_key
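The guard simply checks whether the client library was configured before calling out, and logs locally otherwise; a reduced sketch with a hypothetical stand-in for the datadog api module.

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("metrics")

class _FakeApi:
    # Hypothetical stand-in for the datadog `api` module's key attributes.
    _api_key = None
    _application_key = None

api = _FakeApi()

def datadog_initialized():
    return bool(api._api_key and api._application_key)

def create_event(title, text):
    if datadog_initialized():
        pass  # the real api.Event.create(...) call would go here
    else:
        logger.debug("Metrics event: %s\n%s", title, text)

create_event("deploy", "unit tests never configure datadog")
```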
Unify handling of type_dispatched_args in gen_python_functions. This is just to simplify the handling; there is no generated code difference.
@@ -255,8 +255,20 @@ def create_python_bindings(python_functions, has_self, is_module=False): inputs = [arg for arg in declaration['arguments'] if not is_output(arg)] outputs = [arg for arg in declaration['arguments'] if is_output(arg)] - type_dispatched_args = [arg for arg in declaration['arguments'] if arg.get('is_type_dispatched')] - assert len(type_dispatched_args) <= 1 + + def get_type_dispatched(args): + return [arg for arg in args if arg.get('is_type_dispatched')] + type_dispatched_actual_args = get_type_dispatched(declaration['arguments']) + type_dispatched_bindings = get_type_dispatched(declaration['python_binding_arguments']) + assert len(type_dispatched_actual_args + type_dispatched_bindings) <= 1 + if type_dispatched_bindings and len(outputs) == 0: + # out(s) determines the dtype if it is present, so only use this if there are no outputs. + type_dispatched_args = type_dispatched_bindings + else: + type_dispatched_args = type_dispatched_actual_args + + if type_dispatched_args and len(outputs) > 1: + raise RuntimeError("Not supported: type dispatched parameter with multiple outputs") def parse_arg(arg, arg_index, unpack_args=False): name = arg['name'] @@ -324,23 +336,16 @@ def create_python_bindings(python_functions, has_self, is_module=False): arg_idx += 1 # check python_binding_arguments - has_dtype_bind = False has_device_bind = False requires_grad = None python_binding_arguments = declaration.get('python_binding_arguments', []) if 'dtype' in (a['name'] for a in python_binding_arguments): - dtype_idx, device_idx, requires_grad_idx = (arg_idx, arg_idx + 1, arg_idx + 2) - else: + arg_idx += 1 # we already handled this in type_dispatched_args device_idx, requires_grad_idx = (arg_idx, arg_idx + 1) for arg in python_binding_arguments: if arg['name'] == 'dtype' and arg['simple_type'] == 'Type': - # out(s) determines the dtype if it is present, so don't pass the dtype to the dispatch. - if len(outputs) == 0: - has_dtype_bind = True - append_actuals_formals(*parse_arg(arg, dtype_idx)) - elif len(outputs) > 1: - raise RuntimeError("Not supported: dtype parameter with multiple outputs") + pass # already handled by type_dispatched_args elif arg['name'] == 'device' and arg['simple_type'] == 'int64_t': if len(outputs) == 0: has_device_bind = True @@ -354,7 +359,7 @@ def create_python_bindings(python_functions, has_self, is_module=False): env['unpack_args'] = [] env['formal_args'] = formal_args env['actuals'] = actuals - has_any_dtype = has_dtype_bind or any(a['name'] == 'dtype' and a['simple_type'] == 'Type' for a in inputs) + has_any_dtype = any(a['name'] == 'dtype' and a['simple_type'] == 'Type' for a in inputs) type_dispatched_name = type_dispatched_args[0]['name'] if len(type_dispatched_args) > 0 else None maybe_init_cuda = 'dtype' if has_any_dtype else type_dispatched_name env['initialize_cuda'] = 'maybe_initialize_cuda({});'.format(maybe_init_cuda) if maybe_init_cuda else []
Updated document Updated readme, added a short description of IPython.
@@ -24,6 +24,7 @@ Overview Welcome to IPython. Our full documentation is available on `ipython.readthedocs.io <https://ipython.readthedocs.io/en/stable/>`_ and contains information on how to install, use, and contribute to the project. +IPython (Interactive Python) is a command shell for interactive computing in multiple programming languages, originally developed for the Python programming language, that offers introspection, rich media, shell syntax, tab completion, and history. **IPython versions and Python Support**
Improved presentation Add round(*, 2) to the extended price (break price) information comment to avoid Python's long decimal numbers and stay compatible with the price values.
@@ -1212,7 +1212,7 @@ def add_dist_to_worksheet(wks, wrk_formats, index, start_row, start_col, # Sort the tiers based on quantities and turn them into lists of strings. qtys = sorted(price_tiers.keys()) prices = [str(price_tiers[q]) for q in qtys] - prices_ext = [str(price_tiers[qtys[q]]*int(qtys[q])) for q in range(len(qtys))] # Evaluate the extended prices, use in the "is more convinient buy the next price break quantity?". + prices_ext = [str( round( price_tiers[qtys[q]]*int(qtys[q]), 2 )) for q in range(len(qtys))] # Evaluate the extended prices, use in the "is more convinient buy the next price break quantity?". qtys = [str(q) for q in qtys] purch_qty_col = start_col + columns['purch']['col']
Stop SWO before starting it. This addresses the case where the probe already had SWO running, which for CMSIS-DAP will cause a command error if you attempt to restart it.
@@ -125,6 +125,11 @@ class SWVReader(threading.Thread): thread runs, it reads SWO data from the probe and passes it to the SWO parser created in init(). When the thread is signaled to stop, it calls DebugProbe.swo_stop() before exiting. """ + # Stop SWO first in case the probe already had it started. Ignore if this fails. + try: + self._session.probe.swo_stop() + except exceptions.ProbeError: + pass self._session.probe.swo_start(self._swo_clock) while not self._shutdown_event.is_set():
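This is a general "stop before start" pattern: attempt to tear down possibly leftover state and swallow the failure when there was nothing to stop; a self-contained sketch with a hypothetical probe class, not pyOCD's actual API.

```python
class FakeProbe:
    # Hypothetical probe with the same stop/start surface as the real one.
    def __init__(self):
        self._running = True  # pretend a previous session left SWO enabled

    def swo_stop(self):
        if not self._running:
            raise RuntimeError("SWO not running")
        self._running = False

    def swo_start(self, clock):
        if self._running:
            raise RuntimeError("SWO already running")
        self._running = True

probe = FakeProbe()
try:
    probe.swo_stop()         # clear any stale session; ignore if none existed
except RuntimeError:
    pass
probe.swo_start(2_000_000)   # now starts from a known-clean state
```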
llvm, function/Distance: Provide custom output struct type The default will change in the following commit
@@ -9807,12 +9807,6 @@ class Distance(ObjectiveFunction): self.functionOutputType = None - # Override defaults. We only output single value - @property - def _result_length(self): - return 1; - - def _validate_params(self, request_set, target_set=None, variable=None, context=None): """Validate that variable had two items of equal length @@ -9846,6 +9840,16 @@ class Distance(ObjectiveFunction): ) ) + @property + def _result_length(self): + # Override defaults. We only output single value + return 1 + + def get_output_struct_type(self): + with pnlvm.LLVMBuilderContext() as ctx: + return ir.ArrayType(ctx.float_ty, self._result_length) + + def __gen_llvm_difference(self, builder, index, ctx, v1, v2, acc): ptr1 = builder.gep(v1, [index]) ptr2 = builder.gep(v2, [index])
Update matchms/importing/load_from_msp.py Thank you for the suggestion, I totally ignored that.
@@ -50,18 +50,16 @@ def parse_msp_file(filename: str) -> List[dict]: # Obtaining the masses and intensities if int(params['num peaks']) == peakscount: peakscount = 0 - spectrums.append( - { + yield { 'params': (params), 'm/z array': numpy.array(masses), 'intensity array': numpy.array(intensities) } - ) + params = {} masses = [] intensities = [] - return spectrums def load_from_msp(filename: str) -> Generator[Spectrum, None, None]:
Use contains to catch -local sources Changed a filter to use contains instead of iexact for local source matching
@@ -76,7 +76,7 @@ class SourceFilter(FilterSet): """Source custom filters.""" name = CharListFilter(field_name="name", lookup_expr="name__icontains") - type = CharListFilter(field_name="source_type", lookup_expr="source_type__iexact") + type = CharListFilter(field_name="source_type", lookup_expr="source_type__contains") class Meta: model = Sources
Attempt to fix test_uiawrapper.WindowWrapperTests.test_issue_443 File "C:\projects\pywinauto\pywinauto\unittests\test_uiawrapper.py", line 2142, in test_issue_443 self.assertEqual(self.dlg.is_minimized(), True) AssertionError: False != True
@@ -2115,6 +2115,7 @@ if UIA_support: def test_issue_443(self): """Test .set_focus() for window that is not keyboard focusable""" self.dlg.minimize() + time.sleep(0.2) self.assertEqual(self.dlg.is_minimized(), True) self.dlg.set_focus() self.assertEqual(self.dlg.is_minimized(), False)
Upgrade shaker version to 1.1.3 This mainly fixes CentOS image builds.
@@ -37,7 +37,7 @@ rally_version: 0.10.1 shaker_venv: "{{browbeat_path}}/.shaker-venv" # Shaker version to Install -shaker_version: 1.1.0 +shaker_version: 1.1.3 # PerfKitBenchmarker Settings perfkit_venv: "{{browbeat_path}}/.perfkit-venv"
removes redundant check on y_axis_column, i.e. a GraphDisplayColumn should always be a dict
@@ -801,10 +801,7 @@ class ReportConfiguration(QuickCachedDocumentMixin, Document): y_axis_columns = [] try: for y_axis_column in original_y_axis_columns: - if isinstance(y_axis_column, dict): column_id = y_axis_column['column_id'] - else: - column_id = y_axis_column column_config = self.report_columns_by_column_id[column_id] if column_config.type == 'expanded': expanded_columns = self.get_expanded_columns(column_config)
Put no coverage pragmas on __str__ methods for new errors They aren't covered for the same reasons as most other error __str__ methods.
@@ -67,7 +67,10 @@ class StratisCliPartialChangeError(StratisCliRuntimeError): """ return self.changed_resources != frozenset() - def __str__(self): + # pylint: disable=fixme + # FIXME: remove no coverage pragma when adequate testing for CLI output + # exists. + def __str__(self): # pragma: no cover if len(self.unchanged_resources) > 1: msg = "The '%s' action has no effect for resources %s" % ( self.command, @@ -137,7 +140,10 @@ class StratisCliInUseError(StratisCliRuntimeError): self.blockdevs = blockdevs self.added_as = added_as - def __str__(self): + # pylint: disable=fixme + # FIXME: remove no coverage pragma when adequate testing for CLI output + # exists. + def __str__(self): # pragma: no cover (target_blockdev_tier, already_blockdev_tier) = ( BLOCK_DEV_TIER_TO_NAME(self.added_as), BLOCK_DEV_TIER_TO_NAME(
SA-CASSCF average-energy gradient logic Must set "converged=True" if we can skip Lagrange multipliers.
@@ -496,7 +496,7 @@ class Gradients (lagrange.Gradients): eris = self.eris = self.base.ao2mo (mo) if mf_grad is None: mf_grad = self.base._scf.nuc_grad_method () if state is None: - self.converged = self.base.converged + self.converged = True return casscf_grad.Gradients (self.base).kernel (mo_coeff=mo, ci=ci, atmlst=atmlst, verbose=verbose) if e_states is None:
Add Documentation URL This adds a [Documentation URL](https://packaging.python.org/guides/distributing-packages-using-setuptools/#project-urls), which will display in the left-hand nav of the projects PyPI page, allowing users arriving there to get to the documentation slightly faster.
@@ -94,4 +94,7 @@ setup( "License :: OSI Approved :: Apache Software License", "Topic :: Software Development :: Testing", ], + project_urls={ + "Documentation": "http://docs.getmoto.org/en/latest/", + }, )
Removed core pool initialization from adapter. As the core pool is initialized by OCF, it is no longer required to do it in the adapter.
@@ -426,8 +426,6 @@ int cas_initialize_context(void) goto err_block_dev; } - ocf_mngt_core_pool_init(cas_ctx); - return 0; err_block_dev: @@ -444,7 +442,6 @@ err_ctx: int cas_cleanup_context(void) { - ocf_mngt_core_pool_deinit(cas_ctx); block_dev_deinit(); atomic_dev_deinit(); cas_garbage_collector_deinit();
SceneReaderPathPreview : Remove AlembicSource node It was unused, since Alembic loading is now done through the SceneReader.
@@ -54,9 +54,6 @@ class SceneReaderPathPreview( GafferUI.PathPreviewWidget ) : # for reading IECore.SceneInterface files (scc, lscc) self.__script["SceneReader"] = GafferScene.SceneReader() - # for reading Alembic files (abc) - self.__script["AlembicSource"] = GafferScene.AlembicSource() - # for reading more generic single object files (cob, ptc, pdc, etc) ## \todo: can we unify all file input to SceneReader by creating a SceneInterface that makes # single object scenes using Reader ops behind the scenes? @@ -94,8 +91,7 @@ class SceneReaderPathPreview( GafferUI.PathPreviewWidget ) : else : ext = str(path).split( "." )[-1] - supported = set( [ "abc" ] ) - supported.update( GafferScene.SceneReader.supportedExtensions() ) + supported = set( GafferScene.SceneReader.supportedExtensions() ) supported.update( IECore.Reader.supportedExtensions() ) # no reason to preview a single image as a 3D scene supported.difference_update( IECore.Reader.supportedExtensions( IECore.TypeId.ImageReader ) ) @@ -105,7 +101,6 @@ class SceneReaderPathPreview( GafferUI.PathPreviewWidget ) : def _updateFromPath( self ) : self.__script["SceneReader"]["fileName"].setValue( "" ) - self.__script["AlembicSource"]["fileName"].setValue( "" ) self.__script["ObjectPreview"]["fileName"].setValue( "" ) if not self.isValid() : @@ -156,12 +151,6 @@ class SceneReaderPathPreview( GafferUI.PathPreviewWidget ) : self.__script["ObjectPreview"]["fileName"].setValue( fileName ) outPlug = self.__script["ObjectPreview"]["out"] - elif ext == "abc" : - - self.__script["AlembicSource"]["fileName"].setValue( fileName ) - outPlug = self.__script["AlembicSource"]["out"] - ## \todo: determine the frame range from the abc file - self.__script["OpenGLAttributes"]["in"].setInput( outPlug ) # update the timeline
Remove slow from sacremoses The issue has been resolved upstream. Test run time on Circle CI: ~0.4 seconds.
import io -import unittest import torchtext.data as data from torchtext.utils import unicode_csv_reader @@ -22,9 +21,6 @@ class TestUtils(TorchtextTestCase): "A", "string", ",", "particularly", "one", "with", "slightly", "complex", "punctuation", "."] - # TODO: Remove this once issue was been resolved. - # TODO# Add nltk data back in build_tools/travis/install.sh. - @unittest.skip("Impractically slow! https://github.com/alvations/sacremoses/issues/61") def test_get_tokenizer_moses(self): # Test Moses option. # Note that internally, MosesTokenizer converts to unicode if applicable
Update changelog.md Small grammatical tweak.
@@ -7,7 +7,7 @@ Also see [changelog in progress](http://bit.ly/2nK3cVf) for the next release. ## Release v4.0.2 - **v4.0.2, released 2017-07-31** - - Fixed issue when using single-sign-on with GitLab (and in Enterprise Edition with SAML, Office365 and G Suite), where using a non-English language option in Account Settings may result in a login failure. + - Fixed issue when using single-sign-on with GitLab (and in Enterprise Edition with SAML, Office365 and G Suite), where using a non-English language option in Account Settings resulted in a login failure. - Fixed issue with custom slash commands not working in direct message channels. - Fixed issue with GitLab and SAML single sign-on in Mattermost mobile apps redirecting to a browser page. - **v4.0.1, released 2017-07-18**
Address some PR feedback: keep 'comment' in sync; organize test ID name
@@ -99,7 +99,7 @@ def _keep_first_some(values: Sequence[Any]) -> Any: if value: return value raise AssertionError( - "``_keep_some`` should find at least one valid option; check configuration." + "``_keep_first_some`` should find at least one valid option; check configuration." ) @@ -191,18 +191,18 @@ def _generate_pytest_case_from( ) -> Tuple[TestCase, str]: """ id format: - f"{TEST_TYPE_NAME}_{HANDLER_TYPE_NAME}_{TEST_SUITE_NAME}_{TEST_CASE_NAME}:{CONFIG_TYPE_NAME}_{FORK_TYPE_NAME}" # noqa: E501 + f"{TEST_TYPE_NAME}_{CONFIG_TYPE_NAME}_{FORK_TYPE_NAME}_{HANDLER_TYPE_NAME}_{TEST_SUITE_NAME}_{TEST_CASE_NAME}" # noqa: E501 """ - # special case only one handler "core" test_name = test_type.name handler_name = handler_type.name + config_name = config_type.name + fork_name = fork_type.name - test_id_prefix = thread_last( - (test_name, handler_name, suite_name, test_case.name), + test_id = thread_last( + (test_name, config_name, fork_name, handler_name, suite_name, test_case.name), (filter, lambda component: component != ""), lambda components: "_".join(components), ) - test_id = f"{test_id_prefix}:{config_type.name}_{fork_type.name}" return test_case, test_id
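The ID-building logic reduces to dropping empty components and joining the rest; a minimal sketch with hypothetical component values rather than the real spec-test names.

```python
def make_test_id(*components):
    # Drop empty components, then join the remainder with underscores.
    return "_".join(c for c in components if c)

# Hypothetical component values in the new ordering.
print(make_test_id("sanity", "minimal", "altair", "", "core", "voluntary_exit"))
# sanity_minimal_altair_core_voluntary_exit
```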
Arnold Renderer : Initialise autoBumpVisibility in Displacement constructor The idea being that Displacement takes full responsibility for its own data.
@@ -825,13 +825,6 @@ class ArnoldAttributes : public IECoreScenePreview::Renderer::AttributesInterfac updateVisibility( m_visibility, g_specularTransmitVisibilityAttributeName, AI_RAY_SPECULAR_TRANSMIT, attributes ); updateVisibility( m_visibility, g_volumeVisibilityAttributeName, AI_RAY_VOLUME, attributes ); updateVisibility( m_visibility, g_subsurfaceVisibilityAttributeName, AI_RAY_SUBSURFACE, attributes ); - updateVisibility( m_displacement.autoBumpVisibility, g_cameraVisibilityAutoBumpAttributeName, AI_RAY_CAMERA, attributes ); - updateVisibility( m_displacement.autoBumpVisibility, g_diffuseReflectVisibilityAutoBumpAttributeName, AI_RAY_DIFFUSE_REFLECT, attributes ); - updateVisibility( m_displacement.autoBumpVisibility, g_specularReflectVisibilityAutoBumpAttributeName, AI_RAY_SPECULAR_REFLECT, attributes ); - updateVisibility( m_displacement.autoBumpVisibility, g_diffuseTransmitVisibilityAutoBumpAttributeName, AI_RAY_DIFFUSE_TRANSMIT, attributes ); - updateVisibility( m_displacement.autoBumpVisibility, g_specularTransmitVisibilityAutoBumpAttributeName, AI_RAY_SPECULAR_TRANSMIT, attributes ); - updateVisibility( m_displacement.autoBumpVisibility, g_volumeVisibilityAutoBumpAttributeName, AI_RAY_VOLUME, attributes ); - updateVisibility( m_displacement.autoBumpVisibility, g_subsurfaceVisibilityAutoBumpAttributeName, AI_RAY_SUBSURFACE, attributes ); if( const IECore::BoolData *d = attribute<IECore::BoolData>( g_doubleSidedAttributeName, attributes ) ) { @@ -1340,6 +1333,13 @@ class ArnoldAttributes : public IECoreScenePreview::Renderer::AttributesInterfac zeroValue = attributeValue<float>( g_dispZeroValueAttributeName, attributes, 0.0f ); autoBump = attributeValue<bool>( g_dispAutoBumpAttributeName, attributes, false ); autoBumpVisibility = AI_RAY_CAMERA; + updateVisibility( autoBumpVisibility, g_cameraVisibilityAutoBumpAttributeName, AI_RAY_CAMERA, attributes ); + updateVisibility( autoBumpVisibility, g_diffuseReflectVisibilityAutoBumpAttributeName, AI_RAY_DIFFUSE_REFLECT, attributes ); + updateVisibility( autoBumpVisibility, g_specularReflectVisibilityAutoBumpAttributeName, AI_RAY_SPECULAR_REFLECT, attributes ); + updateVisibility( autoBumpVisibility, g_diffuseTransmitVisibilityAutoBumpAttributeName, AI_RAY_DIFFUSE_TRANSMIT, attributes ); + updateVisibility( autoBumpVisibility, g_specularTransmitVisibilityAutoBumpAttributeName, AI_RAY_SPECULAR_TRANSMIT, attributes ); + updateVisibility( autoBumpVisibility, g_volumeVisibilityAutoBumpAttributeName, AI_RAY_VOLUME, attributes ); + updateVisibility( autoBumpVisibility, g_subsurfaceVisibilityAutoBumpAttributeName, AI_RAY_SUBSURFACE, attributes ); } ArnoldShaderPtr map; @@ -1560,7 +1560,7 @@ class ArnoldAttributes : public IECoreScenePreview::Renderer::AttributesInterfac return data ? data->readable() : boost::optional<T>(); } - void updateVisibility( unsigned char &visibility, const IECore::InternedString &name, unsigned char rayType, const IECore::CompoundObject *attributes ) + static void updateVisibility( unsigned char &visibility, const IECore::InternedString &name, unsigned char rayType, const IECore::CompoundObject *attributes ) { if( const IECore::BoolData *d = attribute<IECore::BoolData>( name, attributes ) ) {
[cleanup] Remove pywikibot.QuitKeyboardInterrupt It has been deprecated for 6 years; also stop publishing the private _QuitKeyboardInterrupt class.
@@ -45,9 +45,6 @@ from pywikibot.bot import ( show_help, ui, ) -from pywikibot.bot_choice import ( - QuitKeyboardInterrupt as _QuitKeyboardInterrupt, -) from pywikibot.diff import PatchManager from pywikibot.family import AutoFamily, Family from pywikibot.i18n import translate @@ -128,14 +125,14 @@ __all__ = ( 'NoCreateError', 'NoMoveTarget', 'NoPage', 'NoUsername', 'NoWikibaseEntity', 'OtherPageSaveError', 'output', 'Page', 'PageCreatedConflict', 'PageDeletedConflict', 'PageRelatedError', - 'PageSaveRelatedError', 'PropertyPage', '_QuitKeyboardInterrupt', - 'SectionError', 'Server414Error', 'Server504Error', 'ServerError', - 'showDiff', 'show_help', 'Site', 'SiteDefinitionError', 'SiteLink', - 'SpamblacklistError', 'stdout', 'Timestamp', 'TitleblacklistError', - 'translate', 'ui', 'unicode2html', 'UnknownExtension', 'UnknownFamily', - 'UnknownSite', 'UnsupportedPage', 'UploadWarning', 'url2unicode', 'User', - 'warning', 'WbGeoShape', 'WbMonolingualText', 'WbQuantity', - 'WbTabularData', 'WbTime', 'WbUnknown', 'WikiBaseError', 'WikidataBot', + 'PageSaveRelatedError', 'PropertyPage', 'SectionError', 'Server414Error', + 'Server504Error', 'ServerError', 'showDiff', 'show_help', 'Site', + 'SiteDefinitionError', 'SiteLink', 'SpamblacklistError', 'stdout', + 'Timestamp', 'TitleblacklistError', 'translate', 'ui', 'unicode2html', + 'UnknownExtension', 'UnknownFamily', 'UnknownSite', 'UnsupportedPage', + 'UploadWarning', 'url2unicode', 'User', 'warning', 'WbGeoShape', + 'WbMonolingualText', 'WbQuantity', 'WbTabularData', 'WbTime', 'WbUnknown', + 'WikiBaseError', 'WikidataBot', ) @@ -1356,11 +1353,6 @@ _putthread.setName('Put-Thread') _putthread.setDaemon(True) wrapper = _ModuleDeprecationWrapper(__name__) -wrapper._add_deprecated_attr( - 'QuitKeyboardInterrupt', _QuitKeyboardInterrupt, - warning_message='pywikibot.QuitKeyboardInterrupt is deprecated; ' - 'use pywikibot.bot.QuitKeyboardInterrupt instead.', - since='20150619', future_warning=True) wrapper._add_deprecated_attr('__release__', __version__, replacement_name='pywikibot.__version__', since='20200707')
voctocore: added no-signal message to offline sources Every source gets a compositor that underlays a 'videotestsrc ! textoverlay' to show "NO SIGNAL" on a black background if the source stops sending.
@@ -56,8 +56,22 @@ bin.( if self.has_video: self.bin += """ + videotestsrc + pattern=black + ! textoverlay + text=\"NO SIGNAL\" + valignment=center + halignment=center + font-desc="Roboto, 20" + ! {vcaps} + ! compositor-{name}. + {videoport} ! {vcaps} + ! compositor-{name}. + + compositor + name=compositor-{name} ! tee name=video-{name}""".format( videoport=self.build_videoport(),
Remove the database-creation line from the `test` rule in the Makefile. This moves to a script that runs when the container comes up.
@@ -62,8 +62,6 @@ run_migrate: # run all migrations @cd dbaas && python manage.py syncdb --migrate --noinput --no-initial-data test: # run tests - # @echo "create database IF NOT EXISTS dbaas;" | mysql -u root - @mysqladmin -uroot -p$(DBAAS_DATABASE_PASSWORD) -f drop test_dbaas -h$(DBAAS_DATABASE_HOST); true @cd dbaas && python manage.py test --settings=dbaas.settings_test --traceback $(filter-out $@,$(MAKECMDGOALS))
improve method for determining position: compares the centroid to a history of bounding boxes
@@ -20,7 +20,9 @@ class ObjectTracker: def __init__(self, config: DetectConfig): self.tracked_objects = {} self.disappeared = {} + self.positions = {} self.max_disappeared = config.max_disappeared + self.detect_config = config def register(self, index, obj): rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) @@ -30,17 +32,71 @@ class ObjectTracker: obj["motionless_count"] = 0 self.tracked_objects[id] = obj self.disappeared[id] = 0 + self.positions[id] = { + "xmins": [], + "ymins": [], + "xmaxs": [], + "ymaxs": [], + "xmin": 0, + "ymin": 0, + "xmax": self.detect_config.width, + "ymax": self.detect_config.height, + } def deregister(self, id): del self.tracked_objects[id] del self.disappeared[id] - def update(self, id, new_obj): - self.disappeared[id] = 0 + # tracks the current position of the object based on the last 10 bounding boxes + # returns False if the object has moved outside its previous position + def update_position(self, id, box): + position = self.positions[id] + xmin, ymin, xmax, ymax = box + + # get the centroid + x = (xmax + xmin) / 2 + y = (ymax + ymin) / 2 + + # if the centroid of this box is outside the computed bounding box + # assume the object has moved to a new position and reset the computed box + # TODO: should this only happen if there are a few boxes? if ( - intersection_over_union(self.tracked_objects[id]["box"], new_obj["box"]) - > 0.9 + x < position["xmin"] + or x > position["xmax"] + or y < position["ymin"] + or y > position["ymax"] ): + position = { + "xmins": [xmin], + "ymins": [ymin], + "xmaxs": [xmax], + "ymaxs": [ymax], + "xmin": xmin, + "ymin": ymin, + "xmax": xmax, + "ymax": ymax, + } + return False + + # if there are less than 10 entries for the position, add the bounding box + # and recompute the position box + if len(position["xmins"]) < 10: + position["xmins"].append(xmin) + position["ymins"].append(ymin) + position["xmaxs"].append(xmax) + position["ymaxs"].append(ymax) + # by using percentiles here, we hopefully remove outliers + position["xmin"] = np.percentile(position["xmins"], 15) + position["ymin"] = np.percentile(position["ymins"], 15) + position["xmax"] = np.percentile(position["xmaxs"], 85) + position["ymax"] = np.percentile(position["ymaxs"], 85) + + return True + + def update(self, id, new_obj): + self.disappeared[id] = 0 + # update the motionless count if the object has not moved to a new position + if self.update_position(id, new_obj["box"]): self.tracked_objects[id]["motionless_count"] += 1 else: self.tracked_objects[id]["motionless_count"] = 0
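Keeping only the last few boxes and taking inner percentiles of their edges yields a stable "home" box that jittery detections cannot stretch; a trimmed sketch of that computation with made-up boxes.

```python
import numpy as np

# Hypothetical history of recent boxes for one object: (xmin, ymin, xmax, ymax).
boxes = [(100, 50, 200, 150), (102, 48, 198, 152), (99, 51, 201, 149),
         (140, 90, 240, 190)]  # last one is a jittery outlier

xmins, ymins, xmaxs, ymaxs = map(np.array, zip(*boxes))

# Inner percentiles keep the outlier from stretching the computed "home" box.
home = (np.percentile(xmins, 15), np.percentile(ymins, 15),
        np.percentile(xmaxs, 85), np.percentile(ymaxs, 85))

cx, cy = 150, 100  # centroid of a new detection
moved = not (home[0] <= cx <= home[2] and home[1] <= cy <= home[3])
print(home, "moved:", moved)
```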
Fix [Linux] disk_io_counters() fails on Linux kernel 4.18+ Linux kernel 4.18+ added 4 fields; ignore them and parse the rest as usual.
@@ -1060,6 +1060,8 @@ def disk_io_counters(perdisk=False): # ...unless (Linux 2.6) the line refers to a partition instead # of a disk, in which case the line has less fields (7): # "3 1 hda1 8 8 8 8" + # 4.18+ has 4 fields added: + # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8 0 0 0 0" # See: # https://www.kernel.org/doc/Documentation/iostats.txt # https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats @@ -1074,7 +1076,7 @@ def disk_io_counters(perdisk=False): reads = int(fields[2]) (reads_merged, rbytes, rtime, writes, writes_merged, wbytes, wtime, _, busy_time, _) = map(int, fields[4:14]) - elif flen == 14: + elif flen == 14 or flen == 18: # Linux 2.6+, line referring to a disk name = fields[2] (reads, reads_merged, rbytes, rtime, writes, writes_merged,
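Because only the first 14 columns are consumed, lines carrying the four extra 4.18+ fields can reuse the same branch; a standalone parser sketch with synthetic lines, layout loosely following the psutil code.

```python
def parse_diskstats_line(line):
    fields = line.split()
    flen = len(fields)
    name = fields[2]
    if flen in (14, 18):
        # Linux 2.6+ disk line; 4.18+ appends 4 extra fields we can ignore.
        (reads, reads_merged, rbytes, rtime, writes, writes_merged,
         wbytes, wtime, _, busy_time, _) = map(int, fields[3:14])
        return name, reads, writes, busy_time
    raise ValueError("unexpected field count: %d" % flen)

old_style = "8 0 sda 10 1 80 5 20 2 160 7 0 12 0"   # 14 fields (pre-4.18)
new_style = old_style + " 0 0 0 0"                   # 18 fields (4.18+)
print(parse_diskstats_line(old_style))
print(parse_diskstats_line(new_style))
```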
Add option to exclude priors from charts (excluded by default) Thanks!
@@ -1123,12 +1123,14 @@ def api_get_progress_info(project_id): # noqa: F401 def api_get_progress_density(project_id): """Get progress density of a project""" + include_priors = request.args.get('priors', False, type=bool) + try: # get label history project_path = get_project_path(project_id) with open_state(project_path) as s: - data = s.get_labels() + data = s.get_labels(priors=include_priors) # create a dataset with the rolling mean of every 10 papers df = data \ @@ -1181,10 +1183,12 @@ def api_get_progress_density(project_id): def api_get_progress_recall(project_id): """Get cumulative number of inclusions by ASReview/at random""" + include_priors = request.args.get('priors', False, type=bool) + project_path = get_project_path(project_id) try: with open_state(project_path) as s: - data = s.get_labels() + data = s.get_labels(priors=include_priors) n_records = len(s.get_record_table()) # create a dataset with the cumulative number of inclusions
enhancement: [cli] print inputs info also if those types are unknown Make the CLI print out input (file) info even when it fails to detect their types from the file names.
@@ -346,7 +346,9 @@ def _load_diff(args, extra_opts): _exit_with_output("Wrong input type '%s'" % args.itype, 1) except API.UnknownFileTypeError: _exit_with_output("No appropriate backend was found for given file " - "'%s'" % args.itype, 1) + "type='%s', inputs=%s" % (args.itype, + ", ".join(args.inputs)), + 1) _exit_if_load_failure(diff, "Failed to load: args=%s" % ", ".join(args.inputs))
STY: Fix C++ style comment. [ci skip]
@@ -2099,7 +2099,7 @@ array_fromstring(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds return NULL; } - // binary mode, condition copied from PyArray_FromString + /* binary mode, condition copied from PyArray_FromString */ if (sep == NULL || strlen(sep) == 0) { /* Numpy 1.14, 2017-10-19 */ if (DEPRECATE(
Update ports for push notifications Since the Mattermost server needs to reach these push proxy ports, it makes sense to define which ones have to be opened in the firewall.
@@ -18,6 +18,8 @@ After purchasing a subscription to Mattermost E10 or higher from Mattermost, Inc Both TPNS and HPNS only work with the Mattermost Apple App Store and Google Play apps. If you have compiled the apps yourselves, you must also host your own Mattermost push proxy server. See our FAQ on :ref:`how push notifications work <push-faq>` for more details. +If you use HPNS you need to ensure that the push proxy can be reached on port 443 from the Mattermost server, for TPNS it is port 80. If you host your own proxy server the default port is 8086. + Setting up HPNS push notifications in Enterprise Edition --------------------------------------------------------
Update paper.bib Fix author order in the scoop ref.
volume = "", number = "", pages = "", - author = "Detoc, Jerome and Garo, Mickael and Carval, Thierry and Thepault, Baptiste and Mahoudo, Pierre", + author = "Detoc, Jerome and Thepault, Baptiste and Carval, Thierry and Mahoudo, Pierre and Garo, Mickael", url = "", organization = "", address = "",