Columns: message (string, length 13–484) · diff (string, length 38–4.63k)
Removed unused mkdir for the data path. It never worked because data_path is a "special" address, so it always generated an exception; in any case, the folder is created by the init cache step.
@@ -242,11 +242,6 @@ class GlobalVariables(object):
         self.settings_monitor_suspend(False)
         # Reset the value in case of addon crash
-        try:
-            os.mkdir(self.DATA_PATH)
-        except OSError:
-            pass
-
         self._init_cache()

     def _init_database(self, initialize):
Filtering hotfix Bug caused by an outdated function signature in a previous commit in the PR
@@ -240,7 +240,13 @@ class Filtering(Cog):
             # We also do not need to worry about filters that take the full message,
             # since all we have is an arbitrary string.
             if _filter["enabled"] and _filter["content_only"]:
-                match, reason = await _filter["function"](result)
+                filter_result = await _filter["function"](result)
+                reason = None
+
+                if isinstance(filter_result, tuple):
+                    match, reason = filter_result
+                else:
+                    match = filter_result

                 if match:
                     # If this is a filter (not a watchlist), we set the variable so we know
avoid calling front() on empty working set Summary: Pull Request resolved: ghimport-source-id:
@@ -865,6 +865,9 @@ class AliasDb::WorkingSet {
  private:
   bool hasDataDependency(Node* n) const {
+    if (!mover_ && nodes_.empty()) {
+      return false;
+    }
     const Node* pivot = mover_ ? mover_ : nodes_.front();
     if (n->isAfter(pivot)) {
       return producesFor(n);
fix(get_sector_futures): fix get_sector_futures interface
@@ -84,15 +84,16 @@ short_headers = { } long_headers = { - 'Accept': 'text/plain, */*; q=0.01', - 'Accept-Encoding': 'gzip, deflate, br', - 'Accept-Language': 'zh-CN,zh;q=0.8', - 'Connection': 'keep-alive', - 'Content-Length': '143', - 'Content-Type': 'application/x-www-form-urlencoded', - 'Host': 'cn.investing.com', - 'Origin': 'https://cn.investing.com', - 'Referer': 'https://cn.investing.com/commodities/crude-oil-historical-data', - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.91 Safari/537.36', - 'X-Requested-With': 'XMLHttpRequest' + 'accept': 'text/plain, */*; q=0.01', + # 'accept-encoding': 'gzip, deflate, br', + 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8', + 'cache-control': 'no-cache', + 'content-length': '267', + 'content-type': 'application/x-www-form-urlencoded', + 'origin': 'https://cn.investing.com', + 'referer': 'https://cn.investing.com/commodities/brent-oil-historical-data', + 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.91 Safari/537.36', + 'x-requested-with': 'XMLHttpRequest', + 'cookie': '__cfduid=d70e980b1bfd36321c37ad1061cd115911617875885; adBlockerNewUserDomains=1617875886; firstUdid=0; smd=dabb4951e74ef3424f3c6f7a7bb3d863-1617875885; udid=dabb4951e74ef3424f3c6f7a7bb3d863; PHPSESSID=o944e019q9cun1iremuhf4kl8r; StickySession=id.55850445256.200cn.investing.com; __cflb=0H28uxmf5JNxjDUC6WDvQUEoJyvKUTqpsPTyA93jsgx; adsFreeSalePopUp=3; geoC=HK; nyxDorf=Njc0YDF5Nmg3ZWxgNXg5OmAyZT1mfzY1NDRmYQ%3D%3D' + }
Make the filename an MD5 hash when passing raw data. Fixed the following: 1. changed `filename` to `_file` (the human-readable name); 2. `filename` is now the MD5 hash when the data is loaded as raw bytes. This is so that the session will have a proper filename.
@@ -4,6 +4,7 @@ standard_library.install_aliases() from androguard import session from androguard.decompiler import decompiler from androguard.core import androconf +import hashlib import logging log = logging.getLogger("androguard.misc") @@ -25,13 +26,13 @@ def get_default_session(): return androconf.CONF["SESSION"] -def AnalyzeAPK(filename, session=None, raw=False): +def AnalyzeAPK(_file, session=None, raw=False): """ Analyze an android application and setup all stuff for a more quickly analysis ! :param session: A session (default None) - :param filename: the filename of the android application or a buffer which represents the application - :type filename: string + :param _file: the filename of the android application or a buffer which represents the application + :type _file: string or bytes :rtype: return the :class:`APK`, :class:`DalvikVMFormat`, and :class:`VMAnalysis` objects """ @@ -41,10 +42,12 @@ def AnalyzeAPK(filename, session=None, raw=False): session = get_default_session() if raw: - data = filename + data = _file + filename = hashlib.md5(_file).hexdigest() else: - with open(filename, "rb") as fd: + with open(_file, "rb") as fd: data = fd.read() + filename = _file session.add(filename, data) return session.get_objects_apk(filename)
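A minimal standalone sketch of the raw-data case: hashing the in-memory bytes gives the session a stable, filesystem-safe key even when no real filename exists (the byte string below is made up).

import hashlib

raw_apk_bytes = b"PK\x03\x04 pretend this is an APK loaded into memory"
filename = hashlib.md5(raw_apk_bytes).hexdigest()
print(filename)  # deterministic 32-character hex digest used as the session's filename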
Fix a bug in the KG train.py script. It could not work when only the MXNet backend was installed.
from dataloader import EvalDataset, TrainDataset, NewBidirectionalOneShotIterator from dataloader import get_dataset -import torch.multiprocessing as mp import argparse import os @@ -9,10 +8,12 @@ import time backend = os.environ.get('DGLBACKEND') if backend.lower() == 'mxnet': + import multiprocessing as mp from train_mxnet import load_model from train_mxnet import train from train_mxnet import test else: + import torch.multiprocessing as mp from train_pytorch import load_model from train_pytorch import train from train_pytorch import test
Fix model test error. When testing models, mixer raised an error and the assert could not catch IntegrityError.
@@ -2,6 +2,7 @@ from django.test import TestCase from django.core.exceptions import ValidationError from django.db.utils import IntegrityError from mixer.backend.django import mixer +from server.models import Label, DocumentAnnotation, SequenceAnnotation, Seq2seqAnnotation class TestProject(TestCase): @@ -12,7 +13,7 @@ class TestProject(TestCase): def test_get_progress(self): project = mixer.blend('server.Project') - res = project.get_progress() + res = project.get_progress(None) self.assertEqual(res['total'], 0) self.assertEqual(res['remaining'], 0) @@ -23,41 +24,33 @@ class TestLabel(TestCase): label = mixer.blend('server.Label') mixer.blend('server.Label', shortcut=label.shortcut) with self.assertRaises(IntegrityError): - mixer.blend('server.Label', - project=label.project, - shortcut=label.shortcut) + Label(project=label.project, shortcut=label.shortcut).save() def test_text_uniqueness(self): label = mixer.blend('server.Label') mixer.blend('server.Label', text=label.text) with self.assertRaises(IntegrityError): - mixer.blend('server.Label', - project=label.project, - text=label.text) + Label(project=label.project, text=label.text).save() class TestDocumentAnnotation(TestCase): def test_uniqueness(self): - annotation1 = mixer.blend('server.DocumentAnnotation') + a = mixer.blend('server.DocumentAnnotation') with self.assertRaises(IntegrityError): - mixer.blend('server.DocumentAnnotation', - document=annotation1.document, - user=annotation1.user, - label=annotation1.label) + DocumentAnnotation(document=a.document, user=a.user, label=a.label).save() class TestSequenceAnnotation(TestCase): def test_uniqueness(self): - annotation1 = mixer.blend('server.SequenceAnnotation') + a = mixer.blend('server.SequenceAnnotation') with self.assertRaises(IntegrityError): - mixer.blend('server.SequenceAnnotation', - document=annotation1.document, - user=annotation1.user, - label=annotation1.label, - start_offset=annotation1.start_offset, - end_offset=annotation1.end_offset) + SequenceAnnotation(document=a.document, + user=a.user, + label=a.label, + start_offset=a.start_offset, + end_offset=a.end_offset).save() def test_position_constraint(self): with self.assertRaises(ValidationError): @@ -68,9 +61,8 @@ class TestSequenceAnnotation(TestCase): class TestSeq2seqAnnotation(TestCase): def test_uniqueness(self): - annotation1 = mixer.blend('server.Seq2seqAnnotation') + a = mixer.blend('server.Seq2seqAnnotation') with self.assertRaises(IntegrityError): - mixer.blend('server.Seq2seqAnnotation', - document=annotation1.document, - user=annotation1.user, - text=annotation1.text) + Seq2seqAnnotation(document=a.document, + user=a.user, + text=a.text).save()
Add missing dependencies for fedora image Problem: We use `mock` now to build against different fedora versions. Solution: Install `mock` during bootstrapping.
FROM fedora:35 RUN dnf update -y RUN dnf install -y libev-devel gmp-devel hidapi-devel libffi-devel zlib-devel libpq-devel m4 perl git pkg-config \ - rpmdevtools python3-devel python3-setuptools wget opam rsync which cargo autoconf systemd systemd-rpm-macros + rpmdevtools python3-devel python3-setuptools wget opam rsync which cargo autoconf mock systemd systemd-rpm-macros ENV USER dockerbuilder RUN useradd dockerbuilder && mkdir /tezos-packaging ENV HOME /tezos-packaging @@ -17,4 +17,3 @@ COPY docker/package/defaults /tezos-packaging/docker/package/defaults COPY docker/package/scripts /tezos-packaging/docker/package/scripts COPY LICENSE /tezos-packaging/LICENSE ENTRYPOINT ["python3", "-m", "package.package_generator"] -
Loosen the upper bound of flatbuffer to make it work with TF[1]. [1]
@@ -182,7 +182,7 @@ def make_extra_packages_examples(): # Required for bert examples in tfx/examples/bert 'tensorflow-text>=1.15.1,<3', # Required for tfx/examples/cifar10 - 'flatbuffers>=1.12,<2', + 'flatbuffers>=1.12,<3', 'tflite-support>=0.1.0a1,<0.1.1', # Required for tfx/examples/penguin/experimental # LINT.IfChange
txid --> id KISS: A transaction is a resource like any other. Let's not give it a special id (like 'txid'), but simply a regular id.
@@ -42,15 +42,15 @@ with something like the following in the body: Transactions ------------------- -.. http:get:: /transactions/{txid} +.. http:get:: /transactions/{id} - Get the transaction with the ID ``txid``. + Get the transaction with the ID ``id``. This endpoint returns only a transaction from a ``VALID`` or ``UNDECIDED`` block on ``bigchain``, if exists. - :param txid: transaction ID - :type txid: hex string + :param id: transaction ID + :type id: hex string **Example request**: @@ -148,10 +148,10 @@ Transactions Statuses -------------------------------- -.. http:get:: /statuses/{txid} +.. http:get:: /statuses/{id} - Get the status of a transaction with the ID ``txid``, if a transaction - with that ``txid`` exists. + Get the status of a transaction with the ID ``id``, if a transaction + with that ``id`` exists. The possible status values are ``backlog``, ``undecided``, ``valid`` or ``invalid``. @@ -160,8 +160,8 @@ Statuses ``valid`` or ``undecided``, a ``303 See Other`` status code is returned, as well as a URL to the resource in the location header. - :param txid: transaction ID - :type txid: hex string + :param id: transaction ID + :type id: hex string **Example request**:
Add environment dict to lint calls Expose standard MOLECULE environment variables to the lint commands to improve processing
@@ -93,7 +93,13 @@ def execute(self): try: LOG.info("Executing: %s" % cmd) - run(cmd, shell=True, universal_newlines=True, check=True) + run( + cmd, + env=self._config.env, + shell=True, + universal_newlines=True, + check=True, + ) except Exception as e: util.sysexit_with_message("Lint failed: %s: %s" % (e, e))
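A small sketch of what passing `env` to `run` buys the lint command: the child shell can read the exported variables. The variable name below is only an example of the MOLECULE_* values carried in `self._config.env`, not the full set.

import os
import subprocess

env = dict(os.environ, MOLECULE_SCENARIO_NAME="default")  # example variable only
subprocess.run(
    "echo scenario is $MOLECULE_SCENARIO_NAME",  # POSIX shell expansion of the injected variable
    env=env,
    shell=True,
    universal_newlines=True,
    check=True,
)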
container: don't install the engine on all clients. We only need the container engine to be installed on the first client node in order to execute the pools/keys operations. We already follow the same workflow with the ceph-container-common role, which pulls the ceph container image.
- import_role: name: ceph-container-engine tags: with_pkg + when: (group_names != ['clients']) or (inventory_hostname == groups.get('clients', [''])|first) - import_role: name: ceph-container-common tags: fetch_container_image
DOC: added `See Also` section Improved docstring by pointing user to information about correct format options for `supported_tags` kwarg.
@@ -66,8 +66,9 @@ def list_files(tag=None, inst_id=None, data_path=None, format_str=None, User specified file format. If None is specified, the default formats associated with the supplied tags are used. (default=None) supported_tags : dict or NoneType - keys are inst_id, each containing a dict keyed by tag - where the values file format template strings. (default=None) + Keys are inst_id, each containing a dict keyed by tag + where the values file format template strings. See Files.from_os + `format_str` kwarg for more details. (default=None) file_cadence : dt.timedelta or pds.DateOffset pysat assumes a daily file cadence, but some instrument data file contain longer periods of time. This parameter allows the specification @@ -87,6 +88,10 @@ def list_files(tag=None, inst_id=None, data_path=None, format_str=None, out : pysat.Files.from_os : pysat._files.Files A class containing the verified available files + See Also + -------- + pysat.Files.from_os + Examples -------- ::
Update LICENSE * Update LICENSE: STFC --> UKRI, and update to the current year * The date range starts at 2012, the date of the first commit. The project began in 2011, though.
-Copyright (c) 2015 Diamond Light Source, Lawrence Berkeley National Laboratory, and the Science and Technology Facilities Council. +Copyright (c) 2012-2019 Diamond Light Source, Lawrence Berkeley National Laboratory, and United Kingdom Research and Innovation. All rights reserved. Redistribution and use in source and binary forms, with or without
DOC: minor formatting tweak to sample notebooks This pull request fixes the mis-aligned indentation in the "sample notebooks" page in the docs introduced in
@@ -8,8 +8,7 @@ Sample Notebooks These tools include three types of example Jupyter notebooks. -1. `Sample Jdaviz notebooks <https://github.com/spacetelescope/jdaviz/tree/main/notebooks>`_ that illustrate how to use Jdaviz and various API calls. These notebooks are located in the ``notebooks`` sub-directory -of the git repository. +1. `Sample Jdaviz notebooks <https://github.com/spacetelescope/jdaviz/tree/main/notebooks>`_ that illustrate how to use Jdaviz and various API calls. These notebooks are located in the ``notebooks`` sub-directory of the git repository. 2. `Sample JDAT notebooks <https://jwst-docs.stsci.edu/jwst-post-pipeline-data-analysis/example-jupyter-notebooks-data-analysis-tools>`_ that illustrate likely science workflows with data obtained from the various JWST instruments. These notebook incorporate ``astropy`` and Jdaviz when possible.
MNT: move column filtering up in BEC. When we moved the table logic below the plotting, we were no longer over-writing the `columns` local, which changed which code path we went down in the plotting.
@@ -245,6 +245,10 @@ class BestEffortCallback(QtAwareCallback): **share_kwargs) axes = fig.axes + # Ensure that no independent variables ('dimensions') are + # duplicated here. + columns = [c for c in columns if c not in self.all_dim_fields] + # ## LIVE PLOT AND PEAK ANALYSIS ## # if ndims == 1: @@ -337,13 +341,10 @@ class BestEffortCallback(QtAwareCallback): fig.tight_layout() except ValueError: pass + # ## TABLE ## # if stream_name == self.dim_stream: - # Ensure that no independent variables ('dimensions') are - # duplicated here. - columns = [c for c in columns if c not in self.all_dim_fields] - if self._table_enabled: # plot everything, independent or dependent variables self._table = LiveTable(list(self.all_dim_fields) + columns)
Use correct keys from group_info in group_install. Fixes
@@ -2597,9 +2597,9 @@ def group_install(name, targets = [] for group in groups: group_detail = group_info(group) - targets.extend(group_detail.get('mandatory packages', [])) + targets.extend(group_detail.get('mandatory', [])) targets.extend( - [pkg for pkg in group_detail.get('default packages', []) + [pkg for pkg in group_detail.get('default', []) if pkg not in skip] ) if include:
Fixing speech rate and PEP 8. Changing the speech rate to 170 for a smoother voice experience and adding blank lines to conform to PEP 8 guidelines.
@@ -10,13 +10,13 @@ if IS_MACOS: else: import pyttsx3 -def create_voice(self, gtts_status, rate=120): +def create_voice(self, gtts_status, rate=180): """ Checks that status of gtts engine, and calls the correct speech engine :param rate: Speech rate for the engine (if supported by the OS) """ - if gtts_status==True: + if gtts_status is True: return VoiceGTTS() else: @@ -61,6 +61,7 @@ class VoiceGTTS(): playsound("voice.mp3") os.remove("voice.mp3") + class VoiceMac(): def text_to_speech(self, speech): speech = remove_ansi_escape_seq(speech) @@ -141,7 +142,7 @@ class VoiceWin(): :return: Nothing to return. """ self.engine = pyttsx3.init() - self.engine.setProperty('rate', 180) # setting up new voice rate + self.engine.setProperty('rate', self.rate) def destroy(self): """ @@ -162,6 +163,7 @@ class VoiceWin(): """ speech = remove_ansi_escape_seq(speech) self.create() + self.engine.setProperty('rate', 170) # setting up new voice rate voices = self.engine.getProperty('voices') # getting details of current voice self.engine.setProperty('voices', voices[1].id) # changing index, changes voices. 1 for female self.engine.say(speech) @@ -184,7 +186,6 @@ class VoiceWin(): self.rate = self.rate + delta - class VoiceNotSupported(): def __init__(self): self.warning_print = False
Add CHANGELOG entries for 0.6.6 See also the 0.6-maintenance branch.
+0.6.6 2018-08-27 +---------------- + +* Bugfix add type conversion to getlist (on multidicts) +* Bugfix correct ASGI client usage (allows for None) +* Bugfix ensure overlapping requests work without destroying the + others context. +* Bugfix ensure only integer status codes are accepted. + 0.6.5 2018-08-05 ----------------
FFU support for ceph_nfs. This change introduces the steps needed to dump the set of playbooks used to perform the ceph-nfs update. Depends-On:
@@ -71,7 +71,49 @@ outputs: dport: # We support only NFS 4.1 to start - 2049 - upgrade_tasks: [] + upgrade_tasks: + - name: Create hiera data to upgrade ceph_nfs in a stepwise manner. + when: + - step|int == 1 + - cluster_recreate|bool + block: + - name: set ceph_nfs upgrade node facts in a single-node environment + set_fact: + ceph_nfs_short_node_names_upgraded: "{{ ceph_nfs_short_node_names }}" + cacheable: no + when: groups['ceph_nfs'] | length <= 1 + - name: set ceph_nfs upgrade node facts from the limit option + set_fact: + ceph_nfs_short_node_names_upgraded: "{{ ceph_nfs_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}" + cacheable: no + when: + - groups['ceph_nfs'] | length > 1 + - item.split('.')[0] in ansible_limit.split(':') + loop: "{{ ceph_nfs_short_node_names | default([]) }}" + - fail: + msg: > + You can't upgrade ceph_nfs without staged + upgrade. You need to use the limit option in order + to do so. + when: >- + ceph_nfs_short_node_names_upgraded is not defined or + ceph_nfs_short_node_names_upgraded | length == 0 + - debug: + msg: "Prepare ceph_nfs upgrade for {{ ceph_nfs_short_node_names_upgraded }}" + - name: add the ceph_nfs short name to hiera data for the upgrade. + include_role: + name: tripleo_upgrade_hiera + tasks_from: set.yml + vars: + tripleo_upgrade_key: ceph_nfs_short_node_names_override + tripleo_upgrade_value: "{{ceph_nfs_short_node_names_upgraded}}" + - name: remove the extra hiera data needed for the upgrade. + include_role: + name: tripleo_upgrade_hiera + tasks_from: remove.yml + vars: + tripleo_upgrade_key: ceph_nfs_short_node_names_override + when: ceph_nfs_short_node_names_upgraded | length == ceph_nfs_short_node_names | length step_config: 'include tripleo::profile::pacemaker::ceph_nfs' puppet_config: config_image: ''
Remove useless options from CORS init This can be merged only after is merged
@@ -61,20 +61,7 @@ def create_app(*, debug=False, threads=4): app = Flask(__name__) - CORS(app, - allow_headers=( - 'x-requested-with', - 'content-type', - 'accept', - 'origin', - 'authorization', - 'x-csrftoken', - 'withcredentials', - 'cache-control', - 'cookie', - 'session-id', - ), - supports_credentials=True) + CORS(app) app.debug = debug
Filter out all header links through tags. Replacing the paragraph sign in the string missed some other symbols that were used for permalinks.
@@ -129,6 +129,9 @@ def get_signatures(start_signature: PageElement) -> List[str]: start_signature, *_find_next_siblings_until_tag(start_signature, ("dd",), limit=2), )[-MAX_SIGNATURE_AMOUNT:]: + for tag in element.find_all("a", class_="headerlink", recursive=False): + tag.decompose() + signature = _UNWANTED_SIGNATURE_SYMBOLS_RE.sub("", element.text) if signature:
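A minimal BeautifulSoup sketch of the same idea, with a made-up HTML snippet: decomposing the headerlink anchors drops every permalink symbol, not just the paragraph sign the old regex replaced.

from bs4 import BeautifulSoup

html = '<dt id="foo">foo(bar)<a class="headerlink" href="#foo" title="Permalink">¶</a></dt>'
element = BeautifulSoup(html, "html.parser").dt

for tag in element.find_all("a", class_="headerlink"):
    tag.decompose()  # remove the permalink anchor and everything inside it

print(element.text)  # "foo(bar)" -- no stray permalink symbols left to strip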
Fix tooltip again Fixes
<link rel="stylesheet" type="text/css" href="{{ url_for('static', filename='css/main.css') }}"> <script> - window.addEventListener('DOMContentLoaded', (event) => { - - var kb = document.getElementById("keyboard"); - for (var i = 0; i < codepage.length; i++) { - kb.innerHTML += (`<span class="key" style="text-align:center;" data-title="${repr(codepage_descriptions[i])}">${codepage[i]}</span><div class="tooltip">${repr(codepage_descriptions[i])}<div class="arrow" data-popper-arrow></div></div>`); - } + let pops = []; + function setupKeys() { document.querySelectorAll('.key').forEach(item => { item.addEventListener('click', event => { - var char = replaceHTMLChar(event.target.innerHTML) - var cm = globalThis[`e_${selectedBox}`] - cm.replaceSelection(char) - cm.save() - cm.focus() - updateCount() + var char = replaceHTMLChar(event.target.innerHTML); + var cm = globalThis[`e_${selectedBox}`]; + cm.replaceSelection(char); + cm.save(); + cm.focus(); + updateCount(); }); const tooltip = item.nextSibling; const pop = Popper.createPopper(item, tooltip, { }, ], }); + pops.push(pop); item.addEventListener('mouseenter', () => { - console.log(item); tooltip.setAttribute('data-show', ''); pop.update(); }); item.addEventListener('mouseleave', () => { tooltip.removeAttribute('data-show'); }); - }) + }); + } + window.addEventListener('DOMContentLoaded', (event) => { + var kb = document.getElementById("keyboard"); + for (var i = 0; i < codepage.length; i++) { + kb.innerHTML += (`<span class="key" style="text-align:center;" data-title="${repr(codepage_descriptions[i])}">${codepage[i]}</span><div class="tooltip">${repr(codepage_descriptions[i])}<div class="arrow" data-popper-arrow></div></div>`); + } og_keyboard_html = document.getElementById("keyboard").innerHTML; + setupKeys(); }); function glyphSearch() { } } else { document.getElementById("keyboard").innerHTML = og_keyboard_html; - document.querySelectorAll('.key').forEach(item => { - item.addEventListener('click', event => { - var char = replaceHTMLChar(event.target.innerHTML) - var cm = globalThis[`e_${selectedBox}`] - cm.replaceSelection(char) - cm.save() - cm.focus() - updateCount() - }) - }) + pops.forEach(p => p.destroy()); + pops = []; + setupKeys(); } }
[otBase] fix array-reader to return list, not array.array Was not noticed because it was for the most part unused.
@@ -146,7 +146,7 @@ class OTTableReader(object): value = array.array(typecode, self.data[pos:newpos]) if sys.byteorder != "big": value.byteswap() self.pos = newpos - return value + return value.tolist() def readInt8(self): return self.readValue("b", staticSize=1)
Use heartbeat period for HTEX worker watchdog Previously, this used 1000 x the tight loop poll period. This was awkward to understand and describe. The heartbeat period is more relevant as a configuration option for setting the time scale on which broken components will be discovered.
@@ -109,7 +109,8 @@ class Manager(object): assumes that the interchange is lost and the manager shuts down. Default:120 heartbeat_period : int - Number of seconds after which a heartbeat message is sent to the interchange + Number of seconds after which a heartbeat message is sent to the interchange, and workers + are checked for liveness. poll_period : int Timeout period used by the manager in milliseconds. Default: 10ms @@ -394,7 +395,7 @@ class Manager(object): ), name="HTEX-Worker-{}".format(worker_id)) self.procs[worker_id] = p logger.info("Worker {} has been restarted".format(worker_id)) - time.sleep(self.poll_period) + time.sleep(self.heartbeat_period) logger.critical("Exiting")
No mocking at all. Testing out mocks in Read the Docs.
@@ -52,7 +52,7 @@ MOCK_MODULES = ['json', 'skimage', 'skimage.transform', 'skimage.transform._geometric'] -sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) +#sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the
Implement a fake "cr_frame" property for Cython async coroutines that always returns None, just to prevent an AttributeError. Closes
@@ -1226,6 +1226,13 @@ static void __Pyx_Coroutine_del(PyObject *self) { #endif } +static PyObject * +__Pyx_Coroutine_get_frame(__pyx_CoroutineObject *self) +{ + // Fake implementation that always returns None, but at least does not raise an AttributeError. + Py_RETURN_NONE; +} + static PyObject * __Pyx_Coroutine_get_name(__pyx_CoroutineObject *self) { @@ -1499,6 +1506,8 @@ static PyGetSetDef __pyx_Coroutine_getsets[] = { (char*) PyDoc_STR("name of the coroutine"), 0}, {(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname, (char*) PyDoc_STR("qualified name of the coroutine"), 0}, + {(char *) "cr_frame", (getter)__Pyx_Coroutine_get_frame, NULL, + (char*) PyDoc_STR("Frame of the coroutine"), 0}, {0, 0, 0, 0, 0} };
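For comparison, plain CPython coroutines already expose cr_frame; introspection code that wants to work with both can guard the attribute access, which is the situation the fake property above smooths over. A small runnable sketch:

import asyncio

async def coro():
    await asyncio.sleep(0)

c = coro()
# getattr with a default never raises, whether the object has a real frame,
# a fake always-None property (as in the Cython change), or no attribute at all.
print(getattr(c, "cr_frame", None) is not None)  # True for a native CPython coroutine
c.close()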
Handle missing description for HTML reporting. `report.description` is not properly configured when the test execution fails. In such a case, we should print the original error, not a traceback related to the missing description.
@@ -18,7 +18,10 @@ def pytest_html_results_table_row(report, cells): """ Add content to the column Description """ + try: cells.insert(2, html.td(report.description)) + except AttributeError: + cells.insert(2, html.td('--- no description ---')) @pytest.mark.hookwrapper
codacy coverage
@@ -16,6 +16,10 @@ cache: notifications: email: false +before_install: + - sudo apt-get install jq + - curl -LSs "$(curl -LSs https://api.github.com/repos/codacy/codacy-coverage-reporter/releases/latest | jq -r '.assets | map({name, browser_download_url} | select(.name | endswith(".jar"))) | .[0].browser_download_url')" -o codacy-coverage-reporter-assembly.jar + install: - pip install pytest-cov - pip install --quiet .[test,notebooks] @@ -29,6 +33,7 @@ script: after_success: - codecov - pip list + - java -jar codacy-coverage-reporter-assembly.jar report -l Python -r coverage.xml branches: only:
Add quaternary screenshot for AR Capture hospital information tab, tested locally!
@@ -371,6 +371,19 @@ tertiary: quaternary: + AR: + renderSettings: + clipRectangle: + width: 1400 + height: 1400 + overseerScript: > + page.manualWait(); + await page.waitForSelector("span.ember-view.tab-title:nth-of-type(7)"); + page.click("span.ember-view.tab-title:nth-of-type(7)"); + await page.waitForDelay(10000); + page.done(); + message: clicking on "Hospital Information" tab for AR quaternary + FL: file: pdf message: downloading pdf
Added chromosome filter Creates a whitelist of chromosomes to be considered. Highly tailored to Drosophila.
@@ -34,9 +34,20 @@ rule reads2fragments: "awk -v OFS='\\t' -v pos_offset=\"4\" -v neg_offset=\"5\" " "'{{ print($1, $2 - pos_offset , $6 + neg_offset ) }}' > {output}" -rule filterFragments: +rule filterChromosomes: input: os.path.join(outdir_MACS2, "{sample}.all.bedpe") + output: + os.path.join(outdir_MACS2, "{sample}.all_filtered.bedpe") + params: + chromlist = '^'+'(' + '|'.join( ('dmel_mitochondrion_genome', '3R','3L','2R', '2L', 'X', '4', 'Y') ) + ')' + '[[:space:]]' # dm6! + shell: + "egrep \'{params.chromlist}\' {input} > {output}" + + +rule filterNucleosomalFragments: + input: + os.path.join(outdir_MACS2, "{sample}.all_filtered.bedpe") output: os.path.join(outdir_MACS2, "{sample}.openchrom.bedpe") params: @@ -46,6 +57,7 @@ rule filterFragments: "awk -v cutoff={params.cutoff} -v OFS='\\t' \"{{ if(\$3-\$2 < cutoff) {{ print (\$0) }} }}\" > " "{output}" + # samtools view -b -f 2 -F 4 -F 8 -F 256 -F 512 -F 2048 rule callOpenChromatin:
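The same whitelist sketched in Python for clarity; the chromosome names come from the rule's params (dm6 assumed), and the first BEDPE column is the chromosome, which is what the egrep start-of-line anchor matches.

ALLOWED_CHROMS = {"dmel_mitochondrion_genome", "3R", "3L", "2R", "2L", "X", "4", "Y"}

def keep_fragment(bedpe_line: str) -> bool:
    # Equivalent to egrep '^(name1|name2|...)[[:space:]]' for well-formed, tab-separated BEDPE.
    return bedpe_line.split("\t", 1)[0] in ALLOWED_CHROMS

print(keep_fragment("2L\t100\t250"))        # True
print(keep_fragment("Un_contig12\t5\t80"))  # False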
Add some fixed/distinct colors Fixes
@@ -3,7 +3,21 @@ import randomColor from "randomcolor"; import { RESERVED_FIELDS } from "./labels"; -const FIXED_COLORS = ["red", "green", "blue"]; +const FIXED_COLORS = [ + "#ee0000", + "#ee6600", + "#993300", + "#996633", + "#999900", + "#009900", + "#003300", + "#009999", + "#000099", + "#0066ff", + "#6600ff", + "#cc33cc", + "#777799", +]; type Color = string; type ColorMap = { [name: string]: Color };
Remove periodic timeout in interchange task pull loop. This loop was there to check the kill event, which is no longer used by this thread (see PR), so there is no need to loop repeatedly in the absence of messages.
@@ -141,7 +141,6 @@ class Interchange(object): self.context = zmq.Context() self.task_incoming = self.context.socket(zmq.DEALER) self.task_incoming.set_hwm(0) - self.task_incoming.RCVTIMEO = 10 # in milliseconds self.task_incoming.connect("tcp://{}:{}".format(client_address, client_ports[0])) self.results_outgoing = self.context.socket(zmq.DEALER) self.results_outgoing.set_hwm(0)
Update postSonarr.py: fix the episodefile endpoint functions, which were incorrectly using the moviefile endpoint.
@@ -98,16 +98,16 @@ def updateEpisode(baseURL, headers, new, episodeid, log): def getEpisodeFile(baseURL, headers, episodefileid, log): - url = baseURL + "/api/v3/moviefile/" + str(episodefileid) - log.debug("Requesting moviefile from Sonarr for moviefile via %s." % url) + url = baseURL + "/api/v3/episodefile/" + str(episodefileid) + log.debug("Requesting episodefile from Sonarr for episodefile via %s." % url) r = requests.get(url, headers=headers) payload = r.json() return payload def updateEpisodeFile(baseURL, headers, new, episodefileid, log): - url = baseURL + "/api/v3/moviefile/" + str(episodefileid) - log.debug("Requesting moviefile update to Sonarr via %s." % url) + url = baseURL + "/api/v3/episodefile/" + str(episodefileid) + log.debug("Requesting episodefile update to Sonarr via %s." % url) r = requests.put(url, json=new, headers=headers) payload = r.json() return payload
[modules/bluetooth] Make dbus destination configurable Add a parameter "dbus_destination" that allows a user to specify the DBUS destination. fixes
@@ -4,6 +4,7 @@ right click toggles bluetooth. Needs dbus-send to toggle bluetooth state. Parameters: * bluetooth.device : the device to read state from (default is hci0) * bluetooth.manager : application to launch on click (blueman-manager) + * bluetooth.dbus_destination : dbus destination (defaults to org.blueman.Mechanism) """ @@ -95,9 +96,11 @@ class Module(bumblebee.engine.Module): else: state = "true" - cmd = "dbus-send --system --print-reply --dest=org.blueman.Mechanism"\ + dst = self.parameter("dbus_destination", "org.blueman.Mechanism") + + cmd = "dbus-send --system --print-reply --dest={}"\ " / org.blueman.Mechanism.SetRfkillState"\ - " boolean:{}".format(state) + " boolean:{}".format(dst, state) bumblebee.util.execute(cmd)
btcpayserver: update to v1.4.4; verify with nicolasdorier's signature; do not exit the install if the btcpay user already exists.
# https://github.com/dgarage/NBXplorer/tags NBXplorerVersion="v2.2.20" # https://github.com/btcpayserver/btcpayserver/releases -BTCPayVersion="v1.4.0" +BTCPayVersion="v1.4.4" PGPsigner="nicolasdorier" PGPpubkeyLink="https://keybase.io/nicolasdorier/pgp_keys.asc" @@ -257,7 +257,7 @@ if [ "$1" = "1" ] || [ "$1" = "on" ]; then isInstalled=$(sudo ls /etc/systemd/system/btcpayserver.service 2>/dev/null | grep -c 'btcpayserver.service') if [ ${isInstalled} -eq 0 ]; then # create btcpay user - sudo adduser --disabled-password --gecos "" btcpay || exit 1 + sudo adduser --disabled-password --gecos "" btcpay cd /home/btcpay || exit 1 # store BTCpay data on HDD @@ -429,8 +429,12 @@ btc.rpc.password=$PASSWORD_B sudo -u btcpay git clone https://github.com/btcpayserver/btcpayserver.git 2>/dev/null cd btcpayserver sudo -u btcpay git reset --hard $BTCPayVersion + + # sudo -u btcpay /home/admin/config.scripts/blitz.git-verify.sh \ + # "web-flow" "https://github.com/web-flow.gpg" "4AEE18F83AFDEB23" || exit 1 sudo -u btcpay /home/admin/config.scripts/blitz.git-verify.sh \ - "web-flow" "https://github.com/web-flow.gpg" "4AEE18F83AFDEB23" || exit 1 + "${PGPsigner}" "${PGPpubkeyLink}" "${PGPpubkeyFingerprint}" || exit 1 + echo "# Build BTCPayServer ..." # from the build.sh with path sudo -u btcpay /home/btcpay/dotnet/dotnet build -c Release /home/btcpay/btcpayserver/BTCPayServer/BTCPayServer.csproj
Actually require Python 3.7 in setup.py. I had forgotten the bottom part of `setup.py`. Now it requires 3.7, and Read the Docs should work.
@@ -48,7 +48,6 @@ setuptools.setup( # that you indicate whether you support Python 2, Python 3 or both. 'Programming Language :: Python', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8',
pofile.py: Added a new exception called PoFileError, thrown if flagged. This new exception is thrown when the po parser finds an invalid po file. This helps handle invalid po files during parsing; invalid po files may otherwise cause other errors such as a UnicodeEncodeError. Closes
@@ -19,6 +19,7 @@ from babel.util import wraptext from babel._compat import text_type + def unescape(string): r"""Reverse `escape` the given string. @@ -73,6 +74,15 @@ def denormalize(string): return unescape(string) +class PoFileError(Exception): + """Exception thrown by PoParser when an invalid po file is encountered.""" + def __init__(self, message, catalog, line, lineno): + super(PoFileError, self).__init__('{message} on {lineno}'.format(message=message, lineno=lineno)) + self.catalog = catalog + self.line = line + self.lineno = lineno + + class _NormalizedString(object): def __init__(self, *args): @@ -104,11 +114,12 @@ class PoFileParser(object): 'msgid_plural', ] - def __init__(self, catalog, ignore_obsolete=False): + def __init__(self, catalog, ignore_obsolete=False, abort_invalid=False): self.catalog = catalog self.ignore_obsolete = ignore_obsolete self.counter = 0 self.offset = 0 + self.abort_invalid = abort_invalid self._reset_message_state() def _reset_message_state(self): @@ -276,11 +287,13 @@ class PoFileParser(object): self._add_message() def _invalid_pofile(self, line, lineno, msg): + if self.abort_invalid: + raise PoFileError(msg, self.catalog, line, lineno) print("WARNING:", msg) print("WARNING: Problem on line {0}: {1}".format(lineno + 1, line)) -def read_po(fileobj, locale=None, domain=None, ignore_obsolete=False, charset=None): +def read_po(fileobj, locale=None, domain=None, ignore_obsolete=False, charset=None, abort_invalid=False): """Read messages from a ``gettext`` PO (portable object) file from the given file-like object and return a `Catalog`. @@ -325,9 +338,10 @@ def read_po(fileobj, locale=None, domain=None, ignore_obsolete=False, charset=No :param domain: the message domain :param ignore_obsolete: whether to ignore obsolete messages in the input :param charset: the character set of the catalog. + :param abort_invalid: abort read if po file is invalid """ catalog = Catalog(locale=locale, domain=domain, charset=charset) - parser = PoFileParser(catalog, ignore_obsolete) + parser = PoFileParser(catalog, ignore_obsolete, abort_invalid=abort_invalid) parser.parse(fileobj) return catalog
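A hedged usage sketch of the new flag, assuming a Babel version that includes this change; the invalid input is made up, and any non-gettext line is enough to reach the parser's invalid-file path.

from io import StringIO
from babel.messages.pofile import read_po, PoFileError

bogus = StringIO("this line is not valid gettext syntax\n")

try:
    catalog = read_po(bogus, abort_invalid=True)  # raise instead of printing WARNING lines
except PoFileError as exc:
    print("invalid po input:", exc, "| offending line number:", exc.lineno)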
Fix some typos Closes
@@ -135,7 +135,7 @@ Read API Methods All search/request parameters inside square brackets are **optional**. Methods such as :py:meth:`Zotero.top()`, :py:meth:`Zotero.items()` etc. can be called with no additional parameters if you wish. .. tip:: - The Read API returns 25 results by default (the API documentation claims 50). In the interests of usability, Pyzotero returns 100 items by default, by setting the API ``limit`` parameter to 100, unless it's set by the user. If you wish to retrieve e.g. all top-level items without specifiying a ``limit`` parameter, you'll have to wrap your call with :py:meth:`Zotero.everything()`: ``results = zot.everything(zot.top())``. + The Read API returns 25 results by default (the API documentation claims 50). In the interests of usability, Pyzotero returns 100 items by default, by setting the API ``limit`` parameter to 100, unless it's set by the user. If you wish to retrieve e.g. all top-level items without specifying a ``limit`` parameter, you'll have to wrap your call with :py:meth:`Zotero.everything()`: ``results = zot.everything(zot.top())``. .. py:method:: Zotero.key_info()
GlsaDirSet: improve warnings for bad glsa data To help pkgcheck be able to catch these cases.
@@ -70,23 +70,25 @@ class GlsaDirSet(metaclass=generic_equality): yield packages.KeyedAndRestriction( pkgatoms[pkgname], packages.OrRestriction(*pkgs[pkgname]), key=pkgname) - def iter_vulnerabilities(self): """generator yielding each GLSA restriction""" for path in self.paths: for fn in listdir_files(path): # glsa-1234-12.xml if not (fn.startswith("glsa-") and fn.endswith(".xml")): + logger.warning(f'invalid glsa file name: {fn!r}') continue # This verifies the filename is of the correct syntax. try: [int(x) for x in fn[5:-4].split("-")] except ValueError: + logger.warning(f'invalid glsa file name: {fn!r}') continue root = etree.parse(pjoin(path, fn)) glsa_node = root.getroot() if glsa_node.tag != 'glsa': - raise ValueError("glsa without glsa rootnode") + logger.warning(f'glsa file without glsa root node: {fn!r}') + continue for affected in root.findall('affected'): for pkg in affected.findall('package'): try: @@ -100,12 +102,11 @@ class GlsaDirSet(metaclass=generic_equality): yield fn[5:-4], pkgname, pkgatom, pkg_vuln_restrict except (TypeError, ValueError) as e: # thrown from cpv. - logger.warning(f"invalid glsa- {fn}, package {pkgname}: {e}") + logger.warning(f"invalid glsa file {fn!r}, package {pkgname}: {e}") except IGNORED_EXCEPTIONS: raise except Exception as e: - logger.warning(f"invalid glsa- {fn}: error: {e}") - + logger.warning(f"invalid glsa file {fn!r}: {e}") def generate_intersects_from_pkg_node(self, pkg_node, tag=None): arch = pkg_node.get("arch")
fix: if response content-type is bin, try guessing downloaded content type
@@ -734,6 +734,9 @@ def get_web_image(file_url: str) -> Tuple["ImageFile", str, str]: extn = None extn = get_extension(filename, extn, response=r) + if extn == "bin": + extn = get_extension(filename, extn, content=r.content) or "png" + filename = "/files/" + strip(unquote(filename)) return image, filename, extn
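A standalone sketch of content-based sniffing using magic bytes only; this is not the project's get_extension helper, just an illustration of how a generic "bin" content type can still be resolved from the downloaded bytes.

def sniff_image_extension(content: bytes, default: str = "png") -> str:
    # Check a few well-known signatures when the response content-type is a generic octet-stream.
    if content.startswith(b"\x89PNG\r\n\x1a\n"):
        return "png"
    if content.startswith(b"\xff\xd8\xff"):
        return "jpeg"
    if content[:6] in (b"GIF87a", b"GIF89a"):
        return "gif"
    return default

print(sniff_image_extension(b"\x89PNG\r\n\x1a\n" + b"\x00" * 8))  # png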
overlays: Fix stream edit click-through bug. Fixes
@@ -476,6 +476,8 @@ exports.initialize = function () { $("#stream_privacy_modal").remove(); $("#subscriptions_table").append(change_privacy_modal); overlays.open_modal('stream_privacy_modal'); + e.preventDefault(); + e.stopPropagation(); }); $("#subscriptions_table").on('click', '#change-stream-privacy-button',
Wrap chain_head call in py.allow_threads The RwLock in the chain controller caused a race condition when not called from within a py.allow_threads. This function was the last remaining FFI wrapper function for chain controller that did not release the GIL before calling back into rust code.
@@ -373,13 +373,17 @@ pub extern "C" fn chain_controller_chain_head( ) -> ErrorCode { check_null!(chain_controller); unsafe { - let chain_head = (*(chain_controller - as *mut ChainController<PyBlockCache, PyBlockValidator>)) - .chain_head(); - let gil_guard = Python::acquire_gil(); let py = gil_guard.python(); + let controller = (*(chain_controller + as *mut ChainController<PyBlockCache, PyBlockValidator>)) + .light_clone(); + + let chain_head = py.allow_threads(move || { + controller.chain_head() + }); + *block = chain_head.to_py_object(py).steal_ptr(); } ErrorCode::Success
Better error message handling: the error is not removed but kept. In the old implementation the error object was modified, so the error message was lost afterwards. In the new implementation the message is just read and the error object is left unchanged.
@@ -1612,7 +1612,7 @@ class Request(MutableMapping): 'edit-already-exists', 'actionthrottledtext', # T192912 ) - messages = error.pop('messages', None) + messages = error.get('messages') message = None # bug T68619; after Wikibase breaking change 1ca9cee change we have a # list of messages
Added a column for preschool education occurring. This is an integer because booleans cannot be added as nullable.
"is_nullable": true, "column_id": "image_name", "type": "expression" + }, + { + "comment": "Preschool Education conducted on this day", + "column_id": "pse_conducted", + "type": "expression", + "datatype": "integer", + "is_nullable": true, + "is_primary_key": false, + "expression": { + "type": "conditional", + "test": { + "type": "boolean_expression", + "operator": "any_in_multi", + "expression": { + "type": "property_path", + "property_path": [ + "form", + "activity" + ] + }, + "property_value": [ + "conversation", + "cognitive", + "physical_outdoor", + "arts_crafts", + "language" + ] + }, + "expression_if_true": { + "type": "constant", + "constant": 1 + }, + "expression_if_false": { + "type": "constant", + "constant": 0 + } + } } ], "named_expressions": {
Rename test param Incorporating review feedback.
@@ -811,13 +811,13 @@ class TestRemoteState(object): self.remote_state.resource_exists(foo) @pytest.mark.parametrize( - 'resource_topic,deployed_topic,resource_exists,expected_result', [ + 'resource_topic,deployed_topic,is_current,expected_result', [ ('mytopic', 'mytopic', True, True), ('mytopic-new', 'mytopic-old', False, False), ] ) def test_sns_subscription_exists(self, resource_topic, deployed_topic, - resource_exists, expected_result): + is_current, expected_result): sns_subscription = models.SNSLambdaSubscription( topic=resource_topic, resource_name='handler-sns-subscription', lambda_function=None @@ -832,7 +832,7 @@ class TestRemoteState(object): }] } self.client.verify_sns_subscription_current.return_value = \ - resource_exists + is_current remote_state = RemoteState( self.client, DeployedResources(deployed_resources)) assert (
Update README.md as suggested by
@@ -21,7 +21,7 @@ You can [read the docs here](http://docs.aiogram.dev/en/latest/). - Community: [@aiogram](https://t.me/aiogram) - Russian community: [@aiogram_ru](https://t.me/aiogram_ru) - Pip: [aiogram](https://pypi.python.org/pypi/aiogram) - - Docs: [AiOGram Dev](https://docs.aiogram.dev/en/latest/) + - Docs: [aiogram site](https://docs.aiogram.dev/) - Source: [Github repo](https://github.com/aiogram/aiogram) - Issues/Bug tracker: [Github issues tracker](https://github.com/aiogram/aiogram/issues) - Test bot: [@aiogram_bot](https://t.me/aiogram_bot)
tests: temporarily use david's flavor. Master nfs-ganesha builds are broken; let's use this flavor instead for now.
@@ -7,4 +7,5 @@ ganesha_conf_overrides: | } nfs_ganesha_stable: true nfs_ganesha_dev: false -nfs_ganesha_flavor: "ceph_master" +#nfs_ganesha_flavor: "ceph_master" +nfs_ganesha_flavor: ceph_dgalloway \ No newline at end of file
Update load_profile.py Remove comments around generator_fuel_use_gal
@@ -368,10 +368,10 @@ def bau_outage_check(critical_loads_kw, existing_pv_kw_list, gen_existing_kw, ge :param time_steps_per_hour: int :return: bool, int for number of time steps the existing generator and PV can meet the critical load, and boolean for if the entire critical load is met - ## TODO: add generator_fuel_use_gal + :return generator_fuel_use_gal: float, gallons, fuel use during outage """ - generator_fuel_use_gal = 0 ## Check + generator_fuel_use_gal = 0.0 if gen_existing_kw == 0 and existing_pv_kw_list in [None, []]: return False, 0, generator_fuel_use_gal @@ -388,7 +388,7 @@ def bau_outage_check(critical_loads_kw, existing_pv_kw_list, gen_existing_kw, ge gen_output = max(min(unmet, gen_avail), gen_min_turn_down * gen_existing_kw) # output = the greater of either the unmet load or available generation based on fuel and the min loading fuel_needed = fuel_intercept + fuel_slope * gen_output fuel_gal -= fuel_needed - generator_fuel_use_gal += fuel_needed ## max(min(fuel_needed,fuel_gal), 0) # previous logic, check new value + generator_fuel_use_gal += fuel_needed # previous logic: max(min(fuel_needed,fuel_gal), 0) if gen_output < unmet: # if the generator cannot meet the full load, still assume it runs during the outage return False, i, generator_fuel_use_gal
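For reference, a tiny worked example of the linear fuel model used in that loop, with purely illustrative numbers (not the project's defaults) and an hourly time step assumed.

fuel_intercept = 0.5   # gal burned per time step at zero load (assumed value)
fuel_slope = 0.06      # gal per kWh of generator output (assumed value)
gen_output = 25.0      # kW served by the generator in this time step

fuel_needed = fuel_intercept + fuel_slope * gen_output
print(fuel_needed)     # 2.0 gal: subtracted from fuel_gal and added to generator_fuel_use_gal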
Submitting a change to description of nnz This is in accordance with pull request
@@ -42,7 +42,7 @@ class dia_matrix(_data_matrix): ndim : int Number of dimensions (this is always 2) nnz - Number of nonzero elements + Number of stored values, including explicit zeros data DIA format data array of the matrix offsets
output_mask tests: added an instance in the test with no outdir, and added an instance with a color image to exercise the if statements where len(np.shape(img)) == 3.
@@ -1252,15 +1252,18 @@ def test_plantcv_output_mask(): pcv.params.debug_outdir = cache_dir # Read in test data img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1) + img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1) mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1) # Test with debug = "print" pcv.params.debug = "print" _ = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=cache_dir, mask_only=False) _ = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=cache_dir, mask_only=True) + _ = pcv.output_mask(img=img, mask=mask, filename='test.png', mask_only=True) # Test with debug = "plot" pcv.params.debug = "plot" _ = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=cache_dir, mask_only=False) _ = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=cache_dir, mask_only=True) + _ = pcv.output_mask(img=img_color, mask=mask, filename='test.png', outdir=cache_dir, mask_only=True) # Test with debug = None pcv.params.debug = None imgpath, maskpath, analysis_images = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=cache_dir,
notification settings: Reorder notification settings labels. This is a pre-refactoring commit for notification settings template deduplication using a loop. It refactors the notifications section and reorders labels to match the ordering in the templates.
@@ -4,28 +4,39 @@ var exports = {}; var stream_notification_settings = [ "enable_stream_desktop_notifications", - "enable_stream_push_notifications", "enable_stream_audible_notifications", + "enable_stream_push_notifications", "enable_stream_email_notifications", ]; var pm_mention_notification_settings = [ "enable_desktop_notifications", - "enable_offline_email_notifications", - "enable_offline_push_notifications", "enable_sounds", + "enable_offline_push_notifications", + "enable_offline_email_notifications", ]; -var other_notification_settings = [ +var desktop_notification_settings = [ "pm_content_in_desktop_notifications", +]; + +var mobile_notification_settings = [ "enable_online_push_notifications", - "notification_sound", +]; + +var email_notification_settings = [ "enable_digest_emails", "enable_login_emails", - "realm_name_in_notifications", "message_content_in_email_notifications", + "realm_name_in_notifications", ]; +var other_notification_settings = desktop_notification_settings.concat( + mobile_notification_settings, + email_notification_settings, + ["notification_sound"] +); + var notification_settings_status = [ {status_label: "pm-mention-notify-settings-status", settings: pm_mention_notification_settings}, {status_label: "other-notify-settings-status", settings: other_notification_settings}, @@ -37,6 +48,21 @@ exports.all_notification_settings_labels = other_notification_settings.concat( stream_notification_settings ); +exports.all_notifications = { + settings: { + stream_notification_settings: stream_notification_settings, + pm_mention_notification_settings: pm_mention_notification_settings, + desktop_notification_settings: desktop_notification_settings, + mobile_notification_settings: mobile_notification_settings, + email_notification_settings: email_notification_settings, + }, + push_notification_tooltip: { + enable_stream_push_notifications: true, + enable_offline_push_notifications: true, + enable_online_push_notifications: true, + }, +}; + function change_notification_setting(setting, setting_data, status_element) { var data = {}; data[setting] = JSON.stringify(setting_data);
Update the SIGPIPE test to be correct. SIGPIPE on a _piped process should kill the process, but it should not bubble up as an exception, since SIGPIPE is expected in pipelines.
@@ -1507,10 +1507,13 @@ while True: exit(0) """) - def fn(): - python(python("-u", py1.name, _piped="out"), "-u", py2.name) + p1 = python("-u", py1.name, _piped="out") + p2 = python(p1, "-u", py2.name) - self.assertRaises(sh.SignalException_SIGPIPE, fn) + # SIGPIPE should happen, but it shouldn't be an error, since _piped is + # truthful + self.assertEqual(-p1.exit_code, signal.SIGPIPE) + self.assertEqual(p2.exit_code, 0) def test_piped_generator(self):
Remove xfail from Kitura for 5.1; the underlying issue was addressed in
"action": "BuildSwiftPackage", "configuration": "release", "tags": "sourcekit-disabled swiftpm" - ,"xfail": { - "issue": "https://bugs.swift.org/browse/SR-15146", - "compatibility": "5.1", - "branch": "master" - } } ] },
ArnoldAttributesUI : Add UI for subdiv_smooth_derivs Otherwise it is outside any section and the ArnoldUITest unit tests fail.
@@ -395,6 +395,21 @@ Gaffer.Metadata.registerNode( ], + "attributes.subdivSmoothDerivs" : [ + + "layout:section", "Subdivision", + "label", "Smooth Derivatives", + + "description", + """ + Computes smooth UV derivatives (dPdu and dPdv) per + vertex. This can be needed to remove faceting + from anisotropic specular and other shading effects + that use the derivatives. + """, + + ], + # Curves "attributes.curvesMode" : [
Changed Gitter link to Rasa Community link **Proposed changes**: Changed Gitter links to Rasa Community Forum links **Status (please check what you already did)**: [x] made PR ready for code review [ ] added some tests for the functionality [x] updated the documentation [ ] updated the changelog
@@ -42,7 +42,7 @@ Hence, the solution is to add more training samples. As this is only a warning, I have an issue, can you help me? --------------------------------- -We'd love to help you. If you are unsure if your issue is related to your setup, you should state your problem in the `gitter chat <https://gitter.im/RasaHQ/rasa_nlu>`_. +We'd love to help you. If you are unsure if your issue is related to your setup, you should state your problem in the `Rasa Community Forum <https://forum.rasa.com>`_. If you found an issue with the framework, please file a report on `github issues <https://github.com/RasaHQ/rasa_nlu/issues>`_ including all the information needed to reproduce the problem.
Add option to docs I'm not sure where the best place is for this, so I placed it around various options similar to it (at least as far as I can tell).
@@ -231,6 +231,19 @@ Forces line endings to the specified value. If not set, values will be guessed p - --le - --line-ending +## Sort Re-exports + +Specifies whether to sort re-exports (`__all__` collections) automatically. + +**Type:** Bool +**Default:** `False` +**Config default:** `false` +**Python & Config File Name:** sort_reexports +**CLI Flags:** + +- --srx +- --sort-reexports + ## Sections What sections isort should display imports for and in what order
delete log file on first opening Do this to reduce massive log file sizes
@@ -57,6 +57,12 @@ from view_image import DialogCodeImage path = os.path.abspath(os.path.dirname(__file__)) home = os.path.expanduser('~') logfile = home + '/QualCoder.log' +# Delete log file on first opening so that file sizes are more managable +try: + os.remove(logfile) +except OSError: + pass + logging.basicConfig(format='%(asctime)s %(levelname)s %(name)s.%(funcName)s %(message)s', datefmt='%Y/%m/%d %I:%M', filename=logfile, level=logging.DEBUG)
remove "fuck these guys" from Robinhood I don't like Robinhood either, but not everyone who is going to use/see this in the future is going to have the same context. Let's keep the code clean and professional.
@@ -56,7 +56,7 @@ class PortfolioController: f"\nCurrent Brokers : {('None', ', '.join(broker_list))[bool(broker_list)]}" ) print("\nCurrently Supported :") - print(" rh Robinhood - fuck these guys") + print(" rh Robinhood") print(" alp Alpaca") print(" ally Ally Invest") print("\nCommands (login required):")
Update gpu-driver-install.sh Fixed the tab indentation
@@ -56,7 +56,9 @@ function skip_test() { fi fi if [[ $DISTRO == "suse_12" ]]; then - # skip others except SLES 12 SP2 + # skip others except SLES 12 SP2 BYOS and SAP, + # However, they use default-kernel and no repo to Azure customer. + # This test will fail until SUSE enables azure-kernel for GRID driver installation if [ $VERSION_ID != "12.2" ];then unsupport_flag=1 fi
Support running setup.py from other directories. This incorporates changes from by
# pyOCD debugger -# Copyright (c) 2012-2019 Arm Limited +# Copyright (c) 2012-2020 Arm Limited # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,6 +19,11 @@ import os from setuptools import setup, find_packages import zipfile +# Get the directory containing this setup.py. Even though full paths are used below, we must +# chdir in order for setuptools-scm to successfully pick up the version. +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +os.chdir(SCRIPT_DIR) + # Read the readme file using UTF-8 encoding. open_args = { 'mode': 'r' } if sys.version_info[0] > 2: @@ -26,7 +31,8 @@ if sys.version_info[0] > 2: # Python 2.x version of open() doesn't support the encoding parameter. open_args['encoding'] = 'utf-8' -with open('README.md', **open_args) as f: +readme_path = os.path.join(SCRIPT_DIR, "README.md") +with open(readme_path, **open_args) as f: readme = f.read() # Build zip of SVD files. @@ -34,7 +40,7 @@ with open('README.md', **open_args) as f: # The SVD files are stored individually in the data/ directory only in the repo. For both sdist and # wheel, the svd_data.zip file is used rather than including the data directory. Thus, this setup # script needs to skip building svd_data.zip if the data/ directory is not present. -svd_dir_path = os.path.join(os.path.dirname(__file__), "pyocd", "debug", "svd") +svd_dir_path = os.path.join(SCRIPT_DIR, "pyocd", "debug", "svd") svd_data_dir_path = os.path.join(svd_dir_path, "data") svd_zip_path = os.path.join(svd_dir_path, "svd_data.zip") if os.path.exists(svd_data_dir_path):
ENH: suspender pre/post plans may be callables If so, they are called with no arguments. This allows adaptive pre/post plans
@@ -723,8 +723,7 @@ class RunEngine: def request_suspend(self, fut, *, pre_plan=None, post_plan=None, justification=None): - """ - Request that the run suspend itself until the future is finished. + """Request that the run suspend itself until the future is finished. The two plans will be run before and after waiting for the future. This enable doing things like opening and closing shutters and @@ -733,12 +732,18 @@ class RunEngine: Parameters ---------- fut : asyncio.Future - pre_plan : iterable, optional - Plan to execute just before suspending - post_plan : iterable, optional - Plan to execute just before resuming + + pre_plan : iterable or callable, optional + Plan to execute just before suspending. If callable, must + take no arguments. + + post_plan : iterable or callable, optional + Plan to execute just before resuming. If callable, must + take no arguments. + justification : str, optional explanation of why the suspension has been requested + """ if not self.resumable: print("No checkpoint; cannot suspend.") @@ -776,6 +781,8 @@ class RunEngine: # if there is a post plan add it between the wait # and the cached messages if post_plan is not None: + if callable(post_plan): + post_plan = post_plan() self._plan_stack.append(ensure_generator(post_plan)) self._response_stack.append(None) # add the wait on the future to the stack @@ -783,8 +790,11 @@ class RunEngine: self._response_stack.append(None) # if there is a pre plan add on top of the wait if pre_plan is not None: + if callable(pre_plan): + pre_plan = pre_plan() self._plan_stack.append(ensure_generator(pre_plan)) self._response_stack.append(None) + # The event loop is still running. The pre_plan will be processed, # and then the RunEngine will be hung up on processing the # 'wait_for' message until `fut` is set.
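A standalone sketch of the callable-or-iterable pattern the diff adds (this is not bluesky's own ensure_generator, just the shape of the check): a zero-argument callable is invoked lazily, so the pre/post plan can be built from whatever the state is at suspension time.

def as_plan(plan):
    # Mirror of the `if callable(pre_plan): pre_plan = pre_plan()` check above.
    if callable(plan):
        plan = plan()
    yield from plan

def adaptive_pre_plan():
    # Built only when the suspension actually happens.
    return ["close_shutter"]

print(list(as_plan(adaptive_pre_plan)))   # ['close_shutter']
print(list(as_plan(["close_shutter"])))   # a plain iterable still works unchanged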
random: fix type for sample Fixes
# ----- random classes ----- import _random -from typing import ( - Any, TypeVar, Sequence, List, Callable, AbstractSet, Union, - overload -) +from typing import AbstractSet, Any, Callable, Iterator, List, Protocol, Sequence, TypeVar, Union, overload -_T = TypeVar('_T') +_T = TypeVar("_T") +_T_co = TypeVar('_T_co', covariant=True) + +class _Sampleable(Protocol[_T_co]): + def __iter__(self) -> Iterator[_T_co]: ... + def __len__(self) -> int: ... class Random(_random.Random): def __init__(self, x: object = ...) -> None: ... @@ -28,7 +30,7 @@ class Random(_random.Random): def randint(self, a: int, b: int) -> int: ... def choice(self, seq: Sequence[_T]) -> _T: ... def shuffle(self, x: List[Any], random: Callable[[], None] = ...) -> None: ... - def sample(self, population: Union[Sequence[_T], AbstractSet[_T]], k: int) -> List[_T]: ... + def sample(self, population: _Sampleable[_T], k: int) -> List[_T]: ... def random(self) -> float: ... def uniform(self, a: float, b: float) -> float: ... def triangular(self, low: float = ..., high: float = ..., mode: float = ...) -> float: ... @@ -59,7 +61,7 @@ def randrange(start: int, stop: int, step: int = ...) -> int: ... def randint(a: int, b: int) -> int: ... def choice(seq: Sequence[_T]) -> _T: ... def shuffle(x: List[Any], random: Callable[[], float] = ...) -> None: ... -def sample(population: Union[Sequence[_T], AbstractSet[_T]], k: int) -> List[_T]: ... +def sample(population: _Sampleable[_T], k: int) -> List[_T]: ... def random() -> float: ... def uniform(a: float, b: float) -> float: ... def triangular(low: float = ..., high: float = ...,
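The stub change swaps the Sequence/AbstractSet union for a structural protocol that only demands __iter__ and __len__. A small sketch (not part of typeshed) showing why both lists and sets satisfy it while a bare iterator does not:

from typing import Iterator, Protocol, TypeVar, runtime_checkable

_T_co = TypeVar("_T_co", covariant=True)

@runtime_checkable
class Sampleable(Protocol[_T_co]):
    def __iter__(self) -> Iterator[_T_co]: ...
    def __len__(self) -> int: ...

print(isinstance([1, 2, 3], Sampleable))     # True: iterable and sized
print(isinstance({1, 2, 3}, Sampleable))     # True: sets qualify too
print(isinstance(iter([1, 2]), Sampleable))  # False: an iterator has no __len__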
Use remote support for runs list. Enables completion for remote option.
@@ -49,11 +49,7 @@ def runs_list_options(fn): runs_support.all_filters, click.Option(("--json",), help="Format runs as JSON.", is_flag=True), click.Option(("-v", "--verbose"), help="Show run details.", is_flag=True), - click.Option( - ("-r", "--remote",), - metavar="REMOTE", - help="List runs on REMOTE rather than local runs.", - ), + remote_support.remote_option("List runs on REMOTE rather than local runs.") ], ) return fn
Update devops-command-center.rst * Update devops-command-center.rst Added note about ad-hoc tasks. * Update source/administration/devops-command-center.rst
@@ -178,12 +178,16 @@ If the incident channel is private, an existing member of the incident channel m Working with tasks ~~~~~~~~~~~~~~~~~~ -Tasks can be part of pre-configured task templates in playbooks and they can also be added, edited, and removed as needed during an active incident. Any member of the incident channel can work with tasks: +Tasks can be part of pre-configured task templates in playbooks, and they can also be added, edited, and removed as needed during an active incident. Any member of the incident channel can work with tasks. + +.. note:: + Tasks added during an active incident (ad-hoc tasks) are not saved to the playbook. They only apply to that incident and incident channel. * To mark a task as completed, select the unchecked checkbox next to the task. To undo this, clear the checkbox. * To assign a task to a member of the incident channel, select **No Assignee** (or the existing assignee's username), then select a user. * To view any description associated with a task, select the information icon to the right of the task name. * To execute a slash command associated with a task, select **Run** next to the listed slash command. Configured slash commands may be run as often as necessary. +* To add an ad-hoc task, select **+ Add new task**. .. image:: ../images/IC-ad-hoc-tasks.gif
Fix Type Error When PAR is Empty. Hail should not signal an error if the PAR is empty. This change provides an explicit type to the PAR before handing it to filter_intervals.
@@ -215,7 +215,10 @@ def impute_sex(call, aaf_threshold=0.0, include_par=False, female_threshold=0.2, hl.map(lambda x_contig: hl.parse_locus_interval(x_contig, rg), rg.x_contigs), keep=True) if not include_par: - mt = hl.filter_intervals(mt, rg.par, keep=False) + interval_type = hl.tarray(hl.tinterval(hl.tlocus(rg))) + mt = hl.filter_intervals(mt, + hl.literal(rg.par, interval_type), + keep=False) mt = mt.filter_rows((mt[aaf] > aaf_threshold) & (mt[aaf] < (1 - aaf_threshold))) mt = mt.annotate_cols(ib=agg.inbreeding(mt.call, mt[aaf]))
Add a note when a type hint is provided to DataFrame.apply. This PR adds some documentation for the fact that the index is switched to the default index when type hints are specified. See also
@@ -2062,23 +2062,27 @@ defaultdict(<class 'list'>, {'col..., 'col...})] potentially expensive, for instance, when the dataset is created after aggregations or sorting. - To avoid this, specify return type in ``func``, for instance, as below: + To avoid this, specify the return type as `Series` or scalar value in ``func``, + for instance, as below: >>> def square(s) -> ks.Series[np.int32]: ... return s ** 2 Koalas uses return type hint and does not try to infer the type. - In case when axis is 1, it requires to specify `DataFrame` with type hints - as below: + In case when axis is 1, it requires to specify `DataFrame` or scalar value + with type hints as below: >>> def plus_one(x) -> ks.DataFrame[float, float]: ... return x + 1 - If the return type is specified, the output column names become + If the return type is specified as `DataFrame`, the output column names become `c0, c1, c2 ... cn`. These names are positionally mapped to the returned DataFrame in ``func``. See examples below. + However, this way switches the index type to default index type in the output + because the type hint cannot express the index type at this moment. Use + `reset_index()` to keep index as a workaround. Parameters ----------
improve import of rqda date. Hours were missing the first digit of 2-digit hours
@@ -95,16 +95,20 @@ class Rqda_import(): return: standard format date """ - yyyy = r_date[-4:] # remove day string + yyyy = r_date[-4:] months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] mm = str(months.index(r_date[4:7]) + 1) if len(mm) == 1: mm = "0" + mm # TODO check if day is ALWAYS 2 digits dd = r_date[8:10] - hh_mm_ss = r_date[11:19] - if hh_mm_ss[0] == " ": - hh_mm_ss = "0" + hh_mm_ss[1:] + # TODO check if hours is ALWAYS 2 digits + # Different way to get hh ,mm ss as slice was not working + s = r_date.split(" ") + hh_mm_ss = s[-2] + #hh_mm_ss = r_date[11:19] + #if hh_mm_ss[0] == " ": + # hh_mm_ss = "0" + hh_mm_ss[1:] return yyyy + "-" + mm + "-" + dd + " " + hh_mm_ss def import_data(self):
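The slicing in the diff above implies a ctime-style layout such as "Fri Jul 12 09:05:03 2019". Under that assumption, a single datetime.strptime call would handle 1- or 2-digit fields; this is an alternative sketch, not what the project actually ships.

```python
from datetime import datetime

def rqda_date_to_iso(r_date: str) -> str:
    # Assumes the ctime-like layout implied by the slicing in the diff;
    # %d and %H tolerate single-digit day and hour values.
    parsed = datetime.strptime(r_date, "%a %b %d %H:%M:%S %Y")
    return parsed.strftime("%Y-%m-%d %H:%M:%S")

print(rqda_date_to_iso("Fri Jul 12 09:05:03 2019"))  # 2019-07-12 09:05:03
print(rqda_date_to_iso("Fri Jul 12 9:05:03 2019"))   # 2019-07-12 09:05:03
```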
schemas/journal: All relevant events already have SystemAddress. So there's no need to document it as an augmentation. This was an error based on examining the EDMC code.
@@ -76,11 +76,6 @@ You MUST add a `StarSystem` key/value pair representing the name of the system this event occurred in. Source this from either `Location`, `FSDJump` or `CarrierJump` as appropriate. -#### SystemAddress -You MUST add a `SystemAddress` key/value pair representing the numerical ID -of the system this event occurred in. Source this from either `Location`, -`FSDJump` or `CarrierJump` as appropriate. - #### StarPos You MUST add a `StarPos` array containing the system co-ordinates from the last `FSDJump`, `CarrierJump`, or `Location` event.
Fix assignment of `flow` or `values` in constructor. The `values` parameter always won, even if it wasn't supplied. This is wrong. Instead, the parameter which is supplied and therefore non-`Null` should be the only one which is set.
@@ -276,8 +276,7 @@ class Edge(Node): if input is None or output is None: self._delay_registration_ = True super().__init__(label=Edge.Label(input, output)) - self.flow = flow - self.values = values + self.values = values if values is not None else flow if input is not None and output is not None: input.outputs[output] = self
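The assignment logic after the fix, reduced to a generic pattern with made-up data: whichever of the two parameters was actually supplied wins, instead of `values` unconditionally overwriting with `None`.

```python
def resolve(values=None, flow=None):
    # After the fix: prefer `values` only when it was actually supplied.
    return values if values is not None else flow

print(resolve(flow=[1, 2, 3]))    # [1, 2, 3]
print(resolve(values=[4, 5, 6]))  # [4, 5, 6]
```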
FIX deleting instances from events with multiple EXDATEs fixes
@@ -109,6 +109,7 @@ def expand(vevent, href=''): dtstartl = {vevent['DTSTART'].dt} def get_dates(vevent, key): + # TODO replace with get_all_properties dates = vevent.get(key) if dates is None: return @@ -271,62 +272,46 @@ def invalid_timezone(prop): return False -def _add_exdate(vevent, instance): - """remove a recurrence instance from a VEVENT's RRDATE list - - :type vevent: icalendar.cal.Event - :type instance: datetime.datetime - """ - - def dates_from_exdate(vdddlist): - return [dts.dt for dts in vevent['EXDATE'].dts] - - if 'EXDATE' not in vevent: - vevent.add('EXDATE', instance) - else: - if not isinstance(vevent['EXDATE'], list): - exdates = dates_from_exdate(vevent['EXDATE']) - else: - exdates = list() - for vddlist in vevent['EXDATE']: - exdates.append(dates_from_exdate(vddlist)) - exdates += [instance] - vevent.pop('EXDATE') - vevent.add('EXDATE', exdates) +def _get_all_properties(vevent, prop): + """Get all properties from a vevent, even if there are several entries + example input: + EXDATE:1234,4567 + EXDATE:7890 -def _remove_instance(vevent, instance): - """remove a recurrence instance from a VEVENT's RRDATE list + returns: [1234, 4567, 7890] :type vevent: icalendar.cal.Event - :type instance: datetime.datetime + :type prop: str """ - if isinstance(vevent['RDATE'], list): - rdates = [leaf.dt for tree in vevent['RDATE'] for leaf in tree.dts] + if prop not in vevent: + return list() + if isinstance(vevent[prop], list): + rdates = [leaf.dt for tree in vevent[prop] for leaf in tree.dts] else: - rdates = [vddd.dt for vddd in vevent['RDATE'].dts] - rdates = [one for one in rdates if one != instance] - vevent.pop('RDATE') - if rdates != []: - vevent.add('RDATE', rdates) + rdates = [vddd.dt for vddd in vevent[prop].dts] + return rdates def delete_instance(vevent, instance): - """remove a recurrence instance from a VEVENT's RRDATE list + """remove a recurrence instance from a VEVENT's RRDATE list or add it + to the EXDATE list :type vevent: icalendar.cal.Event :type instance: datetime.datetime """ - - if 'RDATE' in vevent and 'RRULE' in vevent: # TODO check where this instance is coming from and only call the # appropriate function - _add_exdate(vevent, instance) - _remove_instance(vevent, instance) - elif 'RRULE' in vevent: - _add_exdate(vevent, instance) - elif 'RDATE' in vevent: - _remove_instance(vevent, instance) + if 'RRULE' in vevent: + exdates = _get_all_properties(vevent, 'EXDATE') + exdates += [instance] + vevent.pop('EXDATE') + vevent.add('EXDATE', exdates) + if 'RDATE' in vevent: + rdates = [one for one in _get_all_properties(vevent, 'RDATE') if one != instance] + vevent.pop('RDATE') + if rdates != []: + vevent.add('RDATE', rdates) def is_aware(dtime):
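A small sketch of the multi-property case the new `_get_all_properties` handles, using the icalendar package directly (assumed installed); the VEVENT text below is invented for the example.

```python
from icalendar import Event

ical = (
    "BEGIN:VEVENT\r\n"
    "UID:example\r\n"
    "DTSTART:20210101T100000\r\n"
    "RRULE:FREQ=DAILY\r\n"
    "EXDATE:20210102T100000,20210103T100000\r\n"
    "EXDATE:20210105T100000\r\n"
    "END:VEVENT\r\n"
)
vevent = Event.from_ical(ical)

# With two EXDATE lines, icalendar hands back a list of vDDDLists; with a
# single line it is one vDDDLists, hence the isinstance check in the diff.
prop = vevent["EXDATE"]
if isinstance(prop, list):
    exdates = [leaf.dt for tree in prop for leaf in tree.dts]
else:
    exdates = [leaf.dt for leaf in prop.dts]
print(exdates)  # three datetimes: Jan 2, Jan 3 and Jan 5
```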
corrected grammatical error in readme. Fixed a grammatical error in readme.
@@ -44,7 +44,7 @@ Biblatex entry: ## Community -You can use Gitter to communicate with people who also interested in Auto-Keras. +You can use Gitter to communicate with people who are also interested in Auto-Keras. <a href="https://gitter.im/autokeras/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge"><img src="https://badges.gitter.im/autokeras/Lobby.svg" alt="Join the chat at https://gitter.im/autokeras/Lobby" style="width: 92px"></a> You can also follow us on Twitter [@autokeras](https://twitter.com/autokeras) for the latest news.
Small fixes in test pool (windows). * Small fixes. Updated version numbers and removed an odd -1. * Small fixes. * Format fixing. Ofc. * Added missing import.
@@ -7,7 +7,7 @@ import logging from tqdm import tqdm -from ansys.mapdl.core import launch_mapdl +from ansys.mapdl.core import launch_mapdl, get_ansys_path from ansys.mapdl.core.misc import threaded from ansys.mapdl.core.misc import create_temp_dir, threaded_daemon from ansys.mapdl.core.launcher import (
update INVENTREE_LOG_LEVEL param Turn INVENTREE_DEBUG_LEVEL => INVENTREE_LOG_LEVEL
# Set DEBUG to True for a development setup INVENTREE_DEBUG=True -INVENTREE_DEBUG_LEVEL=INFO +INVENTREE_LOG_LEVEL=INFO # Database configuration options # Note: The example setup is for a PostgreSQL database (change as required)
refactor: Rename bugdown to markdown in message_edit.py. This commit is part of a series of commits aimed at renaming bugdown to markdown.
@@ -8,7 +8,6 @@ from django.utils.timezone import now as timezone_now from django.utils.translation import ugettext as _ from zerver.decorator import REQ, has_request_variables -from zerver.lib import markdown as bugdown from zerver.lib.actions import ( do_delete_messages, do_update_message, @@ -17,6 +16,7 @@ from zerver.lib.actions import ( ) from zerver.lib.exceptions import JsonableError from zerver.lib.html_diff import highlight_html_differences +from zerver.lib.markdown import MentionData from zerver.lib.message import access_message, truncate_body from zerver.lib.queue import queue_json_publish from zerver.lib.response import json_error, json_success @@ -159,14 +159,14 @@ def update_message_backend(request: HttpRequest, user_profile: UserMessage, links_for_embed: Set[str] = set() prior_mention_user_ids: Set[int] = set() mention_user_ids: Set[int] = set() - mention_data: Optional[bugdown.MentionData] = None + mention_data: Optional[MentionData] = None if content is not None: content = content.strip() if content == "": content = "(deleted)" content = truncate_body(content) - mention_data = bugdown.MentionData( + mention_data = MentionData( realm_id=user_profile.realm.id, content=content, )
[modules/memory] Use /proc/meminfo instead of psutil. Try to be more accurate in calculating memory by using the /proc/meminfo interface directly. fixes
@@ -9,22 +9,24 @@ Parameters: * memory.usedonly: Only show the amount of RAM in use (defaults to False). Same as memory.format="{used}" """ -try: - import psutil -except ImportError: - pass +import re import bumblebee.util import bumblebee.input import bumblebee.output import bumblebee.engine +class Container(object): + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + class Module(bumblebee.engine.Module): def __init__(self, engine, config): super(Module, self).__init__(engine, config, bumblebee.output.Widget(full_text=self.memory_usage) ) - self._mem = psutil.virtual_memory() + self.update(None) + engine.input.register_callback(self, button=bumblebee.input.LEFT_MOUSE, cmd="gnome-system-monitor") @@ -36,18 +38,30 @@ class Module(bumblebee.engine.Module): return self.parameter("format", "{used}/{total} ({percent:05.02f}%)") def memory_usage(self, widget): - used = bumblebee.util.bytefmt(self._mem.total - self._mem.available) - total = bumblebee.util.bytefmt(self._mem.total) - - return self._format.format(used=used, total=total, percent=self._mem.percent) + return self._format.format(**self._mem) def update(self, widgets): - self._mem = psutil.virtual_memory() + data = {} + with open("/proc/meminfo", "r") as f: + for line in f: + tmp = re.split(r"[:\s]+", line) + value = int(tmp[1]) + if tmp[2] == "kB": value = value*1024 + if tmp[2] == "mB": value = value*1024*1024 + if tmp[2] == "gB": value = value*1024*1024*1024 + data[tmp[0]] = value + self._mem = { + "total": bumblebee.util.bytefmt(data["MemTotal"]), + "available": bumblebee.util.bytefmt(data["MemAvailable"]), + "free": bumblebee.util.bytefmt(data["MemFree"]), + "used": bumblebee.util.bytefmt(data["MemTotal"] - data["MemFree"] - data["Buffers"] - data["Cached"] - data["Slab"]), + "percent": (float(data["MemTotal"] - data["MemAvailable"])/data["MemTotal"])*100 + } def state(self, widget): - if self._mem.percent > float(self.parameter("critical", 90)): + if self._mem["percent"] > float(self.parameter("critical", 90)): return "critical" - if self._mem.percent > float(self.parameter("warning", 80)): + if self._mem["percent"] > float(self.parameter("warning", 80)): return "warning" return None
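A condensed, standalone version of the parsing logic in the diff above. It only runs on Linux, since it reads /proc/meminfo directly, and the "used" and "percent" formulas match the ones in the diff.

```python
import re

def meminfo() -> dict:
    """Parse /proc/meminfo into a dict of byte counts (Linux only)."""
    data = {}
    with open("/proc/meminfo") as f:
        for line in f:
            fields = re.split(r"[:\s]+", line.strip())
            value = int(fields[1])
            if len(fields) > 2 and fields[2] == "kB":
                value *= 1024
            data[fields[0]] = value
    return data

info = meminfo()
used = info["MemTotal"] - info["MemFree"] - info["Buffers"] - info["Cached"] - info["Slab"]
percent = 100.0 * (info["MemTotal"] - info["MemAvailable"]) / info["MemTotal"]
print(f"used={used} bytes, {percent:.2f}% of total")
```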
fix PE doc, add a note. Because Windows does not support NCCL, an error will be raised when users try to run the multi-GPU program on a Windows machine. So add a note to remind users to replace the parallel executor with the executor.
@@ -16,6 +16,8 @@ Image classification, which is an important field of computer vision, is to clas Running sample code in this directory requires PaddelPaddle Fluid v0.13.0 and later, the latest release version is recommended, If the PaddlePaddle on your device is lower than v0.13.0, please follow the instructions in [installation document](http://paddlepaddle.org/documentation/docs/zh/1.3/beginners_guide/install/index_cn.html) and make an update. +Note: Please replace [fluid.ParallelExecutor](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/fluid_cn.html#parallelexecutor) to [fluid.Executor](http://paddlepaddle.org/documentation/docs/zh/1.4/api_cn/fluid_cn.html#executor) when running the program in the windows & GPU environment. + ## Data preparation An example for ImageNet classification is as follows. First of all, preparation of imagenet data can be done as:
Add type and example for 'class_order' in ClassIncremental. Close
@@ -22,6 +22,7 @@ class ClassIncremental(_BaseCLLoader): Desactivated if `increment` is a list. :param transformations: A list of transformations applied to all tasks. :param class_order: An optional custom class order, used for NC. + e.g. [0,1,2,3,4,5,6,7,8,9] or [5,2,4,1,8,6,7,9,0,3] """ def __init__( @@ -31,7 +32,7 @@ class ClassIncremental(_BaseCLLoader): increment: Union[List[int], int] = 0, initial_increment: int = 0, transformations: List[Callable] = None, - class_order=None + class_order: Union[List[int], None]=None ) -> None: super().__init__(cl_dataset=cl_dataset, nb_tasks=nb_tasks, transformations=transformations)
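A pure-Python sketch of what a custom class_order means conceptually: classes are consumed in the given order, one increment at a time. The continuum API itself is only represented by the signature in the diff; this snippet just illustrates the resulting task split.

```python
from typing import List, Optional

def split_into_tasks(nb_classes: int, increment: int,
                     class_order: Optional[List[int]] = None) -> List[List[int]]:
    # With no custom order, classes are taken as 0..n-1; otherwise the
    # provided order is consumed `increment` classes at a time.
    order = class_order if class_order is not None else list(range(nb_classes))
    return [order[i:i + increment] for i in range(0, len(order), increment)]

print(split_into_tasks(10, 2))
# [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
print(split_into_tasks(10, 2, class_order=[5, 2, 4, 1, 8, 6, 7, 9, 0, 3]))
# [[5, 2], [4, 1], [8, 6], [7, 9], [0, 3]]
```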
When not generated, raise a Program_Error in unparsers TN:
@@ -241,7 +241,7 @@ package ${ada_lib_name}.Analysis.Implementation is (Node : access ${root_node_value_type}) return String is abstract; % else: function Unparse (Node : access ${root_node_value_type}) return String is - ("Unparser not generated"); + (raise Program_Error with "Unparser not generated"); % endif % if ctx.properties_logging:
a Auto, got moved after the execute and batch. o Origin, got spooled as part of auto. q Quit, got spooled as part of auto, or sets the flag if -a is not set.
@@ -295,6 +295,7 @@ def run(): kernel_root.execute("Debug Device") if args.input is not None: + # Load any input file import os kernel_root.load(os.path.realpath(args.input.name)) @@ -306,9 +307,6 @@ def run(): device.setting(bool, "mock", True) device.mock = True - if args.quit: - device._quit = True - if args.set is not None: # Set the variables requested here. for v in args.set: @@ -325,37 +323,10 @@ def run(): elif isinstance(v, str): setattr(device, attr, str(value)) - if args.auto: - elements = kernel_root.elements - if args.speed is not None: - for o in elements.ops(): - o.speed = args.speed - device("plan copy\n") - device("plan preprocess\n") - device("plan validate\n") - device("plan blob\n") - device("plan preopt\n") - device("plan optimize\n") - device("plan spool\n") - device._quit = True - - if args.origin: - - def origin(): - yield COMMAND_MODE_RAPID - yield COMMAND_SET_ABSOLUTE - yield COMMAND_MOVE, 0, 0 - - device.spooler.job(origin) - kernel.bootstrap("ready") - if args.output is not None: - import os - - kernel_root.save(os.path.realpath(args.output.name)) - if args.execute: + # Any execute code segments gets executed here. kernel_root.channel("console").watch(print) for v in args.execute: if v is None: @@ -364,14 +335,38 @@ def run(): kernel_root.channel("console").unwatch(print) if args.batch: + # If a batch file is specified it gets processed here. kernel_root.channel("console").watch(print) with args.batch as batch: for line in batch: device(line.strip() + "\n") kernel_root.channel("console").unwatch(print) - if args.console: + if args.auto: + # Auto start does the planning and spooling of the data. + elements = kernel_root.elements + if args.speed is not None: + for o in elements.ops(): + o.speed = args.speed + device("plan copy preprocess validate blob preopt optimize\n") + if args.origin: + device("plan append origin\n") + if args.quit: + device("plan append shutdown\n") + device("plan spool\n") + else: + if args.quit: + # Flag quitting on complete. + device._quit = True + + if args.output is not None: + # output the file you have at this point. + import os + + kernel_root.save(os.path.realpath(args.output.name)) + if args.console: + # Console command gives the user text access to the console as a whole. def thread_text_console(): kernel_root.channel("console").watch(print) while True: @@ -387,4 +382,7 @@ def run(): thread_text_console() else: kernel.threaded(thread_text_console, thread_name="text_console", daemon=True) - kernel.bootstrap("mainloop") + + kernel.bootstrap("mainloop") # This is where the GUI loads and runs. + +
Provide fallback for disabled port security extension. The push notification logic always assumed the port security object would exist but it is not present on the port when the extension is disabled. This defaults it to true like the server side code.[1] 1. Closes-Bug:
@@ -254,7 +254,8 @@ class CacheBackedPluginApi(PluginApi): 'allowed_address_pairs': [{'mac_address': o.mac_address, 'ip_address': o.ip_address} for o in port_obj.allowed_address_pairs], - 'port_security_enabled': port_obj.security.port_security_enabled, + 'port_security_enabled': getattr(port_obj.security, + 'port_security_enabled', True), 'qos_policy_id': port_obj.qos_policy_id, 'network_qos_policy_id': net_qos_policy_id, 'profile': port_obj.binding.profile,
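The defensive pattern used in the diff above, reduced to its core: getattr with a default avoids an AttributeError when an optional extension leaves the attribute off the object. The Security class here is a stand-in, not the real Neutron object.

```python
class Security:
    pass  # port_security_enabled is absent when the extension is disabled

security = Security()
# Mirrors the diff: default to True, matching the server-side behaviour.
enabled = getattr(security, "port_security_enabled", True)
print(enabled)  # True
```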
[ReTrigger] Set default size if resize is less than 0 Make smallest resize smaller
@@ -175,8 +175,10 @@ class ReTrigger(getattr(commands, "Cog", object)): return msg def resize_image(self, size, image): - length, width = (32, 32) # Start with the smallest size we want to upload + length, width = (16, 16) # Start with the smallest size we want to upload im = Image.open(image) + if size <= 0: + size = 1 im.thumbnail((length*size, width*size), Image.ANTIALIAS) byte_array = BytesIO() im.save(byte_array, format="PNG")
handler: do not validate the server certificate against the CA. Otherwise the rgw handler ends up with an error when using https.
@@ -44,11 +44,11 @@ check_socket() { check_for_curl_or_wget() { local i=$1 if ${DOCKER_EXECS[i]} command -v wget &>/dev/null; then - rgw_test_command="wget --tries 1 --quiet -O /dev/null" + rgw_test_command="wget --no-check-certificate --tries 1 --quiet -O /dev/null" elif ${DOCKER_EXECS[i]} command -v curl &>/dev/null; then - rgw_test_command="curl --fail --silent --output /dev/null" + rgw_test_command="curl -k --fail --silent --output /dev/null" else - echo "It seems that neither curl or wget are available on your system." + echo "It seems that neither curl nor wget are available on your system." echo "Cannot test rgw connection." exit 0 fi
Fixed failing windows test. This test works, but verifying the equality using elementwise comparison as done raises a DeprecationWarning which causes some tests to fail. Skipping this test for now.
@@ -168,6 +168,9 @@ def test_ne_shapes( tensor2 == tensor1 [email protected]( + reason="Testing this works causes a DeprecationWarning due to ele-wise comp" +) def test_eq_ndarray(row_data: List) -> None: """Test equality between a SEPT and a simple type (int, float, bool, np.ndarray)""" sub_row_data: SEPT = row_data[0]
Add a TODO to track migrating Controller summaries/files when TrainerTpu checkpointing is on.
@@ -149,7 +149,8 @@ tf.flags.DEFINE_float('saver_keep_checkpoint_every_n_hours', None, tf.flags.DEFINE_bool( 'checkpoint_in_trainer_tpu', False, 'Whether to enable checkpointing in TrainerTpu, allowing for ' - 'operation without a separate Controller task.') + 'operation without a separate Controller task.' + 'TODO(b/137871213) migrate file/summaries from Controller.') # Please consider adding model params instead of adding flags.
[bugfix] Do not iterate over sys.modules See:
@@ -507,7 +507,7 @@ def writelogheader() -> None: # imported modules log('MODULES:') - for module in sys.modules.values(): + for module in sys.modules.copy().values(): filename = version.get_module_filename(module) if not filename: continue
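Why the .copy() matters: iterating a dict that can grow mid-loop (imports triggered inside the loop add entries to sys.modules) raises a RuntimeError, while iterating a snapshot does not. Simulated here with a plain dict.

```python
modules = {"a": 1, "b": 2}

# Safe: iterate over a snapshot while the original keeps changing.
for name, value in modules.copy().items():
    modules[name + "_derived"] = value  # would raise RuntimeError without .copy()

print(sorted(modules))  # ['a', 'a_derived', 'b', 'b_derived']
```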
Coverage: add patterns to ignore some debug-related statements TN:
@@ -5,3 +5,12 @@ omit = */langkit/gdb/* */langkit/setup.py */langkit/stylechecks/* + +[report] +exclude_lines = + def __repr__ + raise NotImplementedError() + raise not_implemented_error + assert False + if .*\.verbosity\..*: + # no-code-coverage
Split up form and module rearrangements. They're actually pretty different - it doesn't make sense to pretend otherwise.
@@ -311,13 +311,20 @@ hqDefine('app_manager/js/app_manager', function () { // another, do a check to see if this is the sortable list we're moving the item to if ($sortable.find(ui.item).length < 1) { return; } - var toModuleUid = $sortable.parents('.edit-module-li').data('uid'), + if ($sortable.hasClass('sortable-forms')) { + rearrangeForms(ui, $sortable); + } else { + rearrangeModules($sortable); + } + } + function rearrangeForms(ui, $sortable) { + var url = initialPageData.reverse('rearrange', 'forms'), + toModuleUid = $sortable.parents('.edit-module-li').data('uid'), fromModuleUid = ui.item.data('moduleuid'), - sortingForms = $sortable.hasClass('sortable-forms'), - movingFormToNewModule = sortingForms && toModuleUid !== fromModuleUid; + movingToNewModule = toModuleUid !== fromModuleUid; var move; - if (movingFormToNewModule) { + if (movingToNewModule) { move = calculateMoveFormToNewModule($sortable, ui, toModuleUid); } else { move = calculateMoveWithinScope($sortable); @@ -325,13 +332,22 @@ hqDefine('app_manager/js/app_manager', function () { var from = move[0], to = move[1]; - if (to !== from || movingFormToNewModule) { + if (to !== from || movingToNewModule) { resetIndexes($sortable); - if (movingFormToNewModule) { + if (movingToNewModule) { resetOldModuleIndices($sortable, fromModuleUid); } - saveRearrangement(from, to, fromModuleUid, toModuleUid, sortingForms); - hqImport("app_manager/js/menu").setPublishStatus(true); + saveRearrangement(url, from, to, fromModuleUid, toModuleUid); + } + } + function rearrangeModules($sortable) { + var url = initialPageData.reverse('rearrange', 'modules'), + move = calculateMoveWithinScope($sortable), + from = move[0], + to = move[1]; + + if (to !== from) { + saveRearrangement(url, from, to); } } function calculateMoveFormToNewModule($sortable, ui, toModuleUid) { @@ -385,16 +401,14 @@ hqDefine('app_manager/js/app_manager', function () { } }); } - function saveRearrangement(from, to, fromModuleUid, toModuleUid, sortingForms) { - var url = initialPageData.reverse('rearrange', sortingForms ? 'forms' : 'modules'); + function saveRearrangement(url, from, to, fromModuleUid, toModuleUid) { + resetIndexes($sortable); var data = { from: from, to: to, + from_module_uid: fromModuleUid, + to_module_uid: toModuleUid, }; - if (sortingForms) { - data['from_module_uid'] = fromModuleUid; - data['to_module_uid'] = toModuleUid; - } $.ajax(url, { method: 'POST', data: data, @@ -405,6 +419,7 @@ hqDefine('app_manager/js/app_manager', function () { hqImport('hqwebapp/js/alert_user').alert_user(xhr.responseJSON.error, "danger"); }, }); + hqImport("app_manager/js/menu").setPublishStatus(true); } };
Added min_order_pct (default 2%) to SimpleOrders. This stops multiple remedial orders flowing through when net_worth fluctuates.
@@ -248,6 +248,8 @@ class SimpleOrders(TensorTradeActionScheme): quantity = (size * instrument).quantize() + price = ep.price + value = size*float(price) if size < 10 ** -instrument.precision \ or value < min_order_pct*portfolio.net_worth: return []
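The threshold check from the diff above in isolation, with made-up numbers: a proposed order is dropped when its value falls below min_order_pct of net worth, which is what stops the stream of tiny remedial orders.

```python
min_order_pct = 0.02   # default 2%
net_worth = 10_000.0   # illustrative portfolio value
price = 50.0
size = 3.0             # proposed order size in units

value = size * price
if value < min_order_pct * net_worth:
    print("order skipped: %.2f < %.2f" % (value, min_order_pct * net_worth))
else:
    print("order placed for value %.2f" % value)
```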
Fix License in setup.py. See: the "license" field should be a single statement (content other than the description with double newlines breaks the metadata).
@@ -135,7 +135,7 @@ setup( }, include_package_data=True, install_requires=DEPS, - license=read("LICENSE"), + license="MIT License", zip_safe=False, keywords='simpleflow amazon swf simple workflow', classifiers=[
Fix - removed double copy of already copied file. Fix - remove double creation of hardlink resulting in WindowError
@@ -544,9 +544,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): transfers = instance.data.get("transfers", list()) for src, dest in transfers: self.copy_file(src, dest) - if os.path.exists(dest): # TODO needs to be updated during site implementation integrated_file_sizes[dest] = os.path.getsize(dest) + # already copied, delete from transfers to limit double copy TODO double check + instance.data.get("transfers", list()).remove([src, dest]) # Produce hardlinked copies @@ -557,8 +558,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): hardlinks = instance.data.get("hardlinks", list()) for src, dest in hardlinks: self.log.debug("Hardlinking file .. {} -> {}".format(src, dest)) + if not os.path.exists(dest): self.hardlink_file(src, dest) - if os.path.exists(dest): + # TODO needs to be updated during site implementation integrated_file_sizes[dest] = os.path.getsize(dest)
DOC: removed comment Removed outdated comment.
@@ -408,8 +408,6 @@ class TestNetCDF4Integration(object): if False """ - # TODO(#585): consider moving to class with netCDF tests - # Create an instrument object that has a meta with some # variables allowed to be nan within metadata when exporting self.testInst = pysat.Instrument('pysat', 'testing')
Clients: fix incorrect message about having replicas on tape. The message was printed even in cases when sources was initially empty. Also the sources list was changed while iterating over it. Fix this too.
@@ -1152,12 +1152,13 @@ class DownloadClient: # filtering out tape sources if self.is_tape_excluded: for file_item in file_items: - sources = file_item['sources'] - for src in file_item['sources']: + unfiltered_sources = copy.copy(file_item['sources']) + for src in unfiltered_sources: if src in tape_rses: - sources.remove(src) - if not sources: - logger(logging.WARNING, 'Requested did {} has only replicas on tape. No files will be download.'.format(file_item['did'])) + file_item['sources'].remove(src) + if unfiltered_sources and not file_item['sources']: + logger(logging.WARNING, 'The requested DID {} only has replicas on tape. Direct download from tape is prohibited. ' + 'Please request a transfer to a non-tape endpoint.'.format(file_item['did'])) nrandom = item.get('nrandom') if nrandom:
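The second bug the commit mentions, reduced to a toy with invented source names: removing items from a list while iterating over it skips elements, so the loop iterates over a copy instead, as in the diff.

```python
import copy

sources = ["tape1", "disk1", "tape2", "tape3"]
tape_rses = {"tape1", "tape2", "tape3"}

# Buggy pattern would be `for src in sources: sources.remove(src)`,
# which skips elements because indices shift under the iterator.
unfiltered = copy.copy(sources)
for src in unfiltered:
    if src in tape_rses:
        sources.remove(src)

print(sources)                           # ['disk1']
print(bool(unfiltered) and not sources)  # False: a non-tape source remains
```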
Reset config changes in server unit tests. Leaking config changes can make later tests accidentally depend on the test ordering, which can break running smaller subsets of the test suite and break tests when renaming.
@@ -71,6 +71,18 @@ def _create_report_finished_msg(status) -> ForwardMsg: class ServerTest(ServerTestCase): _next_report_id = 0 + def setUp(self) -> None: + self.original_ws_compression = config.get_option( + "server.enableWebsocketCompression" + ) + return super().setUp() + + def tearDown(self): + config.set_option( + "server.enableWebsocketCompression", self.original_ws_compression + ) + return super().tearDown() + @tornado.testing.gen_test def test_start_stop(self): """Test that we can start and stop the server.""" @@ -152,6 +164,7 @@ class ServerTest(ServerTestCase): @tornado.testing.gen_test def test_websocket_compression(self): with self._patch_report_session(): + config._set_option("server.enableWebsocketCompression", True, "test") yield self.start_server_loop() # Connect to the server, and explicitly request compression. @@ -532,6 +545,14 @@ class UnixSocketTest(unittest.TestCase): """Tests start_listening uses a unix socket when socket.address starts with unix://""" + def setUp(self) -> None: + self.original_address = config.get_option("server.address") + return super().setUp() + + def tearDown(self) -> None: + config.set_option("server.address", self.original_address) + return super().tearDown() + @staticmethod def get_httpserver(): httpserver = mock.MagicMock()
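The save-and-restore pattern from the diff above in a self-contained form; the CONFIG dict stands in for Streamlit's config module.

```python
import unittest

CONFIG = {"server.enableWebsocketCompression": False}  # stand-in global config

class CompressionTest(unittest.TestCase):
    def setUp(self):
        # Remember the original value so the test cannot leak its change.
        self.original = CONFIG["server.enableWebsocketCompression"]

    def tearDown(self):
        CONFIG["server.enableWebsocketCompression"] = self.original

    def test_compression_enabled(self):
        CONFIG["server.enableWebsocketCompression"] = True
        self.assertTrue(CONFIG["server.enableWebsocketCompression"])

if __name__ == "__main__":
    unittest.main()
```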
Site.page_restrictions(): Do not raise NoPage. Nonexistent pages can be protected, so treat all pages the same.
@@ -3148,8 +3148,6 @@ class APISite(BaseSite): def page_restrictions(self, page): """Return a dictionary reflecting page protections.""" - if not page.exists(): - raise NoPage(page) if not hasattr(page, '_protection'): self.loadpageinfo(page) return page._protection
Removed processing of edited commands. Apparently this causes issues when links are added - and then previewed by discord - it counts as a message post and then an edit - so things are processed twice when they're not supposed to be.
@@ -384,15 +384,13 @@ async def on_message(message): async def on_message_edit(before, message): # Run through the on_message commands, but on edits. if not message.server: - # This wasn't said in a server, process commands, then return - await bot.process_commands(message) + # This wasn't said in a server, return return try: message.author.roles except AttributeError: # Not a User - await bot.process_commands(message) return # Check if we need to ignore or delete the message @@ -427,10 +425,6 @@ async def on_message_edit(before, message): # We need to delete the message - top priority await bot.delete_message(message) - if not ignore: - # We're processing commands here - await bot.process_commands(message) - # Add our cogs
Grep fix for chaining Grep fix for chaining
@@ -356,17 +356,22 @@ def _grep(path, options = '' # prepare the command + cmd = None + if path: cmd = ( r'''grep {options} {pattern} {path}''' - .format( - options=options, - pattern=pattern, - path=path, - ) + .format(options=options, pattern=pattern, path=path,) ) + else: + # in stdin mode + options = [] if options == '' else [options] + cmd = ['grep'] + options + [pattern] try: + if path: ret = __mods__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True) + else: + ret = __mods__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True, stdin=string) except (IOError, OSError) as exc: raise CommandExecutionError(exc.strerror)
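An illustration of the stdin-mode idea using plain subprocess instead of Salt's cmd.run_all: when no path is given, the pattern is matched against a string piped to grep. It assumes grep is on PATH; the helper name and sample text are invented.

```python
import subprocess

def grep_string(pattern: str, text: str, options=None) -> str:
    cmd = ["grep"] + (options or []) + [pattern]
    # grep exits 1 on "no match"; that is not treated as an error here.
    result = subprocess.run(cmd, input=text, capture_output=True, text=True)
    return result.stdout

sample = "alpha\nBETA\ngamma\n"
print(grep_string("beta", sample, options=["-i"]))  # case-insensitive match: BETA
```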
Warn against using workbench with the rest of lib. Doing so may cause issues because the workbench overwrites the STIX Object mapping.
"cell_type": "markdown", "metadata": {}, "source": [ - "## Using A Workbench" + "## Using The Workbench" ] }, { "source": [ "Defaults can also be set for the [created timestamp](../api/datastore/stix2.workbench.rst#stix2.workbench.set_default_created), [external references](../api/datastore/stix2.workbench.rst#stix2.workbench.set_default_external_refs) and [object marking references](../api/datastore/stix2.workbench.rst#stix2.workbench.set_default_object_marking_refs)." ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "<div class=\"alert alert-warning\">\n", + "\n", + "**Warning:**\n", + "\n", + "The workbench layer replaces STIX Object classes with special versions of them that use \"wrappers\" to provide extra functionality. Because of this, we recommend that you **either use the workbench layer or the rest of the library, but not both**. In other words, don't import from both ``stix2.workbench`` and any other submodules of ``stix2``.\n", + "\n", + "</div>" + ] } ], "metadata": {
Minor update to fix crawler for worldnovel.online. Sorting TOC by chapter number in title. Fix error while getting chapter body because of a change in the novel site.
@@ -7,6 +7,7 @@ import json import logging import re from ..utils.crawler import Crawler +from operator import itemgetter, attrgetter logger = logging.getLogger('WORLDNOVEL_ONLINE') search_url = 'https://www.worldnovel.online/?s=%s' @@ -85,10 +86,18 @@ class WorldnovelonlineCrawler(Crawler): chapters = soup.select('div.lightnovel-episode ul li a') - chapters.reverse() + temp_chapters = [] for a in chapters: - chap_id = len(self.chapters) + 1 + chap_id = int(re.findall('\d+', a.text.strip())[0]) + temp_chapters.append({ + 'id' : chap_id, + 'url': a['href'], + 'title' : a.text.strip()}) + # end for + + for a in sorted(temp_chapters, key=itemgetter('id')): + chap_id = a['id'] if len(self.chapters) % 100 == 0: vol_id = chap_id//100 + 1 vol_title = 'Volume ' + str(vol_id) @@ -98,10 +107,10 @@ class WorldnovelonlineCrawler(Crawler): }) # end if self.chapters.append({ - 'id': chap_id, + 'id': a['id'], 'volume': vol_id, - 'url': self.absolute_url(a['href']), - 'title': a.text.strip() or ('Chapter %d' % chap_id), + 'url': a['url'], + 'title': a['title'], }) # end for @@ -115,17 +124,19 @@ class WorldnovelonlineCrawler(Crawler): soup = self.get_soup(chapter['url']) logger.debug(soup.title.string) - - c = soup.select('div.elementor-widget-container') - contents = c[5] + #content = soup.find('div',{'data-element_type':'theme-post-content.default'}).soup.select('div.elementor-widget-container') + contents = soup.find('div',{'data-element_type':'theme-post-content.default'}) + if contents.findAll('div', {"class": 'code-block'}): for ads in contents.findAll('div', {"class": 'code-block'}): ads.decompose() + if contents.findAll('div', {"align": 'left'}): for ads in contents.findAll('div', {"align": 'left'}): ads.decompose() + if contents.findAll('div', {"align": 'center'}): for ads in contents.findAll('div', {"align": 'center'}): ads.decompose() - if contents.h1: - contents.h1.decompose() + #if contents.h1: + # contents.h1.decompose() # end if return contents.prettify() # end def
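The sorting step from the diff above in isolation: chapter numbers are pulled out of the link text with a regex and used as the sort key via itemgetter. The sample titles are invented.

```python
import re
from operator import itemgetter

titles = ["Chapter 10", "Chapter 2", "Chapter 1", "Chapter 21"]

chapters = [
    {"id": int(re.findall(r"\d+", t)[0]), "title": t}
    for t in titles
]
for chap in sorted(chapters, key=itemgetter("id")):
    print(chap["id"], chap["title"])
# 1, 2, 10, 21 - numeric order, unlike a plain string sort
```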
remove gh_pages_master.yml CI. Remove CI badge. Update jsonschema workflow to push to 'pages' directory instead of 'pages/devel' and rename the workflow.
-| |license| |docs| |codecov| |slack| |release| |installation| |regressiontest| |gh_pages_master| |gh_pages_devel| |checkurls| |dailyurlcheck| |codefactor| |blackformat| |black| |isort| |issues| |open_pr| |commit_activity_yearly| |commit_activity_monthly| |core_infrastructure| |zenodo| +| |license| |docs| |codecov| |slack| |release| |installation| |regressiontest| |gh_pages_devel| |checkurls| |dailyurlcheck| |codefactor| |blackformat| |black| |isort| |issues| |open_pr| |commit_activity_yearly| |commit_activity_monthly| |core_infrastructure| |zenodo| .. |docs| image:: https://readthedocs.org/projects/buildtest/badge/?version=latest :alt: Documentation Status .. |regressiontest| image:: https://github.com/buildtesters/buildtest/workflows/regressiontest/badge.svg :target: https://github.com/buildtesters/buildtest/actions -.. |gh_pages_devel| image:: https://github.com/buildtesters/buildtest/workflows/Upload%20JSON%20Schema%20to%20gh-pages%20on%20devel/badge.svg - :target: https://github.com/buildtesters/buildtest/actions - -.. |gh_pages_master| image:: https://github.com/buildtesters/buildtest/workflows/Upload%20JSON%20Schema%20to%20gh-pages%20for%20master%20branch/badge.svg +.. |gh_pages_devel| image:: https://github.com/buildtesters/buildtest/workflows/Schema%20Documentation/badge.svg :target: https://github.com/buildtesters/buildtest/actions .. |dailyurlcheck| image:: https://github.com/buildtesters/buildtest/workflows/Daily%20Check%20URLs/badge.svg