message: string, length 13 to 484
diff: string, length 38 to 4.63k
Fix mujoco_py error in Dockerfile.ci The Dockerfile.ci build broke because mujoco_py could not find the LD_LIBRARY_PATH variable with mjpro150/bin in it. This change exports LD_LIBRARY_PATH manually for the mujoco_py build to avoid the error.
@@ -96,7 +96,7 @@ RUN ["/bin/bash", "-c", "source activate garage && pip uninstall -y Box2D Box2D- # THAT WE DON'T PUBLISH THE KEY ARG MJKEY RUN echo "${MJKEY}" > /root/.mujoco/mjkey.txt -RUN ["/bin/bash", "-c", "source activate garage && python -c 'import mujoco_py'"] +RUN ["/bin/bash", "-c", "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/root/.mujoco/mjpro150/bin && source activate garage && python -c 'import mujoco_py'"] RUN rm /root/.mujoco/mjkey.txt # Setup repo
Mock a ConnectionState object to fix wait=True errors in webhooks. Fixes
@@ -100,10 +100,6 @@ class WebhookAdapter: """ raise NotImplementedError() - def store_user(self, data): - # mocks a ConnectionState for appropriate use for Message - return BaseUser(state=self.webhook._state, data=data) - async def _wrap_coroutine_and_cleanup(self, coro, cleanup): try: return await coro @@ -301,6 +297,41 @@ class RequestsWebhookAdapter(WebhookAdapter): from .message import Message return Message(data=response, state=self.webhook._state, channel=self.webhook.channel) +class _FriendlyHttpAttributeErrorHelper: + __slots__ = () + + def __getattr__(self, attr): + raise AttributeError('PartialWebhookState does not support http methods.') + +class _PartialWebhookState: + __slots__ = ('loop',) + + def __init__(self, adapter): + # Fetch the loop from the adapter if it's there + try: + self.loop = adapter.loop + except AttributeError: + self.loop = None + + def _get_guild(self, guild_id): + return None + + def store_user(self, data): + return BaseUser(state=self, data=data) + + @property + def is_bot(self): + return True + + @property + def http(self): + # Some data classes assign state.http and that should be kosher + # however, using it should result in a late-binding error. + return _FriendlyHttpAttributeErrorHelper() + + def __getattr__(self, attr): + raise AttributeError('PartialWebhookState does not support {0:!r}.'.format(attr)) + class Webhook: """Represents a Discord webhook. @@ -371,7 +402,7 @@ class Webhook: self.name = data.get('name') self.avatar = data.get('avatar') self.token = data['token'] - self._state = state + self._state = state or _PartialWebhookState(adapter) self._adapter = adapter self._adapter._prepare(self) @@ -453,7 +484,7 @@ class Webhook: If this is a partial webhook, then this will always return ``None``. """ - return self._state and self._state._get_guild(self.guild_id) + return self._state._get_guild(self.guild_id) @property def channel(self):
Harmony to Deadline - fix when a backslash is used in the path. A settings value like '64\bin' was being mangled to '6in'.
@@ -26,7 +26,7 @@ class HarmonyPrelaunchHook(PreLaunchHook): ( "import avalon.harmony;" "avalon.harmony.launch(\"{}\")" - ).format(harmony_executable) + ).format(harmony_executable.replace("\\", "/")) ] # Append as whole list as these areguments should not be separated
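A minimal sketch of the escape problem the change above works around; the mechanism is inferred from the diff, and the Harmony install path below is purely hypothetical. The hook formats the executable path into a string of Python source, so when that source is parsed, backslash sequences such as `\b` are interpreted as escape characters instead of path separators:

```
# "\b" in a normal (non-raw) Python string literal is a backspace character;
# echoed to a terminal, the backspace erases the '4', so '64\bin' shows as '6in'.
print(repr("win64\bin"))   # 'win64\x08in'

# Replacing backslashes with forward slashes, as the fixed hook does, keeps the
# generated launch command free of escape sequences (path is illustrative only).
harmony_executable = "C:\\Program Files\\Toon Boom Harmony\\win64\\bin\\HarmonyPremium.exe"
command = (
    "import avalon.harmony;"
    "avalon.harmony.launch(\"{}\")"
).format(harmony_executable.replace("\\", "/"))
print(command)
# import avalon.harmony;avalon.harmony.launch("C:/Program Files/Toon Boom Harmony/win64/bin/HarmonyPremium.exe")
```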
makefile: Ensure wheel is installed when setting up virtual env. Add "$(PYTHON) -m pip install wheel".
@@ -47,4 +47,4 @@ venv: $(ZT_VENV)/bin/activate $(ZT_VENV)/bin/activate: setup.py @echo "=== Installing development environment ===" test -d $(ZT_VENV) || $(BASEPYTHON) -m venv $(ZT_VENV) - $(PYTHON) -m pip install -U pip && $(PYTHON) -m pip install -e .[dev] && touch $(ZT_VENV)/bin/activate + $(PYTHON) -m pip install wheel && $(PYTHON) -m pip install -U pip && $(PYTHON) -m pip install -e .[dev] && touch $(ZT_VENV)/bin/activate
tesseract/5.0.0: fix wrong compiler version being output The compiler version shown was the one used instead of the one required. Improve the error message to include both the required and the currently found compiler version.
@@ -94,7 +94,7 @@ class TesseractConan(ConanFile): self.output.warn( "%s recipe lacks information about the %s compiler standard version support" % (self.name, compiler)) elif compiler_version < minimal_version[compiler]: - raise ConanInvalidConfiguration("{} requires a {} version >= {}".format(self.name, compiler, compiler_version)) + raise ConanInvalidConfiguration("{} requires a {} version >= {}, but {} was found".format(self.name, compiler, minimal_version[compiler], compiler_version)) def source(self): tools.get(**self.conan_data["sources"][self.version],
Update CHANGES.txt fixed CHANGES.txt to have only one section for Peter Diener
@@ -9,6 +9,7 @@ RELEASE VERSION/DATE TO BE FILLED IN LATER From Peter Diener: - Additional fix to issue #3135 - Also handle 'pure' and 'elemental' type bound procedures + - Fix issue #3135 - Handle Fortran submodules and type bound procedures From William Deegan: @@ -21,9 +22,6 @@ RELEASE VERSION/DATE TO BE FILLED IN LATER Decider and not clearing it when the configure context is completed. - Add default paths for yacc tool on windows to include cygwin, mingw, and chocolatey - From Peter Diener: - - Fix issue #3135 - Handle Fortran submodules and type bound procedures - From Daniel Moody: - Change the default for AppendENVPath to delete_existing=0, so path order will not be changed, unless explicitly set (Issue #3276)
Extensions: beautify name unqualification Yes, that's a real word.
@@ -19,6 +19,11 @@ from bot.utils.checks import with_role_check log = logging.getLogger(__name__) +def unqualify(name: str) -> str: + """Return an unqualified name given a qualified module/package `name`.""" + return name.rsplit(".", maxsplit=1)[-1] + + def walk_extensions() -> t.Iterator[str]: """Yield extension names from the bot.exts subpackage.""" @@ -26,7 +31,7 @@ def walk_extensions() -> t.Iterator[str]: raise ImportError(name=name) # pragma: no cover for module in pkgutil.walk_packages(exts.__path__, f"{exts.__name__}.", onerror=on_error): - if module.name.rsplit(".", maxsplit=1)[-1].startswith("_"): + if unqualify(module.name).startswith("_"): # Ignore module/package names starting with an underscore. continue @@ -75,8 +80,7 @@ class Extension(commands.Converter): matches = [] for ext in EXTENSIONS: - name = ext.rsplit(".", maxsplit=1)[-1] - if argument == name: + if argument == unqualify(ext): matches.append(ext) if len(matches) > 1:
Remove_Exiled_Entries: strip obsolete foreign nodes TN:
@@ -797,7 +797,27 @@ package body ${ada_lib_name}.Analysis is -- Remove the `symbol -> AST node` associations that reference this -- unit's nodes from foreign lexical environments. AST_Envs.Remove (El.Env, El.Key, El.Node); + + -- Also filter the foreign's units foreign nodes information so that + -- it does not contain stale information (i.e. dangling pointers to + -- our nodes). + if El.Env.Env.Node /= null then + declare + Foreign_Nodes : ${root_node_type_name}_Vectors.Vector renames + Get_Lex_Env_Data (El.Env.Env.Node.Unit).Foreign_Nodes; + Current : Positive := Foreign_Nodes.First_Index; + begin + while Current <= Foreign_Nodes.Last_Index loop + if Foreign_Nodes.Get (Current) = El.Node then + Foreign_Nodes.Pop (Current); + else + Current := Current + 1; + end if; end loop; + end; + end if; + end loop; + Self.Exiled_Entries.Clear; end Remove_Exiled_Entries;
Fixes for supplierpart table Paginate on server side
@@ -724,6 +724,7 @@ function loadSupplierPartTable(table, url, options) { url: url, method: 'get', original: params, + sidePagination: 'server', queryParams: filters, name: 'supplierparts', groupBy: false,
Apply PR feedback: check if signing key and ID match
@@ -539,6 +539,12 @@ class Pubsub: if msg.signature == b"": logger.debug("Reject because no signature attached for msg: %s", msg) return + # Validate if message sender matches message signer, + # i.e., check if `msg.key` matches `msg.from_id` + msg_pubkey = deserialize_public_key(msg.key) + if ID.from_pubkey(msg_pubkey) != msg.from_id: + logger.debug("Reject because signing key does not match sender ID for msg: %s", msg) + return # Validate the signature of the message # First, construct the original payload that's signed by 'msg.key' msg_without_key_sig = rpc_pb2.Message( @@ -551,7 +557,7 @@ class Pubsub: PUBSUB_SIGNING_PREFIX.encode() + msg_without_key_sig.SerializeToString() ) if not signature_validator( - deserialize_public_key(msg.key), payload, msg.signature + msg_pubkey, payload, msg.signature ): logger.debug("Signature validation failed for msg: %s", msg) return
Update README.md First Draft README
-# RAPIDS notebooks -Visit the main RAPIDS [notebooks](https://github.com/rapidsai/notebooks) repo for a listing of all notebooks across all RAPIDS libraries. +# cuSpatial Notebooks +## Intro +These notebooks provide examples of how to use cuSpatial. Some of these notebooks are designed to be self-contained with the `runtime` version of the [RAPIDS Docker Container](https://hub.docker.com/r/rapidsai/rapidsai/) and [RAPIDS Nightly Docker Containers](https://hub.docker.com/r/rapidsai/rapidsai-nightly) and can run on air-gapped systems, while others require an additional download. You can quickly get this container using the install guide from the [RAPIDS.ai Getting Started page](https://rapids.ai/start.html#get-rapids) + +## Getting started notebooks +For a good overview of how cuSpatial works, go to [our precompiled docs (external link)](https://docs.rapids.ai/api/cuspatial/stable/api.html) or [build the docs them locally yourself](../docs/source/) in the +documentation tree, as well as read [our introductory blog (external link)](https://medium.com/rapids-ai/releasing-cuspatial-to-accelerate-geospatial-and-spatiotemporal-processing-b686d8b32a9) + +## Notebook Information +Notebook Title | Data set(s) | Notebook Description | External Download (Size) +--- | --- | --- | --- +[NYC Taxi Years Correlation](nyc_taxi_years_correlation.ipynb) | [NYC Taxi Yellow 01/2016, 01/2017, taxi zone data](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page) | Demonstrates using Point in Polygon to correlate the NYC Taxi datasets pre-2017 `lat/lon` locations with the post-2017 `LocationID` for cross format comparisons. | Yes (~3GB) + +## For more details +Many more examples can be found in the [RAPIDS Notebooks +Contrib](https://github.com/rapidsai/notebooks-contrib) repository, +which contains community-maintained notebooks.
Fixing total translation count for translations import. Fixes
@@ -210,6 +210,7 @@ class Importer(object): existing_po_file = self.pod.open_file(pod_po_path, mode='w') pofile.write_po(existing_po_file, existing_catalog, width=80, sort_output=True, sort_by_file=True) + total_translations = len(catalog_to_merge) else: # Skip new catalogs if not including obsolete messages. if not self.include_obsolete:
Make rac, rc kwargs on UnmatchedOrder Resolves liampauling/betfairlightweight#97
@@ -390,8 +390,8 @@ class MarketBookCache(BaseResource): class UnmatchedOrder(object): - def __init__(self, id, p, s, side, status, pt, ot, pd, sm, sr, sl, sc, sv, rac, rc, rfo, rfs, - md=None, avp=None, bsp=None, ld=None): + def __init__(self, id, p, s, side, status, pt, ot, pd, sm, sr, sl, sc, sv, rfo, rfs, + md=None, avp=None, bsp=None, ld=None, rac=None, rc=None): self.bet_id = id self.price = p self.size = s
Fix bug with multiple model prefetches I found that calling prefetch with a model multiple times, but on different rel models, would result in only one of the rel models getting its instances populated and all the others getting empty lists. This change seems to fix the issue.
@@ -7439,7 +7439,7 @@ def prefetch(sq, *subqueries): rel_map.setdefault(rel_model, []) rel_map[rel_model].append(pq) - deps[query_model] = {} + deps.setdefault(query_model, {}) id_map = deps[query_model] has_relations = bool(rel_map.get(query_model))
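A tiny illustration of why `setdefault` matters here (model names and values are made up; this is not peewee's real data flow). When the same query model is prefetched against several related models, plain assignment resets the id map on every pass, while `setdefault` lets the passes accumulate:

```
# Old behaviour: the second pass over the same model wipes what the first collected.
deps = {}
deps["Tweet"] = {}
deps["Tweet"][1] = "instance collected while prefetching rel model A"
deps["Tweet"] = {}                      # second rel model: map is reset
deps["Tweet"][2] = "instance collected while prefetching rel model B"
print(deps)                             # {'Tweet': {2: ...}} -- pass A was lost

# Fixed behaviour: both passes share a single id map.
deps = {}
deps.setdefault("Tweet", {})[1] = "instance collected while prefetching rel model A"
deps.setdefault("Tweet", {})[2] = "instance collected while prefetching rel model B"
print(deps)                             # {'Tweet': {1: ..., 2: ...}}
```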
Update to be able to use Pytest in TIR When I use TIR with PYTEST, it doesn't use the runner, so the 'test' attribute doesn't exist. I have made a correction that fixes that problem.
@@ -139,7 +139,10 @@ class Log: Returns a list of test cases from suite """ runner = next(iter(list(filter(lambda x: "runner.py" in x.filename, inspect.stack())))) + try: return list(runner.frame.f_locals['test']) + except KeyError: + return [] def get_testcase_stack(self): """
Fix race in Mesos batch system Fixes Jobs were submitted to the queue *before* their taskResources were filled in. If the leader thread gets interrupted just before the taskResources are filled in, and the Mesos driver thread calls resourceOffers, the driver will crash and the pipeline will stall.
@@ -190,8 +190,8 @@ class MesosBatchSystem(BatchSystemLocalSupport, # TODO: round all elements of resources - self.jobQueues.insertJob(job, jobType) self.taskResources[jobID] = job.resources + self.jobQueues.insertJob(job, jobType) log.debug("... queued") return jobID
Fix error in master/pyomeca in one linestyle ("-." instead of the nonexistent ".-") Dunno why that didn't raise an error until my recent changes
@@ -55,7 +55,7 @@ class PlotOcp: self.ocp = ocp self.plot_options = { "general_options": {"use_tight_layout": True}, - "non_integrated_plots": {"linestyle": ".-", "markersize": 3}, + "non_integrated_plots": {"linestyle": "-.", "markersize": 3}, "integrated_plots": {"linestyle": "-", "markersize": 3, "linewidth": 1.1}, "bounds": {"color": "k", "linewidth": 0.4, "linestyle": "-"}, "grid": {"color": "k", "linestyle": "-", "linewidth": 0.15},
indexing bug Found and fixed an indexing bug.
@@ -149,10 +149,14 @@ def compare_model_and_inst(pairs=None, inst_name=[], mod_name=[], # Flatten both data sets, since accuracy routines require 1D arrays inst_dat = pairs.data_vars[iname].values.flatten() + # Ensure no NaN are used in statistics + inum = np.where(~np.isnan(mod_scaled) & ~np.isnan(inst_dat))[0] + # Calculate all of the desired statistics for mm in methods: try: - stat_dict[iname][mm] = method_rout[mm](mod_scaled, inst_dat) + stat_dict[iname][mm] = method_rout[mm](mod_scaled[inum], + inst_dat[inum]) # Convenience functions add layers to the output, remove # these layers
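A short sketch of what the added mask does (array values are made up): `np.where(~np.isnan(...) & ~np.isnan(...))[0]` keeps only the indices where both the scaled model values and the instrument data are finite, so NaNs never reach the statistics routines:

```
import numpy as np

mod_scaled = np.array([1.0, np.nan, 3.0, 4.0])
inst_dat = np.array([1.1, 2.0, np.nan, 3.9])

# Indices where neither series is NaN
inum = np.where(~np.isnan(mod_scaled) & ~np.isnan(inst_dat))[0]
print(inum)                               # [0 3]
print(mod_scaled[inum], inst_dat[inum])   # [1. 4.] [1.1 3.9]
```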
Remove unnecessary test_request_context manager This doesn't affect how the tests run and just adds complexity.
@@ -167,7 +167,6 @@ def test_delete_letter_notifications_older_than_retention_calls_child_task(notif def test_timeout_notifications_after_timeout(notify_api, sample_template): - with notify_api.test_request_context(): not1 = create_notification( template=sample_template, status='sending', @@ -190,7 +189,6 @@ def test_timeout_notifications_after_timeout(notify_api, sample_template): def test_timeout_notifications_before_timeout(notify_api, sample_template): - with notify_api.test_request_context(): not1 = create_notification( template=sample_template, status='sending',
Fixup: `get_last_submission_time_for_users()` set `for_export=True` to match behavior prior to
@@ -37,7 +37,7 @@ from corehq.util.quickcache import quickcache PagedResult = namedtuple('PagedResult', 'total hits') -def get_last_submission_time_for_users(domain, user_ids, datespan, for_export=True): +def get_last_submission_time_for_users(domain, user_ids, datespan, for_export=False): def convert_to_date(date): return string_to_datetime(date).date() if date else None query = (
DOC: updated template docstring Updated a template docstring to include a better example for future developers.
@@ -372,12 +372,11 @@ def list_remote_files(tag, inst_id, user=None, password=None): Parameters ----------- - tag : str or NoneType + tag : str Denotes type of file to load. Accepted types are <tag strings>. - (default=None) - inst_id : str or NoneType - Specifies the satellite ID for a constellation. Not used. - (default=None) + inst_id : str + Specifies the satellite or instrument ID. Accepted types are + <inst_id strings>. user : str or NoneType Username to be passed along to resource with relevant data. (default=None)
Use global user cache to fetch reaction event data. Also make sure it isn't dispatched unless the data meets the integrity checks (i.e. not None).
@@ -321,6 +321,7 @@ class ConnectionState: if message is not None: reaction = message._add_reaction(data) user = self._get_reaction_user(message.channel, int(data['user_id'])) + if user: self.dispatch('reaction_add', reaction, user) def parse_message_reaction_remove_all(self, data): @@ -339,6 +340,7 @@ class ConnectionState: pass else: user = self._get_reaction_user(message.channel, int(data['user_id'])) + if user: self.dispatch('reaction_remove', reaction, user) def parse_presence_update(self, data): @@ -721,14 +723,9 @@ class ConnectionState: self.dispatch('relationship_remove', old) def _get_reaction_user(self, channel, user_id): - if isinstance(channel, DMChannel) and user_id == channel.recipient.id: - return channel.recipient - elif isinstance(channel, TextChannel): + if isinstance(channel, TextChannel): return channel.guild.get_member(user_id) - elif isinstance(channel, GroupChannel): - return utils.find(lambda m: m.id == user_id, channel.recipients) - else: - return None + return self.get_user(user_id) def get_reaction_emoji(self, data): emoji_id = utils._get_as_snowflake(data, 'id')
small typo in using slots doc Make the name of the action line up with the rest of the example
@@ -123,7 +123,7 @@ When the ``fetch_profile`` action is run, it returns a class FetchProfileAction(Action): def name(self): - return "fetch_profile" + return "action_fetch_profile" def run(self, dispatcher, tracker, domain): url = "http://myprofileurl.com"
GeneratorFactory: make getCategory respect self.site The previous behavior was to generate the pages with the site in user-config, regardless of the value in self.site.
@@ -544,6 +544,7 @@ class GeneratorFactory(object): categoryname = u'{0}:{1}'.format(self.site.namespace(14), categoryname) cat = pywikibot.Category(pywikibot.Link(categoryname, + source=self.site, defaultNamespace=14)) return cat, startfrom
Removed a +1 in nr of shots on advice of Niels H Tested with SSRO. No problems introduced by change.
@@ -1381,7 +1381,6 @@ class UHFQC_input_average_detector(Hard_Detector): self.AWG = AWG self.nr_samples = nr_samples self.nr_averages = nr_averages - print(nr_samples) def get_values(self): self.UHFQC.quex_rl_readout(0) # resets UHFQC internal readout counters @@ -1592,7 +1591,7 @@ class UHFQC_integration_logging_det(Hard_Detector): # the loop self.UHFQC.awgs_0_single(1) self.UHFQC.awgs_0_userregs_1(0) # 0 for rl, 1 for iavg - self.UHFQC.awgs_0_userregs_0(self.nr_shots+1) + self.UHFQC.awgs_0_userregs_0(self.nr_shots) self.UHFQC.quex_rl_length(self.nr_shots) self.UHFQC.quex_rl_avgcnt(0) # 1 for single shot readout self.UHFQC.quex_wint_length(int(self.integration_length*(1.8e9)))
Update feedpress-takeover.yaml As the issue indicated, it's no longer vulnerable, since 2020/9.
@@ -3,9 +3,9 @@ id: feedpress-takeover info: name: Agilecrm Takeover Detection author: pdteam - severity: high + severity: info reference: - - https://github.com/EdOverflow/can-i-take-over-xyz + - https://github.com/EdOverflow/can-i-take-over-xyz/issues/80 tags: takeover requests:
Updated README Fixed some errors.
@@ -7,16 +7,16 @@ This module contains some useful classes and functions for dealing with linear a ## Overview - class Vector - - This class represents a vector of arbitray size and operations on it. + - This class represents a vector of arbitrary size and operations on it. **Overview about the methods:** - constructor(components : list) : init the vector - - set(components : list) : changes the vector components. + - set(components : list) : changes the vector components - __str__() : toString method - - component(i : int): gets the i-th component (start by 0) + - component(i : int): gets the i-th component (start with 0) - size() : gets the size of the vector (number of components) - - euclidLength() : returns the eulidean length of the vector. + - euclidLength() : returns the euclidean length of the vector. - operator + : vector addition - operator - : vector subtraction - operator * : scalar multiplication and dot product @@ -58,20 +58,20 @@ This module contains some useful classes and functions for dealing with linear a The module is well documented. You can use the python in-built ```help(...)``` function. For instance: ```help(Vector)``` gives you all information about the Vector-class. -Or ```help(unitBasisVector)``` gives you all information you needed about the -global function ```unitBasisVector(...)```. If you need informations about a certain +Or ```help(unitBasisVector)``` gives you all information you need about the +global function ```unitBasisVector(...)```. If you need information about a certain method you type ```help(CLASSNAME.METHODNAME)```. --- ## Usage -You will find the module in the **src** directory its called ```lib.py```. You need to -import this module in your project. Alternative you can also use the file ```lib.pyc``` in python-bytecode. +You will find the module in the **src** directory called ```lib.py```. You need to +import this module in your project. Alternatively you can also use the file ```lib.pyc``` in python-bytecode. --- ## Tests -In the **src** directory you also find the test-suite, its called ```tests.py```. +In the **src** directory you can also find the test-suite, its called ```tests.py```. The test-suite uses the built-in python-test-framework **unittest**.
Update file.py There were problems with image links from Facebook, for example, because their addresses contain (and need) query parameters such as ?parameter1=abc&parameter2=def.
@@ -329,7 +329,12 @@ def setup_folder_path(filename, new_parent): def get_extension(filename, extn, content): mimetype = None + if extn: + # remove '?' char and parameters from extn if present + if '?' in extn: + extn = extn.split('?', 1)[0] + mimetype = mimetypes.guess_type(filename + "." + extn)[0] if mimetype is None or not mimetype.startswith("image/") and content:
get-pip The URL of get-pip.py has changed to
@@ -187,7 +187,7 @@ function mn_deps { $install ${PYPKG}-pip || $install ${PYPKG}-pip-whl if ! ${PYTHON} -m pip -V; then if [ $PYTHON_VERSION == 2 ]; then - wget https://bootstrap.pypa.io/2.6/get-pip.py + wget https://bootstrap.pypa.io/pip/2.6/get-pip.py else wget https://bootstrap.pypa.io/get-pip.py fi
fcntl: make mutate_flag optional for ioctl w/ read-only buffer Fixes
@@ -111,7 +111,7 @@ def ioctl(__fd: FileDescriptorLike, def ioctl(__fd: FileDescriptorLike, __request: int, __arg: _ReadOnlyBuffer, - __mutate_flag: bool) -> bytes: ... + __mutate_flag: bool = ...) -> bytes: ... def flock(__fd: FileDescriptorLike, __operation: int) -> None: ... def lockf(__fd: FileDescriptorLike, __cmd: int,
Set utf-8 encoding for reading the logo This appears primarily in Windows CLIs, see. Also, `termios` is not supported on Windows; however, I haven't yet looked into an alternative.
@@ -75,7 +75,7 @@ def play_aidungeon_2(): story_manager = UnconstrainedStoryManager(generator) print("\n") - with open('opening.txt', 'r') as file: + with open('opening.txt', 'r', encoding='utf-8') as file: starter = file.read() print(starter)
Modified version The default value is now a Version object, not None. Executing the command "hagrid launch domain" produced the error log "Error: '>' not supported between instances of 'NoneType' and 'Version'"
@@ -82,7 +82,7 @@ class Dependency: name: str = "" display: str = "" only_os: str = "" - version: Optional[Version] = None + version: Optional[Version] = version.parse("None") valid: bool = False issues: List[SetupIssue] = field(default_factory=list) output_in_text: bool = False @@ -165,6 +165,7 @@ class DependencyGridDockerCompose(Dependency): binary_info = BinaryInfo( binary="docker", version_cmd="docker compose version" ).get_binary_info() + if binary_info.path and binary_info.version > version.parse( MINIMUM_DOCKER_COMPOSE_VERSION ): @@ -294,7 +295,7 @@ class BinaryInfo: version_cmd: str error: Optional[str] = None path: Optional[str] = None - version: Optional[Union[str, Version]] = None + version: Optional[Union[str, Version]] = version.parse("None") version_regex = ( r"[^\d]*(" + r"(0|[1-9][0-9]*)\.*(0|[1-9][0-9]*)\.*(0|[1-9][0-9]*)"
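For context, a minimal reproduction of the quoted failure (this is not hagrid code, and the minimum version below is just an illustrative value): comparing `None` against a `packaging` `Version` object raises exactly the error from the commit message, which is why the default needed to become a comparable value:

```
from packaging import version

MINIMUM_DOCKER_COMPOSE_VERSION = version.parse("1.29.0")   # illustrative value
binary_version = None                                      # what the old default produced

try:
    binary_version > MINIMUM_DOCKER_COMPOSE_VERSION
except TypeError as exc:
    # '>' not supported between instances of 'NoneType' and 'Version'
    print(exc)
```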
Update search.js make doctype in search result summary translatable
@@ -303,7 +303,7 @@ frappe.search.SearchDialog = class { let $results_list = $(`<div class="results-summary"> <div class="result-section full-list ${type}-section col-sm-12"> - <div class="result-title">${type}</div> + <div class="result-title"> ` + __(type) + `</div> <div class="result-body"> </div> </div> @@ -340,7 +340,7 @@ frappe.search.SearchDialog = class { } let $result_section = $(`<div class="col-sm-12 result-section" data-type="${type}"> - <div class="result-title">${type}</div> + <div class="result-title">${__(type)}</div> <div class="result-body"> ${more_html} </div>
Always handle multiple image files as a stack of images. Don't allow merging SPEC and image formats in a single command.
@@ -33,6 +33,7 @@ import re import time import silx.io +from silx.io.specfile import is_specfile try: from silx.io import fabioh5 @@ -126,6 +127,26 @@ def drop_indices_after_end(filenames, regex, end): return output_filenames +def are_all_specfile(filenames): + """Return True if all files in a list are SPEC files. + :param List[str] filenames: list of filenames + """ + for fname in filenames: + if not is_specfile(fname): + return False + return True + + +def contains_specfile(filenames): + """Return True if any file in a list are SPEC files. + :param List[str] filenames: list of filenames + """ + for fname in filenames: + if is_specfile(fname): + return True + return False + + def main(argv): """ Main function to launch the converter as an application @@ -137,11 +158,15 @@ def main(argv): parser.add_argument( 'input_files', nargs="*", - help='Input files (EDF, SPEC).') + help='Input files (EDF, TIFF, SPEC...). When specifying multiple ' + 'files, you cannot specify both fabio images and SPEC files. ' + 'Multiple SPEC files will simply be concatenated, with one ' + 'entry per scan. Multiple image files will be merged into ' + 'a single entry with a stack of images.') # input_files and --filepattern are mutually exclusive parser.add_argument( '--file-pattern', - help='File name pattern for loading a series of indexed files ' + help='File name pattern for loading a series of indexed image files ' '(toto_%%04d.edf). This argument is incompatible with argument ' 'input_files. If an output URI with a HDF5 path is provided, ' 'only the content of the NXdetector group will be copied there. ' @@ -408,8 +433,10 @@ def main(argv): if options.fletcher32: create_dataset_args["fletcher32"] = True - if options.file_pattern is not None: - # File series + if (len(options.input_files) > 1 and + not contains_specfile(options.input_files)) or\ + options.file_pattern is not None: + # File series -> stack of images if fabioh5 is None: # return a helpful error message if fabio is missing try: @@ -433,8 +460,8 @@ def main(argv): create_dataset_args=create_dataset_args, min_size=options.min_size) - else: - # single file or unrelated files + elif len(options.input_files) == 1 or are_all_specfile(options.input_files): + # single file, or spec files h5paths_and_groups = [] for input_name in options.input_files: hdf5_path_for_file = hdf5_path @@ -459,6 +486,13 @@ def main(argv): create_dataset_args=create_dataset_args, min_size=options.min_size) + else: + # multiple file, SPEC and fabio images mixed + _logger.error("Multiple files with incompatible formats specified. " + "You can provide multiple SPEC files or multiple image " + "files, but not both.") + return -1 + with h5py.File(output_name, mode="r+") as h5f: # append "silx convert" to the creator attribute, for NeXus files previous_creator = h5f.attrs.get("creator", b"").decode()
appease pylint Use the context manager form of subprocess.Popen in tests.
# pylint: disable=missing-docstring import os -import socket -import subprocess import sys -import time +from socket import socket +from subprocess import PIPE +from subprocess import Popen +from subprocess import STDOUT +from time import sleep +from time import time import pytest @@ -16,42 +19,43 @@ if "BEANCOUNT_FILE" in os.environ: del os.environ["BEANCOUNT_FILE"] -def get_port(): - sock = socket.socket() +def get_port() -> int: + sock = socket() sock.bind((HOST, 0)) port = sock.getsockname()[1] sock.close() return port -def output_contains(process, output, timeout): - endtime = time.time() + timeout +def output_contains(process: Popen, output: str, timeout: int) -> bool: + endtime = time() + timeout while True: - if time.time() > endtime: + if time() > endtime or not process.stdout: return False if output in process.stdout.readline(): return True - time.sleep(0.1) - - -def run_fava(args=()): - return subprocess.Popen( - FAVA + args, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True, - ) + sleep(0.1) @pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") -def test_cli(): +def test_cli() -> None: port = str(get_port()) args = (EXAMPLE_FILE, "-d", "-p", port) - process = run_fava(args) + with Popen( + FAVA + args, + stdout=PIPE, + stderr=STDOUT, + universal_newlines=True, + ) as process: assert output_contains(process, "Running on", 20) - process2 = run_fava(args) + with Popen( + FAVA + args, + stdout=PIPE, + stderr=STDOUT, + universal_newlines=True, + ) as process2: process2.wait() process.terminate() - process.wait() + assert process2.stdout assert "in use" in "".join(process2.stdout.readlines()) assert process2.returncode == 2
BUG: fix win32 np.clip slowness The use of the macro _NPY_CLIP results in multiple re-evaluations of the input arguments. Thus for floating point types, the check of NaNs is performed multiple times. This manifests itself as a slowness on Win32 builds. See
* npy_datetime, npy_timedelta# */ -#define _NPY_CLIP(x, min, max) \ - _NPY_@name@_MIN(_NPY_@name@_MAX((x), (min)), (max)) - NPY_NO_EXPORT void @name@_clip(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { @@ -95,25 +92,33 @@ NPY_NO_EXPORT void /* contiguous, branch to let the compiler optimize */ if (is1 == sizeof(@type@) && os1 == sizeof(@type@)) { for(npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) { - *(@type@ *)op1 = _NPY_CLIP(*(@type@ *)ip1, min_val, max_val); + @type@ t = *(@type@ *)ip1; + t = _NPY_@name@_MAX(t, min_val); + t = _NPY_@name@_MIN(t, max_val); + *(@type@ *)op1 = t; } } else { for(npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) { - *(@type@ *)op1 = _NPY_CLIP(*(@type@ *)ip1, min_val, max_val); + @type@ t = *(@type@ *)ip1; + t = _NPY_@name@_MAX(t, min_val); + t = _NPY_@name@_MIN(t, max_val); + *(@type@ *)op1 = t; } } } else { TERNARY_LOOP { - *(@type@ *)op1 = _NPY_CLIP(*(@type@ *)ip1, *(@type@ *)ip2, *(@type@ *)ip3); + @type@ t = *(@type@ *)ip1; + t = _NPY_@name@_MAX(t, *(@type@ *)ip2); + t = _NPY_@name@_MIN(t, *(@type@ *)ip3); + *(@type@ *)op1 = t; } } npy_clear_floatstatus_barrier((char*)dimensions); } // clean up the macros we defined above -#undef _NPY_CLIP #undef _NPY_@name@_MAX #undef _NPY_@name@_MIN
Fix mock emulating lvm version 'lvm version' produces multiline output whereas test_version and test_fullversion use mocks with single-line output. Use real-life 'lvm version' output in those mocks instead.
@@ -34,18 +34,29 @@ class LinuxLVMTestCase(TestCase): ''' Tests LVM version info from lvm version ''' - mock = MagicMock(return_value='Library version : 1') + mock = MagicMock(return_value= + ' LVM version: 2.02.168(2) (2016-11-30)\n' + ' Library version: 1.03.01 (2016-11-30)\n' + ' Driver version: 4.35.0\n' + ) with patch.dict(linux_lvm.__salt__, {'cmd.run': mock}): - self.assertEqual(linux_lvm.version(), '1') + self.assertEqual(linux_lvm.version(), '2.02.168(2) (2016-11-30)') def test_fullversion(self): ''' Tests all version info from lvm version ''' - mock = MagicMock(return_value='Library version : 1') + mock = MagicMock(return_value= + ' LVM version: 2.02.168(2) (2016-11-30)\n' + ' Library version: 1.03.01 (2016-11-30)\n' + ' Driver version: 4.35.0\n' + ) with patch.dict(linux_lvm.__salt__, {'cmd.run': mock}): self.assertDictEqual(linux_lvm.fullversion(), - {'Library version': '1'}) + {'LVM version': '2.02.168(2) (2016-11-30)', + 'Library version': '1.03.01 (2016-11-30)', + 'Driver version': '4.35.0', + }) def test_pvdisplay(self): '''
Use hashlib instead of sha library. The sha library was deprecated in Python 2.5.
@@ -19,10 +19,10 @@ to guard the display of sensitive information.""" __author__ = '[email protected] (Ka-Ping Yee)' import cgi +import hashlib import os import pickle import random -import sha import time import urlparse @@ -36,7 +36,7 @@ REVEAL_KEY_LENGTH = 20 def sha1_hash(string): """Computes the SHA-1 hash of the given string.""" - return sha.new(string).digest() + return hashlib.sha1(string).digest() def xor(string, byte): """Exclusive-ors each character in a string with the given byte."""
Retry `docker push` 5 times Hopefully this avoids annoying timeouts that we get in our CI when pushing to quay.io. Fixes
@@ -221,7 +221,7 @@ obliterate_docker: clean_docker -docker images -qf dangling=true | xargs docker rmi push_docker: docker check_docker_registry - docker push $(docker_image):$(docker_tag) + for i in $$(seq 1 5); do docker push $(docker_image):$(docker_tag) && break || sleep 60; done else
Change repeat record attempt from a <pre/> to a <div/> When the response from the server doesn't contain newlines, you have to scroll all the way over to read the error message and it's kinda annoying. This keeps it visually separated, but will break lines.
{% block js-inline %}{{ block.super }} <script> $(function() { - $('#report-content').on('click', '.toggle-next-pre', function (e) { - $(this).nextAll('pre').toggle(); + $('#report-content').on('click', '.toggle-next-attempt', function (e) { + $(this).nextAll('.record-attempt').toggle(); e.preventDefault(); }); var codeMirror = null;
Update publish-flow-onchain.md small tweak: ocean no longer has a `config` attribute. But we have access to the `config` object directly anyway, so use that. Note: even after this fix, the README still has an error. See
@@ -40,7 +40,7 @@ contract_abi = { from ocean_lib.ocean.util import get_address_of_type from ocean_lib.models.factory_router import FactoryRouter -contract_address = get_address_of_type(ocean.config, FactoryRouter.CONTRACT_NAME) +contract_address = get_address_of_type(config, FactoryRouter.CONTRACT_NAME) #create asset asset = ocean.assets.create_onchain_asset(name, contract_address, contract_abi, xalice_wallet)
Fix homogenize not working with tuples Also fixes data corruption bug due to lists being mutable.
@@ -64,7 +64,7 @@ def homogenize(self, key, compare_values, default_row=None): rows.append(Row(default_row(difference), self._column_names)) else: if default_row is not None: - new_row = default_row + new_row = list(default_row) else: new_row = [None] * (len(self._column_names) - len(key))
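A small sketch of the two problems that `list(default_row)` addresses (the helper below is illustrative, not agate's real internals): aliasing the mutable `default_row` lets edits to one row leak into every other row, and a tuple default could not be extended at all, whereas `list(...)` copies the values into a fresh list and accepts tuples:

```
def build_rows(keys, default_row, copy_default):
    rows = []
    for key in keys:
        new_row = list(default_row) if copy_default else default_row
        new_row.append(key)
        rows.append(new_row)
    return rows

print(build_rows(["a", "b"], [None], copy_default=True))
# [[None, 'a'], [None, 'b']]

print(build_rows(["a", "b"], [None], copy_default=False))
# [[None, 'a', 'b'], [None, 'a', 'b']]  -- every "row" is the same mutated object

# With a tuple default, the aliased version fails outright: tuples have no append().
```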
[dagster-aws cli] Wait for EC2 instance status OK Summary: This addresses (4) in Test Plan: manual Reviewers: #ft, alangenfeld Subscribers: schrockn, alangenfeld
@@ -126,7 +126,7 @@ def init(): else: key_pair_name, key_file_path = create_key_pair(client, dagster_home) - inst = create_ec2_instance(ec2, security_group_id, ami_id, key_pair_name) + inst = create_ec2_instance(client, ec2, security_group_id, ami_id, key_pair_name) # Save host configuration for future commands cfg = HostConfig(
Remove "fxedit" static library from the "script/create-dist" config. The library was removed.
@@ -109,7 +109,6 @@ BINARIES_SHARED_LIBRARY = { os.path.join('obj', 'third_party', 'pdfium', 'libfpdftext.a'), os.path.join('obj', 'third_party', 'pdfium', 'libfxcodec.a'), os.path.join('obj', 'third_party', 'pdfium', 'libfxcrt.a'), - os.path.join('obj', 'third_party', 'pdfium', 'libfxedit.a'), os.path.join('obj', 'third_party', 'pdfium', 'libfxge.a'), os.path.join('obj', 'third_party', 'pdfium', 'libfxjs.a'), os.path.join('obj', 'third_party', 'pdfium', 'libjavascript.a'), @@ -132,7 +131,6 @@ BINARIES_SHARED_LIBRARY = { os.path.join('obj', 'third_party', 'pdfium', 'libfpdftext.a'), os.path.join('obj', 'third_party', 'pdfium', 'libfxcodec.a'), os.path.join('obj', 'third_party', 'pdfium', 'libfxcrt.a'), - os.path.join('obj', 'third_party', 'pdfium', 'libfxedit.a'), os.path.join('obj', 'third_party', 'pdfium', 'libfxge.a'), os.path.join('obj', 'third_party', 'pdfium', 'libfxjs.a'), os.path.join('obj', 'third_party', 'pdfium', 'libjavascript.a'), @@ -172,8 +170,6 @@ BINARIES_SHARED_LIBRARY = { os.path.join('obj', 'third_party', 'pdfium', 'fxcodec.lib'), os.path.join('obj', 'third_party', 'pdfium', 'fxcrt_cc.pdb'), os.path.join('obj', 'third_party', 'pdfium', 'fxcrt.lib'), - os.path.join('obj', 'third_party', 'pdfium', 'fxedit_cc.pdb'), - os.path.join('obj', 'third_party', 'pdfium', 'fxedit.lib'), os.path.join('obj', 'third_party', 'pdfium', 'fxge_cc.pdb'), os.path.join('obj', 'third_party', 'pdfium', 'fxge.lib'), os.path.join('obj', 'third_party', 'pdfium', 'fxjs_cc.pdb'),
Convert other get_url requests to wget command calls wget provides better support for retrying and timeout delays
# openjpeg - name: Download openjpeg - get_url: - url: https://github.com/uclouvain/openjpeg/archive/v2.1.2.tar.gz - dest: "{{ root_dir }}/openjpeg-2.1.2.tar.gz" - checksum: "sha1:c8671e7f577fdc58abde1e1f32b10d372e6f9b07" + command: >- + wget --retry-connrefused --waitretry=1 --read-timeout=300 https://github.com/uclouvain/openjpeg/archive/v2.1.2.tar.gz -O openjpeg-v2.1.2.tar.gz + args: + chdir: "{{ root_dir }}" + creates: openjpeg-v2.1.2.tar.gz - name: Extract openjpeg unarchive: # libtiff - name: Download libtiff - command: >- wget --retry-connrefused --waitretry=1 --read-timeout=300 http://download.osgeo.org/libtiff/tiff-4.0.6.tar.gz -O tiff-4.0.6.tar.gz args: # Openslide - name: Download openslide - get_url: - url: https://github.com/openslide/openslide/archive/v3.4.1.tar.gz - dest: "{{ root_dir }}/openslide-3.4.1.tar.gz" - checksum: "sha1:bd891d20434c9d99d71c3125f990f42755b0ebec" + command: >- + wget --retry-connrefused --waitretry=1 --read-timeout=300 https://github.com/openslide/openslide/archive/v3.4.1.tar.gz -O openslide-v3.4.1.tar.gz + args: + chdir: "{{ root_dir }}" + creates: openslide-v3.4.1.tar.gz - name: Extract openslide unarchive:
wizard: let UserCancelled propagate out in hw wallet flow follow
@@ -393,8 +393,10 @@ class BaseWizard(Logger): # will need to re-pair devmgr.unpair_id(device_info.device.id_) raise ChooseHwDeviceAgain() - except (UserCancelled, GoBack): + except GoBack: raise ChooseHwDeviceAgain() + except (UserCancelled, ReRunDialog): + raise except UserFacingException as e: self.show_error(str(e)) raise ChooseHwDeviceAgain()
Produce coredumps in Travis on test failure. This can help if we're crashing for some reason, which will happen more and more as our C++ library gets bigger.
@@ -14,6 +14,15 @@ env: install: - pip install pipenv - pipenv install --dev --deploy + - sudo apt-get install -y gdb # install gdb + +before_script: + - ulimit -c unlimited -S # enable core dumps + +after_failure: + - PYTHON_EXECUTABLE=$(python3 -c "import sys; print(sys.executable)") + - COREFILE=$(find . -maxdepth 1 -name "core*" | head -n 1) # find core file + - if [[ -f "$COREFILE" ]]; then gdb -c "$COREFILE" $PYTHON_EXECUTABLE -ex "thread apply all bt" -ex "set pagination 0" -batch; fi script: - make testcert.cert
Restricts Voice Silence Skip To Mod Roles Raises the permission required to not be muted during a voice silence to moderation roles.
@@ -282,7 +282,10 @@ class Silence(commands.Cog): log.debug(f"Removing all non staff members from #{channel.name} ({channel.id}).") for member in channel.members: - if self._helper_role not in member.roles: + # Skip staff + if any(role.id in constants.MODERATION_ROLES for role in member.roles): + continue + try: await member.move_to(None, reason="Kicking member from voice channel.") log.debug(f"Kicked {member.name} from voice channel.") @@ -306,7 +309,7 @@ class Silence(commands.Cog): # Move all members to temporary channel and back for member in channel.members: # Skip staff - if self._helper_role in member.roles: + if any(role.id in constants.MODERATION_ROLES for role in member.roles): continue try:
Round unitsPerEm Fix
@@ -240,7 +240,7 @@ class BaseOutlineCompiler(object): fullFontRevision, head.fontRevision) # upm - head.unitsPerEm = getAttrWithFallback(font.info, "unitsPerEm") + head.unitsPerEm = round(getAttrWithFallback(font.info, "unitsPerEm")) # times head.created = dateStringToTimeValue(getAttrWithFallback(font.info, "openTypeHeadCreated")) - mac_epoch_diff
Some fixes in Huawei.VRP cm parser HG-- branch : feature/microservices
@@ -260,12 +260,14 @@ class BaseVRPParser(BaseParser): """ info-center loghost 10.46.147.5 channel 9 """ + if len(tokens) > 2: self.get_sysloghost_fact(tokens[2]) def on_ntp_server(self, tokens): """ ntp-service unicast-server 1.1.1.1 """ + if len(tokens) > 2: self.get_ntpserver_fact(tokens[2]) def on_system_domain_name(self, tokens): @@ -316,6 +318,7 @@ class BaseVRPParser(BaseParser): undo port hybrid vlan 1 port hybrid untagged vlan 1223 2478 port hybrid vlan 1757 untagged + port hybrid vlan 1757 tagged """ si = self.get_current_subinterface() @@ -325,6 +328,8 @@ class BaseVRPParser(BaseParser): else: if tokens[-1] == "untagged": si.untagged_vlan = int(tokens[-2]) + elif tokens[-1] == "tagged": + si.tagged_vlan += [int(tokens[-2])] else: si.untagged_vlan = int(tokens[4]) si.add_afi("BRIDGE")
Filter UserWarning in QASM tests The test files specifically include constructs that aren't entirely supported yet, so it's correct for them to warn. It's also correct for us to assert that the warning occurs, though.
@@ -73,6 +73,7 @@ def check_measurement_defn(gate, gate_name, targets, classical_store): def test_qasm_addcircuit(): filename = "test_add.qasm" filepath = Path(__file__).parent / 'qasm_files' / filename + with pytest.warns(UserWarning, match="not preserved in QubitCircuit"): qc = read_qasm(filepath) assert qc.N == 2 assert qc.num_cbits == 2 @@ -100,6 +101,7 @@ def test_custom_gates(): def test_qasm_teleportation(): filename = "teleportation.qasm" filepath = Path(__file__).parent / 'qasm_files' / filename + with pytest.warns(UserWarning, match="not preserved in QubitCircuit"): teleportation = read_qasm(filepath) final_measurement = Measurement("start", targets=[2]) initial_measurement = Measurement("start", targets=[0])
Fix consistency check for elements location This was failing when there was no point mass added to the rotor.
@@ -284,7 +284,12 @@ class Rotor(object): self.df_seals = df_seals # check consistence for disks and bearings location - if df.n_l.max() > df_shaft.n_r.max() and df.n_l.max() > df_point_mass.n.max(): + if len(df_point_mass) > 0: + max_loc_point_mass = df_point_mass.n.max() + else: + max_loc_point_mass = 0 + max_location = max(df_shaft.n_r.max(), max_loc_point_mass) + if df.n_l.max() > max_location: raise ValueError("Trying to set disk or bearing outside shaft") self.df = df
Minor edits to doc tests and unit tests for landslides component Fixed minor issues with doctests and unit tests. Tests pass on local computer. Ready to test pull request 480.
@@ -41,16 +41,16 @@ def test_input_var_names(): """Testing if the input_var_names outputs the right list. """ assert_equal(sorted(ls_prob.input_var_names), - ['topographic__specific_contributing_area', - 'topographic__slope', - 'soil__transmissivity', - 'soil__saturated_hydraulic_conductivity', - 'soil__mode_total_cohesion', - 'soil__minimum_total_cohesion', - 'soil__maximum_total_cohesion', + ['soil__density', 'soil__internal_friction_angle', - 'soil__density', - 'soil__thickness']) + 'soil__maximum_total_cohesion', + 'soil__minimum_total_cohesion', + 'soil__mode_total_cohesion', + 'soil__saturated_hydraulic_conductivity', + 'soil__thickness', + 'soil__transmissivity', + 'topographic__slope', + 'topographic__specific_contributing_area']) @with_setup(setup_grid) @@ -58,8 +58,8 @@ def test_output_var_names(): """Testing if output_var_names outputs the right list. """ assert_equal(sorted(ls_prob.output_var_names), - ['soil__mean_relative_wetness', - 'landslide__probability_of_failure', + ['landslide__probability_of_failure', + 'soil__mean_relative_wetness', 'soil__probability_of_saturation'])
Address CVE-2019-10906 in loadgenerator Upgrade Jinja2 to version 2.10.1 * [CVE-2019-10906](https://nvd.nist.gov/vuln/detail/CVE-2019-10906) * [GitHub Security Alert](https://github.com/GoogleCloudPlatform/stackdriver-sandbox/network/alert/src/loadgenerator/requirements.txt/Jinja2/open)
@@ -12,7 +12,7 @@ gevent==1.4.0 # via locustio greenlet==0.4.15 # via gevent idna==2.8 # via requests itsdangerous==1.1.0 # via flask -jinja2==2.10 # via flask +jinja2==2.10.1 # via flask locustio==0.8.1 markupsafe==1.1.0 # via jinja2 msgpack-python==0.5.6 # via locustio
Update cloudbuild.yaml changed 'stesps' to 'steps'
# See the License for the specific language governing permissions and # limitations under the License. -# Purpose - This Google Cloudbuild configuration mimics the same stesps found +# Purpose - This Google Cloudbuild configuration mimics the same steps found # in .travis.yml. This could potentially be used in lieu of TravisCI steps:
Make revoke via CLI single threaded Fixing error - working outside of app context
:license: Apache, see LICENSE for more details. .. moduleauthor:: Kevin Glisson <[email protected]> """ -import multiprocessing import sys from flask import current_app from flask_principal import Identity, identity_changed @@ -26,9 +25,10 @@ from lemur.certificates.service import ( get_all_valid_certs, get, get_all_certs_attached_to_endpoint_without_autorotate, + revoke as revoke_certificate, ) from lemur.certificates.verify import verify_string -from lemur.constants import SUCCESS_METRIC_STATUS, FAILURE_METRIC_STATUS +from lemur.constants import SUCCESS_METRIC_STATUS, FAILURE_METRIC_STATUS, CRLReason from lemur.deployment import service as deployment_service from lemur.domains.models import Domain from lemur.endpoints import service as endpoint_service @@ -586,11 +586,10 @@ def worker(data, commit, reason): parts = [x for x in data.split(" ") if x] try: cert = get(int(parts[0].strip())) - plugin = plugins.get(cert.authority.plugin_name) print("[+] Revoking certificate. Id: {0} Name: {1}".format(cert.id, cert.name)) if commit: - plugin.revoke_certificate(cert, reason) + revoke_certificate(cert, reason) metrics.send( "certificate_revoke", @@ -641,13 +640,14 @@ def revoke(path, reason, message, commit): print("[!] Running in COMMIT mode.") print("[+] Starting certificate revocation.") + + if reason not in CRLReason.__members__: + reason = CRLReason.unspecified.name comments = {"comments": message, "crl_reason": reason} with open(path, "r") as f: - args = [[x, commit, comments] for x in f.readlines()[2:]] - - with multiprocessing.Pool(processes=3) as pool: - pool.starmap(worker, args) + for x in f.readlines()[2:]: + worker(x, commit, comments) @manager.command
[IMPR] Avoid deeply nested flow statements continue loop instead of indenting if-statement
@@ -523,9 +523,10 @@ def main(*args): load_global_archivo() for page in pregenerator: + if not page.exists() or page.namespace() != 6 or page.isRedirectPage(): + continue + skip = False - if page.exists() and page.namespace() == 6 \ - and not page.isRedirectPage(): imagepage = pywikibot.FilePage(page.site, page.title()) # First do autoskip.
Tree rearrange correction for operations. The tree now accesses the dataobjects directly rather than getting a copy of them.
@@ -1338,7 +1338,7 @@ class RootNode(list): self.tree_lookup = {} self.tree.SetImageList(self.tree_images) self.item = self.tree.AddRoot(self.name) - self.node_operations = Node(NODE_OPERATION_BRANCH, list(elements.ops()), self, self, name=_("Operations")) + self.node_operations = Node(NODE_OPERATION_BRANCH, elements._operations, self, self, name=_("Operations")) self.node_operations.set_icon(icons8_laser_beam_20.GetBitmap()) self.build_tree(self.node_operations, list(elements.ops())) for n in self.node_operations: @@ -1347,7 +1347,7 @@ class RootNode(list): else: n.set_icon(icons8_laser_beam_20.GetBitmap()) - self.node_elements = Node(NODE_ELEMENTS_BRANCH, list(elements.elems()), self, self, name=_("Elements")) + self.node_elements = Node(NODE_ELEMENTS_BRANCH, elements._elements, self, self, name=_("Elements")) self.node_elements.set_icon(icons8_vector_20.GetBitmap()) self.build_tree(self.node_elements, list(elements.elems()))
ci: fix gha deprecations for promote-rc action update docker/login-action to v2 remove usage of deprecated ::set-output
@@ -21,7 +21,7 @@ jobs: - name: "Install Deps" uses: ./.github/actions/setup-deps - name: "Docker Login" - uses: docker/login-action@v1 + uses: docker/login-action@v2 with: registry: ${{ (!startsWith(secrets.RELEASE_REGISTRY, 'docker.io/')) && secrets.RELEASE_REGISTRY || null }} username: ${{ secrets.GH_DOCKER_RELEASE_USERNAME }} @@ -29,11 +29,11 @@ jobs: - id: step-main run: | make release/promote-oss/to-rc - echo "::set-output name=version::$(go run ./tools/src/goversion | sed s/^v//)" - echo "::set-output name=chart_version::$(go run ./tools/src/goversion --dir-prefix=chart | sed s/^v//)" + echo "version=$(go run ./tools/src/goversion | sed s/^v//)" >> $GITHUB_OUTPUT + echo "chart_version=$(go run ./tools/src/goversion --dir-prefix=chart | sed s/^v//)" >> $GITHUB_OUTPUT - id: check-slack-webhook name: Assign slack webhook variable - run: echo '::set-output name=slack_webhook_url::${{secrets.SLACK_WEBHOOK_URL}}' + run: echo "slack_webhook_url=${{secrets.SLACK_WEBHOOK_URL}}" >> $GITHUB_OUTPUT - name: Slack notification if: steps.check-slack-webhook.outputs.slack_webhook_url && always() uses: edge/simple-slack-notify@master
Fix use of FakeRepository in tests that change current dir outside of the repo root, e.g. via tmpdir_cwd, or when running any test from a different working dir.
@@ -32,16 +32,16 @@ from piptools.utils import ( make_install_requirement, ) -from .constants import MINIMAL_WHEELS_PATH +from .constants import MINIMAL_WHEELS_PATH, TEST_DATA_PATH from .utils import looks_like_ci class FakeRepository(BaseRepository): def __init__(self): - with open("tests/test_data/fake-index.json") as f: + with open(os.path.join(TEST_DATA_PATH, "fake-index.json")) as f: self.index = json.load(f) - with open("tests/test_data/fake-editables.json") as f: + with open(os.path.join(TEST_DATA_PATH, "fake-editables.json")) as f: self.editables = json.load(f) def get_hashes(self, ireq):
Confirm no delivery estimate for emails and SMS Only letters have a delivery estimate (which we calculate). This commit adds a test to make sure this remains the case.
@@ -9,6 +9,11 @@ from tests.app.db import ( create_template, create_service) +from tests.app.conftest import ( + sample_notification, + sample_email_notification, +) + @pytest.mark.parametrize('billable_units, provider', [ (1, 'mmg'), @@ -233,6 +238,26 @@ def test_get_notification_adds_delivery_estimate_for_letters( assert json_response['estimated_delivery'] == estimated_delivery [email protected]('notification_mock', [ + sample_notification, + sample_email_notification, +]) +def test_get_notification_doesnt_have_delivery_estimate_for_non_letters( + client, + notify_db, + notify_db_session, + notification_mock, +): + mocked_notification = notification_mock(notify_db, notify_db_session) + auth_header = create_authorization_header(service_id=mocked_notification.service_id) + response = client.get( + path='/v2/notifications/{}'.format(mocked_notification.id), + headers=[('Content-Type', 'application/json'), auth_header] + ) + assert response.status_code == 200 + assert 'estimated_delivery' not in json.loads(response.get_data(as_text=True)) + + def test_get_all_notifications_returns_200(client, sample_template): notifications = [create_notification(template=sample_template) for _ in range(2)] notification = notifications[-1]
Add possibility to add bearing outside shaft A bearing can now be added outside the shaft (on a point mass).
@@ -284,7 +284,7 @@ class Rotor(object): self.df_seals = df_seals # check consistence for disks and bearings location - if df.n_l.max() > df_shaft.n_r.max(): + if df.n_l.max() > df_shaft.n_r.max() and df.n_l.max() > df_point_mass.n.max(): raise ValueError("Trying to set disk or bearing outside shaft") self.df = df
A couple of small fixes to prevent errors associated with sample start and end when no values are passed to EK60. 1) Fixed names for self.read_start_sample and end_sample in __init__ (sample and start or end were reversed). 2) Added assignment of start_sample and end_sample if no values are passed to EK60.
@@ -135,8 +135,8 @@ class EK60(object): self.read_end_time = None self.read_start_ping = None self.read_end_ping = None - self.read_sample_start = None - self.read_sample_end = None + self.read_start_sample = None + self.read_end_sample = None # read_frequencies can be set to a list of floats specifying the frequencies to # read. An empty list will result in all frequencies being read. @@ -689,10 +689,12 @@ class raw_data(sample_data): self.sample_count[this_ping] = sample_datagram['count'] - start_sample else: self.sample_offset[this_ping] = 0 + start_sample = 0 if (end_sample): self.sample_count[this_ping] = end_sample + 1 else: self.sample_count[this_ping] = sample_datagram['count'] + end_sample = sample_datagram['count'] # now store the 2d "sample" data # determine what we need to store based on operational mode
Set whitespace-only strings to null in date columns. Fixes part of
@@ -99,6 +99,10 @@ c.execute("LOAD DATA LOCAL INFILE %s INTO TABLE raw_table " # Remove the very few records that mess up the demo # (demo purposes only! Don't do something like this in production) c.execute("DELETE FROM raw_table WHERE LENGTH(date_recieved) < 10") + +# set empty, non-zero, strings in date columns to null +c.execute("UPDATE raw_table SET report_period_begin = NULL WHERE LENGTH(report_period_begin) < 10") +c.execute("UPDATE raw_table SET report_period_end = NULL WHERE LENGTH(report_period_end) < 10") conn.commit()
change to backup name and procedure Named backup is changed to: projectname_BKP_yyyymmdd_hh.qda This also means multiple backups are produced less often; frequent backups were a nuisance issue for a few people. As a consequence, a new backup cannot overwrite another backup created within the same hour.
@@ -860,6 +860,10 @@ class MainWindow(QtWidgets.QMainWindow): """ Open an existing project. if set, also save a backup datetime stamped copy at the same time. Do not backup on a newly created project, as it wont contain data. + A backup is created if settings backuop is True. + The backup is deleted, if no changes occured. + Backups are created using the date and 24 hour suffix: _BKUP_yyyymmdd_hh + Backups are not replaced within the same hour. param: path: if path is "" then get the path from a dialog, otherwise use the supplied path newproject: yes or no if yes then do not make an initial backup @@ -921,12 +925,17 @@ class MainWindow(QtWidgets.QMainWindow): cur.execute("ALTER TABLE code_text ADD avid integer;") self.app.conn.commit() - # Save a datetime stamped backup + # Save a date and 24hour stamped backup if self.app.settings['backup_on_open'] == 'True' and newproject == "no": - nowdate = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") - backup = self.app.project_path[0:-4] + "_BACKUP_" + nowdate + ".qda" + nowdate = datetime.datetime.now().strftime("%Y%m%d_%H") # -%M-%S") + backup = self.app.project_path[0:-4] + "_BKUP_" + nowdate + ".qda" if self.app.settings['backup_av_files'] == 'True': + try: shutil.copytree(self.app.project_path, backup) + except FileExistsError as e: + msg = _("There is already a backup with this name") + print(str(e) + "\n" + msg) + logger.warning(_(msg) + "\n" + str(e)) else: shutil.copytree(self.app.project_path, backup, ignore=shutil.ignore_patterns('*.mp3','*.wav','*.mp4', '*.mov','*.ogg','*.wmv','*.MP3','*.WAV','*.MP4', '*.MOV','*.OGG','*.WMV')) self.ui.textEdit.append(_("WARNING: audio and video files NOT backed up. See settings."))
Fixed bug nesting 2 Fixed a bug that occurred when making complex nesting schemes
@@ -760,6 +760,10 @@ class SequencerWidget(QtGui.QWidget): current_sequence[depth_idx] = [] current_sequence[depth_idx - 1] = [] + if depth == next_depth: + temp_sequence[depth].extend(current_sequence[depth]) + current_sequence[depth] = [] + sequences = temp_sequence[0] for idx in range(len(sequences)):
Replacing the photo handler Replacing the photo handler and removing compatible_aspect_ratio() from upload_story_photo()
@@ -7,7 +7,7 @@ from requests_toolbelt import MultipartEncoder import json from . import config -from .api_photo import resize_image, compatible_aspect_ratio, get_image_size +from .api_photo import stories_shaper, resize_image, compatible_aspect_ratio, get_image_size def download_story(self, filename, story_url, username): @@ -29,12 +29,9 @@ def download_story(self, filename, story_url, username): def upload_story_photo(self, photo, upload_id=None): if upload_id is None: upload_id = str(int(time.time() * 1000)) - photo = resize_image(photo) + photo = stories_shaper(photo) if not photo: return False - if not compatible_aspect_ratio(get_image_size(photo)): - self.logger.error('Photo does not have a compatible photo aspect ratio.') - return False with open(photo, 'rb') as f: photo_bytes = f.read()
Fix misrendered docstring The API reference for `flask.Config.from_mapping` needs a newline to separate the summary from the return description. I also wrapped the docstring at 72 characters as suggested in CONTRIBUTING.rst.
@@ -275,8 +275,9 @@ class Config(dict): def from_mapping( self, mapping: t.Optional[t.Mapping[str, t.Any]] = None, **kwargs: t.Any ) -> bool: - """Updates the config like :meth:`update` ignoring items with non-upper - keys. + """Updates the config like :meth:`update` ignoring items with + non-upper keys. + :return: Always returns ``True``. .. versionadded:: 0.11
removing as it causes a bug with the upgrade path Possibly causes upgrade via web to break
```
# This attaches the armui_cfg globally to let the users use any bootswatch skin from cdn
armui_cfg = UISettings.query.filter_by().first()
app.jinja_env.globals.update(armui_cfg=armui_cfg)
```
@@ -24,9 +24,6 @@ from flask_login import LoginManager, login_required, current_user, login_user, login_manager = LoginManager() login_manager.init_app(app) -# This attaches the armui_cfg globally to let the users use any bootswatch skin from cdn -armui_cfg = UISettings.query.filter_by().first() -app.jinja_env.globals.update(armui_cfg=armui_cfg) @login_manager.user_loader
Fix for 151 empty internal attributes from LDAP store Fix for 151 "Can internal response attributes have empty values?". With this fix the LDAP attribute store will no longer set an internal attribute to be an empty list when the corresponding LDAP attribute returned with the record is empty.
@@ -167,8 +167,11 @@ class LdapAttributeStore(satosa.micro_services.base.ResponseMicroService): # Populate attributes as configured. for attr in search_return_attributes.keys(): if attr in record["attributes"]: + if record["attributes"][attr]: data.attributes[search_return_attributes[attr]] = record["attributes"][attr] satosa_logging(logger, logging.DEBUG, "{} Setting internal attribute {} with values {}".format(logprefix, search_return_attributes[attr], record["attributes"][attr]), context.state) + else: + satosa_logging(logger, logging.DEBUG, "{} Not setting internal attribute {} because value {} is null or empty".format(logprefix, search_return_attributes[attr], record["attributes"][attr]), context.state) # Populate input for NameID if configured. SATOSA core does the hashing of input # to create a persistent NameID.
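A minimal sketch of the resulting behaviour, with a made-up record (attribute names and values are illustrative, not from the patch):

```python
# Illustrative LDAP record and attribute mapping; values are made up.
record = {"attributes": {"mail": ["user@example.org"], "displayName": []}}
search_return_attributes = {"mail": "email", "displayName": "name"}

internal_attributes = {}
for attr, internal_name in search_return_attributes.items():
    if attr in record["attributes"]:
        if record["attributes"][attr]:
            # Non-empty LDAP value: copy it to the internal attribute.
            internal_attributes[internal_name] = record["attributes"][attr]
        # Empty value: the internal attribute is simply left unset.

print(internal_attributes)  # {'email': ['user@example.org']}
```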
Fix in MO view HG-- branch : feature/microservices
@@ -71,12 +71,10 @@ class ManagedObjectApplication(ExtModelApplication): ] def field_platform(self, o): - # return o.platform - return o.ex_platform + return o.platform def field_version(self, o): - # return o.get_attr("version") - return o.ex_version + return o.get_attr("version") def field_row_class(self, o): return o.object_profile.style.css_class_name if o.object_profile.style else ""
Clarify preferred style in luci-py README.md Context in comments of
@@ -69,6 +69,15 @@ Run the following to setup the code review tool and create your first review: Use `git cl help` and `git cl help <cmd>` for more details. +## Style + +The preferred style is PEP8 with two-space indent; that is, the [Chromium +Python +style](https://chromium.googlesource.com/chromium/src/+/master/styleguide/python/python.md), +except functions use `lowercase_with_underscores`. Use yapf (`git cl format`) +to autoformat new code. + + ## License This project is licensed under Apache v2.0 license. See LICENSE for details.
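A tiny illustration of that convention (two-space indent, `lowercase_with_underscores` function names); the function itself is made up:

```python
def format_greeting(name):
  # Two-space indent, per the Chromium Python style referenced above.
  return 'Hello, %s!' % name


print(format_greeting('luci'))
```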
Add link for telegram bot Add link for telegram bot
@@ -49,6 +49,8 @@ https://discordapp.com/oauth2/authorize?client_id=537526751170002946&permissions Send `!help` to open the bot help message. +Telegram Bot +https://t.me/epub_smelter_bot `<!-- Add your bot here -->` ### A3. Python package (for Windows, Mac, and Linux)
correction in "getting response from bot." section Correction in "getting a response from your chat bot" section. the code is not executing with 'None' in the bracket, hence added 'input()' to get inputs from the user. optionally added a print statement on next line to show the response from bot.
@@ -131,7 +131,8 @@ we can exit the loop and stop the program when a user enters `ctrl+c`. while True: try: - bot_input = bot.get_response(None) + bot_input = bot.get_response(input()) + print(bot_input) except(KeyboardInterrupt, EOFError, SystemExit): break
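A self-contained sketch of the corrected loop; the bot name is a placeholder and this assumes the chatterbot package is installed:

```python
from chatterbot import ChatBot

# Placeholder bot name for illustration.
bot = ChatBot("Example Bot")

# Read user input until ctrl+c (or EOF) exits the loop.
while True:
    try:
        bot_input = bot.get_response(input())
        print(bot_input)
    except (KeyboardInterrupt, EOFError, SystemExit):
        break
```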
Ignore summary when only injected batches When a candidate block only contains valid injected batches, do not produce a summary. The block would fail the criterion that it should contain at least one externally submitted batch.
@@ -361,7 +361,7 @@ impl CandidateBlock { .map(|(batch_id, _)| batch_id.clone()) .collect(); - let valid_batch_ids: Vec<String> = execution_results + let valid_batch_ids: HashSet<String> = execution_results .batch_results .into_iter() .filter(|(_, txns)| match txns { @@ -380,6 +380,11 @@ impl CandidateBlock { let mut bad_batches = vec![]; let mut pending_batches = vec![]; + if self.injected_batch_ids == valid_batch_ids { + // There only injected batches in this block + return Ok(None); + } + for batch in self.pending_batches.clone() { let header_signature = &batch.header_signature.clone(); if batch.trace {
Updated file ref:
@@ -8,45 +8,45 @@ automation: platform: mqtt topic: frigate/events conditions: - - "{{ trigger.payload_json["after"]["label"] == 'person' }}" - - "{{ 'yard' in trigger.payload_json["after"]["entered_zones"] }}" + - "{{ trigger.payload_json['after']['label'] == 'person' }}" + - "{{ 'yard' in trigger.payload_json['after']['entered_zones'] }}" action: - service: notify.mobile_app_pixel_3 data_template: - message: 'A {{trigger.payload_json["after"]["label"]}} has entered the yard.' + message: "A {{trigger.payload_json['after']['label']}} has entered the yard." data: - image: 'https://url.com/api/frigate/notifications/{{trigger.payload_json["after"]["id"]}}.jpg' - tag: '{{trigger.payload_json["after"]["id"]}}' + image: "https://url.com/api/frigate/notifications/{{trigger.payload_json['after']['id']}}.jpg" + tag: "{{trigger.payload_json['after']['id']}}" - alias: When a person leaves a zone named yard trigger: platform: mqtt topic: frigate/events conditions: - - "{{ trigger.payload_json["after"]["label"] == 'person' }}" - - "{{ 'yard' in trigger.payload_json["before"]["current_zones"] }}" - - "{{ not 'yard' in trigger.payload_json["after"]["current_zones"] }}" + - "{{ trigger.payload_json['after']['label'] == 'person' }}" + - "{{ 'yard' in trigger.payload_json['before']['current_zones'] }}" + - "{{ not 'yard' in trigger.payload_json['after']['current_zones'] }}" action: - service: notify.mobile_app_pixel_3 data_template: - message: 'A {{trigger.payload_json["after"]["label"]}} has left the yard.' + message: "A {{trigger.payload_json['after']['label']}} has left the yard." data: - image: 'https://url.com/api/frigate/notifications/{{trigger.payload_json["after"]["id"]}}.jpg' - tag: '{{trigger.payload_json["after"]["id"]}}' + image: "https://url.com/api/frigate/notifications/{{trigger.payload_json['after']['id']}}.jpg" + tag: "{{trigger.payload_json['after']['id']}}" - alias: Notify for dogs in the front with a high top score trigger: platform: mqtt topic: frigate/events conditions: - - "{{ trigger.payload_json["after"]["label"] == 'dog' }}" - - "{{ trigger.payload_json["after"]["camera"] == 'front' }}" - - "{{ trigger.payload_json["after"]["top_score"] > 0.98 }}" + - "{{ trigger.payload_json['after']['label'] == 'dog' }}" + - "{{ trigger.payload_json['after']['camera'] == 'front' }}" + - "{{ trigger.payload_json['after']['top_score'] > 0.98 }}" action: - service: notify.mobile_app_pixel_3 data_template: message: 'High confidence dog detection.' data: - image: 'https://url.com/api/frigate/notifications/{{trigger.payload_json["after"]["id"]}}.jpg' - tag: '{{trigger.payload_json["after"]["id"]}}' + image: "https://url.com/api/frigate/notifications/{{trigger.payload_json['after']['id']}}.jpg" + tag: "{{trigger.payload_json['after']['id']}}" ```
[fix] Stop loading all http responses into memory. fix Reverts
@@ -238,7 +238,6 @@ class Session(requests.Session): try: log.debug('Fetching URL %s with args %s and kwargs %s', url, args, kwargs) result = super(Session, self).request(method, url, *args, **kwargs) - log.trace('Contents for URL %s: %s', url, result.text) except requests.Timeout: # Mark this site in known unresponsive list set_unresponsive(url)
Update EPEL mirror link This update changes the previously listed mirror URL as the epel package is missing, which is causing container image builds to fail.
@@ -53,7 +53,7 @@ RUN set -ex && \ curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /build_output/deps v0.13.0 # stage RPM dependency binaries -RUN yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \ +RUN yum install -y https://download-ib01.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \ yum install -y --downloadonly --downloaddir=/build_output/deps/ dpkg clamav clamav-update RUN tar -z -c -v -C /build_output -f /anchore-buildblob.tgz .
fw/execution: rename things for clarity Rename "instrument_name" to "instrument" inside do_execute(), as ConfigManager.get_instruments() returns a list of Instrument objects, not names. To avoid a name clash, rename the imported instrument module to "instrumentation".
@@ -23,7 +23,7 @@ from copy import copy from datetime import datetime import wa.framework.signal as signal -from wa.framework import instrument +from wa.framework import instrument as instrumentation from wa.framework.configuration.core import Status from wa.framework.exception import TargetError, HostError, WorkloadError from wa.framework.exception import TargetNotRespondingError, TimeoutError # pylint: disable=redefined-builtin @@ -405,9 +405,9 @@ class Executor(object): context.output.write_state() self.logger.info('Installing instruments') - for instrument_name in context.cm.get_instruments(self.target_manager.target): - instrument.install(instrument_name, context) - instrument.validate() + for instrument in context.cm.get_instruments(self.target_manager.target): + instrumentation.install(instrument, context) + instrumentation.validate() self.logger.info('Installing output processors') pm = ProcessorManager()
catch nginx timeout in preindex for couchdb2, where we're proxying with nginx
@@ -35,7 +35,7 @@ def index_design_docs(db, docid, design_name): try: list(db.view(view, limit=0)) except RequestFailed as e: - if 'timeout' not in e.message: + if 'timeout' not in e.message and 'Time-out' not in e.message: raise else: break
Minor updates to drivers: .dscl -> .robust; fewer std gauge opts. Now "standard practice" gauge optimizations only include two spam weights with vSpam == 1 in both cases.
@@ -574,7 +574,7 @@ def do_long_sequence_gst_base(dataFilenameOrSet, targetGateFilenameOrSet, # and just keep (?) old estimates of all prior iterations (or use "blank" # sentinel once this is supported). - ret.add_estimate(gs_target, gs_start, gs_lsgst_list, scale_params, estlbl + ".dscl") + ret.add_estimate(gs_target, gs_start, gs_lsgst_list, scale_params, estlbl + ".robust") #Do final gauge optimization to data-scaled estimate also if gaugeOptParams != False: @@ -587,11 +587,11 @@ def do_long_sequence_gst_base(dataFilenameOrSet, targetGateFilenameOrSet, # ret.estimates[estlbl].add_gaugeoptimized(gaugeOptParams, go_gs_final) # # tNxt = _time.time() - # profiler.add_time('do_long_sequence_gst: dscl gauge optimization',tRef); tRef=tNxt + # profiler.add_time('do_long_sequence_gst: robust gauge optimization',tRef); tRef=tNxt #else: # add same gauge-optimized result as above - ret.estimates[estlbl + ".dscl"].add_gaugeoptimized(gaugeOptParams, go_gs_final) + ret.estimates[estlbl + ".robust"].add_gaugeoptimized(gaugeOptParams, go_gs_final) elif onBadFit == "do nothing": @@ -723,16 +723,16 @@ def do_stdpractice_gst(dataFilenameOrSet,targetGateFilenameOrSet, printer-1) #Gauge optimize to a variety of spam weights - for vSpam in [0,1]: - for spamWt in [1e-4,1e-2,1e-1]: + for vSpam in [1]: + for spamWt in [1e-4,1e-1]: ret.estimates[est_label].add_gaugeoptimized( {'itemWeights': {'gates':1, 'spam':spamWt}, 'validSpamPenalty': vSpam}, None, "Spam %g%s" % (spamWt, "+v" if vSpam else "")) #Gauge optimize data-scaled estimate also - if est_label + ".dscl" in ret.estimates: - ret.estimates[est_label + ".dscl"].add_gaugeoptimized( + if est_label + ".robust" in ret.estimates: + ret.estimates[est_label + ".robust"].add_gaugeoptimized( {'itemWeights': {'gates':1, 'spam':spamWt}, 'validSpamPenalty': vSpam}, None, "Spam %g%s" % (spamWt, "+v" if vSpam else ""))
Updated German Renewable Capacities Updated from the same source
], "capacity": { "battery storage": 280, - "biomass": 8550, + "biomass": 8560, "coal": 43950, "gas": 30500, "geothermal": 47, "hydro storage": 9810, "nuclear": 8114, "oil": 4380, - "solar": 54520, + "solar": 56040, "unknown": 3700, - "wind": 62590 + "wind": 63170 }, "contributors": [ "https://github.com/corradio", "https://github.com/bohne13", - "https://github.com/chiefymuc" + "https://github.com/chiefymuc", + "https://github.com/nessie2013" ], "parsers": { "consumption": "ENTSOE.fetch_consumption",
Fix openstack-tox-py39-with-oslo-master job DB writer and reader contexts are added to test case [0] to make sure updates are committed to DB before testing if objects have been deleted. [0] neutron.tests.unit.objects.test_quota.ReservationDbObjectTestCase.test_delete_expired Closes-Bug:
import datetime +from neutron_lib.db import api as db_api from oslo_utils import uuidutils from neutron.objects import quota -from neutron.tests import base as test_base from neutron.tests.unit.objects import test_base as obj_test_base from neutron.tests.unit import testlib_api @@ -59,16 +59,18 @@ class ReservationDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, reservation.create() return reservation - @test_base.unstable_test('bug/1988604') def test_delete_expired(self): dt = datetime.datetime.utcnow() resources = {'goals': 2, 'assists': 1} exp_date1 = datetime.datetime(2016, 3, 31, 14, 30) - res1 = self._create_test_reservation(resources, exp_date1) exp_date2 = datetime.datetime(2015, 3, 31, 14, 30) + with db_api.CONTEXT_WRITER.using(self.context): + res1 = self._create_test_reservation(resources, exp_date1) res2 = self._create_test_reservation(resources, exp_date2) + with db_api.CONTEXT_WRITER.using(self.context): self.assertEqual(2, self._test_class.delete_expired( self.context, dt, None)) + with db_api.CONTEXT_READER.using(self.context): objs = self._test_class.get_objects(self.context, id=[res1.id, res2.id]) self.assertEqual([], objs)
Fix New.subexprs Keys should not be names but strings
@@ -188,7 +188,7 @@ class New(AbstractExpression): @property def subexprs(self): - result = dict(self.assocs) + result = {str(k): v for k, v in self.assocs.items()} result['_type'] = self.static_type.name() return result
Add support for pushing built images to ECR Summary: Going forward we will push built images to ECR as well as DockerHub as part of our release process. Test Plan: Manual Reviewers: johann, rexledesma, catherinewu
+from typing import List + import click from dagster import __version__ as current_dagster_version from dagster import check CLI_HELP = """This CLI is used for building the various Dagster images we use in test """ +# We are waiting on our custom alias, and then this will be `public.ecr.aws/dagster` +AWS_ECR_REGISTRY = "public.ecr.aws/f8w6w7v8" + @click.group(help=CLI_HELP) def cli(): @@ -83,6 +88,21 @@ def push_all(name): image.push(python_version) +def push_to_registry(name: str, tags: List[str]): + check.str_param(name, "name") + check.list_param(tags, "tags", of_type=str) + + image = DagsterDockerImage(name) + + python_version = next(iter(image.python_versions)) + + local_image = image.local_image(python_version) + + for tag in tags: + execute_docker_tag(local_image, tag) + execute_docker_push(tag) + + @cli.command() @click.option("--name", required=True, help="Name of image to push") @click.option( @@ -94,31 +114,47 @@ def push_dockerhub(name, dagster_version): """Used for pushing k8s images to Docker Hub. Must be logged in to Docker Hub for this to succeed. """ - check.invariant( dagster_version == current_dagster_version, - desc="Current dagster version ({}) does not match provided arg ({})".format( - current_dagster_version, dagster_version + desc=( + f"Current dagster version ({current_dagster_version}) does not match provided arg " + f"({dagster_version})" ), ) + tags = [f"dagster/{name}:{current_dagster_version}", f"dagster/{name}:latest"] - image = DagsterDockerImage(name) + push_to_registry(name, tags) - python_version = next(iter(image.python_versions)) - local_image = image.local_image(python_version) [email protected]() [email protected]("--name", required=True, help="Name of image to push") [email protected]( + "--dagster-version", + required=True, + help="Version of image to push, must match current dagster version", +) +def push_ecr(name, dagster_version): + """Used for pushing k8s images to our public ECR. - # Tag image as Dagster version (plan to release this image w/ Dagster releases) - image_with_dagster_version_tag = "dagster/{image}:{tag}".format( - image=name, tag=current_dagster_version + You must be authed for ECR. Run: + + aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/f8w6w7v8 + """ + + check.invariant( + dagster_version == current_dagster_version, + desc=( + f"Current dagster version ({current_dagster_version}) does not match provided arg " + f"({dagster_version})" + ), ) - execute_docker_tag(local_image, image_with_dagster_version_tag) - execute_docker_push(image_with_dagster_version_tag) - # Also push latest tag - latest_tag = "dagster/{}:latest".format(name) - execute_docker_tag(local_image, latest_tag) - execute_docker_push(latest_tag) + tags = [ + f"{AWS_ECR_REGISTRY}/{name}:{current_dagster_version}", + f"{AWS_ECR_REGISTRY}/{name}:latest", + ] + + push_to_registry(name, tags) def main():
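A hypothetical invocation of the new helper, assuming `push_to_registry` and `AWS_ECR_REGISTRY` from the diff above are in scope; the image name and version are illustrative only:

```python
# Hypothetical values for illustration only.
name = "k8s-example-image"
version = "0.11.0"

# Mirrors what push_dockerhub and push_ecr do above: tag the locally
# built image for both registries, then push each tag.
tags = [
    f"dagster/{name}:{version}",
    f"dagster/{name}:latest",
    f"{AWS_ECR_REGISTRY}/{name}:{version}",
    f"{AWS_ECR_REGISTRY}/{name}:latest",
]
push_to_registry(name, tags)
```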
core: Allow deferral of going up In a GoingUpEvent handler, one can get a deferral. POX will not transition to the Up state until all the deferrals have been called.
@@ -128,7 +128,8 @@ pox.lib.revent.revent.handleEventException = _revent_exception_hook class GoingUpEvent (Event): """ Fired when system is going up. """ - pass + def get_deferral (self): + return self.source._get_go_up_deferral() class GoingDownEvent (Event): """ Fired when system is going down. """ @@ -197,6 +198,8 @@ class POXCore (EventMixin): self.starting_up = True self.components = {'core':self} + self._go_up_deferrals = set() + self._openflow_wanted = False self._handle_signals = handle_signals @@ -380,6 +383,30 @@ class POXCore (EventMixin): self._add_signal_handlers() + if not self._go_up_deferrals: + self._goUp_stage2() + + def _get_go_up_deferral (self): + """ + Get a GoingUp deferral + + By doing this, we are deferring progress starting at the GoingUp stage. + The return value should be called to allow progress again. + """ + o = object() + self._go_up_deferrals.add(o) + def deferral (): + if o not in self._go_up_deferrals: + raise RuntimeError("This deferral has already been executed") + self._go_up_deferrals.remove(o) + if not self._go_up_deferrals: + log.debug("Continuing to go up") + self._goUp_stage2() + + return deferral + + def _goUp_stage2 (self): + self.raiseEvent(UpEvent()) self._waiter_notify()
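A rough sketch of how an event handler might use this (listener registration is omitted; only `get_deferral()` and the returned callable come from the diff above):

```python
def _handle_GoingUpEvent(event):
    # Ask the core not to transition to the Up state yet.
    deferral = event.get_deferral()

    # ... finish whatever setup must complete before the system goes Up ...

    # Release the deferral; once every outstanding deferral has been
    # called, the core continues to the Up state.
    deferral()
```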
llvm, functions/Stability: Reorganize state structure Make better use of existing state_id infrastructure.
@@ -361,17 +361,12 @@ class Stability(ObjectiveFunction): my_params = super()._get_param_values(context) return (*my_params, self._metric_fct._get_param_values(context)) - def _get_state_struct_type(self, ctx): - my_state = ctx.get_state_struct_type(super()) - metric_state = ctx.get_state_struct_type(self._metric_fct) - transfer_state = ctx.get_state_struct_type(self.transfer_fct) if self.transfer_fct is not None else pnlvm.ir.LiteralStructType([]) - return pnlvm.ir.LiteralStructType([my_state, metric_state, transfer_state]) - - def _get_state_initializer(self, context): - my_state = super()._get_state_initializer(context) - metric_state = self._metric_fct._get_state_initializer(context) - transfer_state = self.transfer_fct._get_state_initializer(context) if self.transfer_fct is not None else tuple() - return (my_state, metric_state, transfer_state) + def _get_state_ids(self): + return super()._get_state_ids() + ["metric_fct"] + + def _get_state_values(self, context=None): + my_values = super()._get_state_values(context) + return (*my_values, self._metric_fct._get_state_values(context)) def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out): # Dot product @@ -405,7 +400,7 @@ class Stability(ObjectiveFunction): # Distance Function metric_params = ctx.get_param_ptr(self, builder, params, "metric_fct") - metric_state = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(1)]) + metric_state = ctx.get_state_ptr(self, builder, state, "metric_fct") metric_out = arg_out builder.call(metric_fun, [metric_params, metric_state, metric_in, metric_out]) return builder
Update Quickstart with requested changes Addition of 'but'; removal of the trailing phrase inside parentheses; 'Url' replaced with 'URL' as it's still an acronym, so it's written with capital letters
@@ -16,7 +16,7 @@ C. Assign rwx(read/write/execute) permissions to the user for default database d $ sudo chmod -R 700 /data/db ``` -D. Run MongoDB (do not close this terminal): +D. Run MongoDB (but do not close this terminal): ```text $ sudo mongod --replSet=bigchain-rs ``` @@ -57,7 +57,7 @@ I. Run BigchainDB Server: $ bigchaindb start ``` -J. Verify BigchainDB Server setup by visiting the BigchainDB Root Url in your browser (if not changed during installation): +J. Verify BigchainDB Server setup by visiting the BigchainDB Root URL in your browser: ```text $ http://127.0.0.1:9984/ ```
- markets.lower() did not work Some lines were a bit confusing; the if branch (never triggered) was removed
@@ -449,8 +449,7 @@ class BinanceWebSocketApiManager(threading.Thread): else: return False else: - if market == "!userData" or market == "!miniTicker": - query += market + "@" + channel + "/" + return False else: query += market.lower() + "@" + channel + "/" uri = self.websocket_base_uri + str(query)
Remove unused self.host_repos_path from plugin InjectYumRepoPlugin After the removal, no need to keep __init__ there. So, remove it as well.
@@ -55,17 +55,6 @@ class InjectYumRepoPlugin(PreBuildPlugin): key = "inject_yum_repo" is_allowed_to_fail = False - def __init__(self, tasker, workflow): - """ - constructor - - :param tasker: ContainerTasker instance - :param workflow: DockerBuildWorkflow instance - """ - # call parent constructor - super(InjectYumRepoPlugin, self).__init__(tasker, workflow) - self.host_repos_path = os.path.join(self.workflow.builder.df_dir, RELATIVE_REPOS_PATH) - def run(self): """ run the plugin
Update ncbi-covid-19.yaml Updated the "Frequency of updates" and fixed a typo
Name: COVID-19 Genome Sequence Dataset -Description: A centralized sequence repository for all strains of novel corona virus (SARS-CoV-2) submitted to the National Center for Biotechnology Information (NCBI). Included are both the original sequences submitted by the principal investigator as well as SRA-processed sequences that require the SRA Toolkit for anaylsis. +Description: A centralized sequence repository for all strains of novel corona virus (SARS-CoV-2) submitted to the National Center for Biotechnology Information (NCBI). Included are both the original sequences submitted by the principal investigator as well as SRA-processed sequences that require the SRA Toolkit for analysis. Documentation: https://www.ncbi.nlm.nih.gov/sra/docs/sra-aws-download/ Contact: https://support.nlm.nih.gov/support/create-case/ ManagedBy: "[National Library of Medicine (NLM)](http://nlm.nih.gov/)" -UpdateFrequency: TBD +UpdateFrequency: Hourly Tags: - aws-pds - bioinformatics
Fix client library URL. Closes
@@ -10,7 +10,7 @@ variants, to support your global user base. .. _Beta: https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/README.rst .. _Cloud Speech API: https://cloud.google.com/speech -.. _Client Library Documentation: https://googlecloudplatform.github.io/google-cloud-python/stable/speech/usage.html +.. _Client Library Documentation: https://googlecloudplatform.github.io/google-cloud-python/latest/speech/index.html .. _Product Documentation: https://cloud.google.com/speech Quick Start
add windows-latest and macos-latest to build matrix also: apt update before apt install
@@ -12,6 +12,15 @@ jobs: - name: linux-3.10 python-version: "3.10" os: ubuntu-latest + - name: windows-3.10 + python-version: "3.10" + os: windows-latest + - name: macos-11-3.10-skip-exe + python-version: "3.10" + os: macos-11 + - name: macos-10.5-3.10-skip-exe + python-version: "3.10" + os: macos-10.15 - name: linux-3.10-flake8-skip-exe python-version: "3.10" os: ubuntu-latest @@ -37,9 +46,22 @@ jobs: python-version: pypy-3.6 os: ubuntu-latest steps: - - name: Apt install system dependencies (unless -skip-exe) - run: sudo apt install graphviz + - name: Apt install system dependencies (linux unless -skip-exe) + if: ${{ startsWith(matrix.name, 'linux-') && !contains(matrix.name, '-skip-exe') }} + run: | + sudo apt-get update + sudo apt-get install graphviz + - name: Brew install system dependencies (macos unless -skip-exe) + if: ${{ startsWith(matrix.name, 'macos-') && !contains(matrix.name, '-skip-exe') }} + run: | + brew update --preinstall + brew install graphviz + - name: Choco install system dependencies (windows unless -skip-exe) + if: ${{ startsWith(matrix.name, 'windows-') && !contains(matrix.name, '-skip-exe') }} + run: choco install graphviz + - name: Run dot -V (unless -skip-exe) if: ${{ !contains(matrix.name, '-skip-exe') }} + run: dot -V - name: Checkout source repo uses: actions/checkout@v2 - name: Setup Python ${{ matrix.python-version }} @@ -51,29 +73,29 @@ jobs: - name: Pip install dependencies run: pip install .[test] codecov flake8 - name: Run full tests (unless -skip-exe) - run: python run-tests.py if: ${{ !contains(matrix.name, '-skip-exe') }} + run: python run-tests.py - name: Run tests with --skip-exe (always included) run: python run-tests.py --skip-exe --cov-append - name: Upload test coverage uses: codecov/codecov-action@v1 - name: Run try-examples.py (unless -skip-exe) - run: python try-examples.py if: ${{ !contains(matrix.name, '-skip-exe') }} + run: python try-examples.py continue-on-error: true - name: Run update-help.py (if -update-help or -3.9) - run: python update-help.py if: ${{ contains(matrix.name, '-update-help') || contains(matrix.name, '-3.9') }} + run: python update-help.py continue-on-error: true - name: Run pytype (if -pytype) + if: ${{ contains(matrix.name, '-pytype') }} run: | pip install pytype pytype graphviz - if: ${{ contains(matrix.name, '-pytype') }} continue-on-error: true - name: Run flake8 (if -flake8) - run: python -m flake8 if: ${{ contains(matrix.name, '-flake8') }} + run: python -m flake8 continue-on-error: true - name: Upload ${{ matrix.name }} artifacts uses: actions/upload-artifact@v2
Unlisten Gracefully. Calls the process queue for the final time after shutdown, finishing up any remaining processing of the queue and any removed listeners, etc.
@@ -647,12 +647,7 @@ class Kernel: if channel is None: channel = self.get_context('/').channel('shutdown') - self.state = STATE_END - - # Suspend Signals - def signal(code, *message): - channel(_("Suspended Signal: %s for %s" % (code, message))) - self.signal = signal + self.state = STATE_END # Terminates the Scheduler. # Close Modules for context_name in list(self.contexts): @@ -671,6 +666,12 @@ class Kernel: channel(_("Detaching %s: %s") % (attached_name, str(obj))) context.deactivate(attached_name, channel=channel) + # Suspend Signals + def signal(code, *message): + channel(_("Suspended Signal: %s for %s" % (code, message))) + self.signal = signal + self.process_queue() # Process last events. + # Context Flush and Shutdown for context_name in list(self.contexts): context = self.contexts[context_name]
docs: Added PyCharm to the list of editors. PyCharm is great for writing Python.
@@ -128,6 +128,7 @@ don't have a favorite, here are some suggestions: * [vim](https://www.vim.org/) * [spacemacs](https://github.com/syl20bnr/spacemacs) * [sublime](https://www.sublimetext.com/) +* [PyCharm](https://www.jetbrains.com/pycharm/) Next, follow our [Git and GitHub Guide](../git/index.md) to clone and configure your fork of zulip on your local computer.
Update unit-timeevolution.cpp Try to isolate offending part of tests. When running ./test-timeevolution I got Dopri54 ellapsed time: 94998us odeint ellapsed time: 81332us # Graph created # Number of nodes = 1 No configurations specified for time evolution
@@ -338,6 +338,7 @@ TEST_CASE("Comparison with boost::odeint for Schroedinger eq", "[time-evolution] } #endif +/* TEST_CASE("Time evolution driver produces sensible output", "[time-evolution]") { json pars; @@ -412,3 +413,4 @@ TEST_CASE("Time evolution driver produces sensible output", "[time-evolution]") } } } +*/
tox: switch from pep8 to flake8 Added a lot more errors/warnings to the ignore list so that things would still pass. Follow-on commits will back off these one by one to make the review process easier.
@@ -13,7 +13,7 @@ commands = [testenv:pep8] deps = - pep8 + flake8 commands = - {envbindir}/pep8 -r --show-source --max-line-length=84 --ignore=E123,E124,E126,E127,E128,E303,E302 pyrax/ + {envbindir}/flake8 --show-source --max-line-length=84 --ignore=E123,E124,E126,E127,E128,E303,E302,W606,F841,E301,F401,E305,F811,F812,W504,W605,F403,E722,F405,E999 pyrax/
Note that Quart-Motor has been released This allows for MongoDB connections.
@@ -19,6 +19,8 @@ here, broadcasting via WebSockets or SSE. - `Quart-minify <https://github.com/AceFire6/quart_minify/>`_ minify quart response for HTML, JS, CSS and less. +- `Quart-Motor <https://github.com/marirs/quart-motor>`_ Motor + (MongoDB) support for Quart applications. - `Quart-OpenApi <https://github.com/factset/quart-openapi/>`_ RESTful API building. - `Quart-Rate-Limiter
faster tril_indices nonzero is slow. Use np.indices and np.broadcast_to to speed it up.
@@ -894,7 +894,10 @@ def tril_indices(n, k=0, m=None): [-10, -10, -10, -10]]) """ - return nonzero(tri(n, m, k=k, dtype=bool)) + tri = np.tri(n, m=m, k=k, dtype=bool) + + return tuple(np.broadcast_to(inds, tri.shape)[tri] + for inds in np.indices(tri.shape, sparse=True)) def _trilu_indices_form_dispatcher(arr, k=None): @@ -1010,7 +1013,10 @@ def triu_indices(n, k=0, m=None): [ 12, 13, 14, -1]]) """ - return nonzero(~tri(n, m, k=k-1, dtype=bool)) + tri = ~np.tri(n, m, k=k - 1, dtype=bool) + + return tuple(np.broadcast_to(inds, tri.shape)[tri] + for inds in np.indices(tri.shape, sparse=True)) @array_function_dispatch(_trilu_indices_form_dispatcher)
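A small check (not part of the patch) showing that the broadcast-and-mask approach produces the same index arrays as the old `nonzero`-based one:

```python
import numpy as np

n, m, k = 4, 5, 0

# Old approach: build the boolean lower-triangle mask, then call nonzero().
old = np.nonzero(np.tri(n, m, k=k, dtype=bool))

# New approach: broadcast sparse index grids over the mask and select with it.
tri = np.tri(n, m, k=k, dtype=bool)
new = tuple(np.broadcast_to(inds, tri.shape)[tri]
            for inds in np.indices(tri.shape, sparse=True))

# Both yield the same (row_indices, col_indices) pair.
assert all(np.array_equal(a, b) for a, b in zip(old, new))
```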
ceres-solver: setup options in configure + validate Following up from
@@ -69,6 +69,8 @@ class ceressolverConan(ConanFile): def configure(self): if self.options.shared: del self.options.fPIC + if self.options.use_gflags: + self.options["gflags"].nothreads = False def requirements(self): self.requires("eigen/3.4.0") @@ -76,7 +78,6 @@ class ceressolverConan(ConanFile): self.requires("glog/0.5.0") if self.options.use_gflags: self.requires("gflags/2.2.2") - self.options["gflags"].nothreads = False if self.options.use_TBB: self.requires("tbb/2020.3") @@ -97,6 +98,8 @@ class ceressolverConan(ConanFile): raise ConanInvalidConfiguration("Ceres-solver only links against the release version of glog") if self.options.use_glog and not self.options.use_gflags: #At this stage we can't check the value of self.options["glog"].with_gflags so we asume it is true because is the default value raise ConanInvalidConfiguration("To depend on glog built with gflags (Default behavior) set use_gflags=True, otherwise Ceres may fail to link due to missing gflags symbols.") + if self.options.use_gflags and self.options["gflags"].nothreads: + raise ConanInvalidConfiguration("Ceres-solver requires options gflags:nothreads=False") # This could use a source as to why if tools.Version(self.version) >= "2.0": # 1.x uses ceres-solver specific FindXXX.cmake modules self.generators.append("cmake_find_package")