Columns: message (string, lengths 13–484) and diff (string, lengths 38–4.63k)
Avoid Name_Error exceptions when source file cannot be read TN:
@@ -2749,6 +2749,22 @@ package body ${ada_lib_name}.Analysis.Implementation is -- This is where lexing occurs, so this is where we get most "setup" -- issues: missing input file, bad charset, etc. If we have such an -- error, catch it, turn it into diagnostics and abort parsing. + -- + -- As it is quite common, first check if the file is readable: if not, + -- don't bother opening it and directly emit a diagnostic. This avoid + -- pointless exceptions which harm debugging. + + if Input.Kind = File and then not Input.Filename.Is_Readable then + declare + Name : constant String := Basename (Unit); + begin + Traces.Trace + (Main_Trace, "WARNING: File is not readable: " & Name); + Add_Diagnostic ("Cannot read " & Name); + Rotate_TDH; + return; + end; + end if; declare use Ada.Exceptions;
fix: double urlencoding of values This was breaking URLs for complex filters
@@ -1461,9 +1461,8 @@ frappe.views.ListView = class ListView extends frappe.views.BaseList { get_url_with_filters() { const query_params = this.get_filters_for_args() .map((filter) => { - filter[3] = encodeURIComponent(filter[3]); if (filter[2] === "=") { - return `${filter[1]}=${filter[3]}`; + return `${filter[1]}=${encodeURIComponent(filter[3])}`; } return [ filter[1],
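The patch above is JavaScript, but the bug it fixes is language-agnostic. A minimal sketch of the same double-encoding failure in Python, using urllib.parse.quote as a stand-in for encodeURIComponent (the filter value here is illustrative, not from the project):

from urllib.parse import quote, unquote

value = "status=open&priority>2"              # filter value with reserved characters

encoded_once = quote(value, safe="")          # correct: '=' -> '%3D', '&' -> '%26'
encoded_twice = quote(encoded_once, safe="")  # bug: the '%' signs become '%25'

# A single decode restores the original only if it was encoded exactly once.
assert unquote(encoded_once) == value
assert unquote(encoded_twice) != value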
Extend navigation tags with option to display `deprecated` badge This will help visually communicate a deprecated feature to the user.
@@ -7,16 +7,20 @@ register = template.Library() def navbar_template(title, url, active=False, disabled=False, - dropdown=False): + dropdown=False, deprecated=False): """Compose Bootstrap v4 <li> element for top navigation bar. List item can be added one or more class attributes: * active: to highlight currently visited tab * disabled: to disable access, for example for users without specific permissions. + * dropdown: change styling a little bit to work with dropdowns in + Bootstrap4 + * deprecated: add a deprecated badge to indicate old features """ - screen_reader = '' classes = [] + screen_reader = '' + badge = '' if disabled: classes.append('disabled') @@ -25,18 +29,21 @@ def navbar_template(title, url, active=False, disabled=False, classes.append('active') screen_reader = mark_safe(' <span class="sr-only">(current)</span>') + if deprecated: + badge = mark_safe('<span class="badge badge-secondary">deprecated</span> ') + classes = ' '.join(classes) template = ('<li class="nav-item {classes}"><a class="nav-link" ' - 'href="{url}">{title} {screen_reader}</a></li>') + 'href="{url}">{badge}{title}{screen_reader}</a></li>') if dropdown: template = ('<a class="dropdown-item {classes}" href="{url}">' - '{title} {screen_reader}</a>') - return format_html(template, classes=classes, url=url, + '{badge}{title}{screen_reader}</a>') + return format_html(template, classes=classes, url=url, badge=badge, title=title, screen_reader=screen_reader) @register.simple_tag(takes_context=True) -def navbar_element(context, title, url_name, dropdown=False): +def navbar_element(context, title, url_name, dropdown=False, deprecated=False): """ Insert Bootstrap's `<li><a>...</a></li>` with specific classes and accessibility elements. This tag takes a URL name (with no arguments) that @@ -45,7 +52,7 @@ def navbar_element(context, title, url_name, dropdown=False): url = reverse(url_name) active = context['request'].path == url return mark_safe(navbar_template(title, url, active=active, - dropdown=dropdown)) + dropdown=dropdown, deprecated=deprecated)) @register.simple_tag(takes_context=True)
Reduce concurrency to match number of CPUs This got missed in [1]. [1]:
@@ -17,7 +17,7 @@ case $NOTIFY_APP_NAME in -Q database-tasks,job-tasks 2> /dev/null ;; delivery-worker-research) - exec scripts/run_app_paas.sh celery -A run_celery.notify_celery worker --loglevel=INFO --concurrency=5 \ + exec scripts/run_app_paas.sh celery -A run_celery.notify_celery worker --loglevel=INFO --concurrency=4 \ -Q research-mode-tasks 2> /dev/null ;; delivery-worker-sender)
Find packages. Fixes an error when installing from master because some packages ('models' & 'mixins') were not being included.
@@ -8,7 +8,7 @@ import sys from distutils.util import strtobool -from setuptools import setup +from setuptools import setup, find_packages from setuptools.command.test import test as TestCommand @@ -61,7 +61,7 @@ setup_kwargs = { "Build fast. Run fast." ), "long_description": long_description, - "packages": ["sanic"], + "packages": find_packages(), "package_data": {"sanic": ["py.typed"]}, "platforms": "any", "python_requires": ">=3.7",
web: Don't re-set cookieguard cookie in bouncer If the bouncer has succeeded, the cookie is already set. There's no reason to set it again.
@@ -186,15 +186,10 @@ class POXCookieGuardMixin (object): cgc = cookies.get(POX_COOKIEGUARD_COOKIE_NAME) if cgc and cgc.value == self._get_cookieguard_cookie(): if requested.startswith(self._pox_cookieguard_bouncer + "?"): - # See below for what this bouncing dumbness is log.debug("POX CookieGuard cookie is valid -- bouncing") qs = requested.split("?",1)[1] self.send_response(307, "Temporary Redirect") - self.send_header("Set-Cookie", - "%s=%s; SameSite=Strict; HttpOnly; path=/" - % (POX_COOKIEGUARD_COOKIE_NAME, - self._get_cookieguard_cookie())) self.send_header("Location", unquote_plus(qs)) self.end_headers() return False
[Github] Update Github stale bot Disable automatic issue closing Correct the labels to exempt.
@@ -22,10 +22,10 @@ jobs: with: repo-token: ${{ secrets.GITHUB_TOKEN }} days-before-issue-stale: 30 - days-before-issue-close: 7 + days-before-issue-close: -1 # disable issue close days-before-pr-stale: -1 # disable stale bot on pr days-before-pr-close: -1 # disable stale bot on pr stale-issue-message: 'This issue has been automatically marked as stale due to lack of activity. It will be closed if no further activity occurs. Thank you' close-issue-message: 'This issue is closed due to lack of activity. Feel free to reopen it if you still have questions.' stale-issue-label: 'stale-issue' - exempt-issue-labels: 'RFC,pinned,bug,dist DGL,doc,enhancement,feature request,help wanted,model request,wontfix,Suspended,system performance,windows,PyTorch,MXNet,TensorFlow' + exempt-issue-labels: 'Roadmap,RFC,pinned,bug:confirmed,bug:unconfirmed,dist DGL,doc,enhancement,feature request,help wanted,model request,wontfix,Suspended,system performance,windows,PyTorch,MXNet,TensorFlow'
sync: fix missing import for -q Some refactors during review dropped this import when it was reworked, but it's still needed when using the --quiet setting. Tested-by: Mike Frysinger
@@ -51,7 +51,7 @@ import git_superproject import gitc_utils from project import Project from project import RemoteSpec -from command import Command, MirrorSafeCommand +from command import Command, MirrorSafeCommand, WORKER_BATCH_SIZE from error import RepoChangedException, GitError, ManifestParseError import platform_utils from project import SyncBuffer
Fix - added UNC path to zipfile command in Harmony. Extracting to an overly long path resulted in a 'File not found' issue (side effect was that files in the offending directory were skipped). The UNC path seems to help.
@@ -322,7 +322,9 @@ class HarmonySubmitDeadline( ) unzip_dir = (published_scene.parent / published_scene.stem) with _ZipFile(published_scene, "r") as zip_ref: - zip_ref.extractall(unzip_dir.as_posix()) + # UNC path (//?/) added to minimalize risk with extracting + # to large file paths + zip_ref.extractall("//?/" + str(unzip_dir.as_posix())) # find any xstage files in directory, prefer the one with the same name # as directory (plus extension)
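A standalone sketch of the extended-length path trick used above, assuming Windows as the target platform; the canonical prefix is the backslash form \\?\ (the patch spells it with forward slashes against a POSIX-style path). Function and variable names are illustrative:

import os
import zipfile
from pathlib import Path

def extract_with_long_path_prefix(archive: Path, target_dir: Path) -> None:
    """Extract an archive, prefixing the destination with the Windows
    extended-length path marker so entries deeper than the legacy MAX_PATH
    limit can still be written."""
    target = str(target_dir.resolve())
    if os.name == "nt" and not target.startswith("\\\\?\\"):
        target = "\\\\?\\" + target   # only meaningful on Windows
    with zipfile.ZipFile(archive, "r") as zip_ref:
        zip_ref.extractall(target)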
message view: Remove unnecessary expectOne check in tippyjs. This check was not needed, as it is possible to have zero edit message buttons when a message fails. So it raised unnecessary errors on hovering over icons of those failed messages.
@@ -89,7 +89,7 @@ export function initialize() { // content from it. // // TODO: Change the template structure so logic is unnecessary. - const edit_button = elem.find("i.edit_content_button").expectOne(); + const edit_button = elem.find("i.edit_content_button"); content = edit_button.attr("data-tippy-content"); } instance.setContent(content);
Update conf.py edit version in docs' conf
@@ -26,7 +26,7 @@ author = u'Argonne' # The short X.Y version version = u'' # The full version, including alpha/beta/rc tags -release = u'0.1' +release = u'0.0.3' # -- General configuration ---------------------------------------------------
codestyle: Fix D210 D210: No whitespaces allowed surrounding docstring text
@@ -1524,7 +1524,8 @@ class ControlMechanism(ModulatoryMechanism_Base): return control_signal def _check_for_duplicates(self, control_signal, control_signals, context): - """ Check that control_signal is not a duplicate of one already instantiated for the ControlMechanism + """ + Check that control_signal is not a duplicate of one already instantiated for the ControlMechanism Can happen if control of parameter is specified in constructor for a Mechanism and also in the ControlMechanism's **control** arg
Group.append: explicit first argument Don't use args[0].
@@ -116,17 +116,17 @@ class Group(object): for act in activities: propagate_attribute(act, 'raises_on_failure', self.raises_on_failure) - def append(self, *args, **kwargs): - if isinstance(args[0], (Submittable, Group)): + def append(self, submittable, *args, **kwargs): + if isinstance(submittable, (Submittable, Group)): if self.raises_on_failure is not None: - propagate_attribute(args[0], 'raises_on_failure', self.raises_on_failure) - self.activities.append(args[0]) - elif isinstance(args[0], Activity): + propagate_attribute(submittable, 'raises_on_failure', self.raises_on_failure) + self.activities.append(submittable) + elif isinstance(submittable, Activity): if self.raises_on_failure is not None: - propagate_attribute(args[0], 'raises_on_failure', self.raises_on_failure) - self.activities.append(ActivityTask(*args, **kwargs)) + propagate_attribute(submittable, 'raises_on_failure', self.raises_on_failure) + self.activities.append(ActivityTask(submittable, *args, **kwargs)) else: - raise ValueError('{} should be a Submittable, Group, or Activity'.format(args[0])) + raise ValueError('{} should be a Submittable, Group, or Activity'.format(submittable)) def submit(self, executor): return GroupFuture(self.activities, executor, self.max_parallel)
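A toy illustration (not simpleflow's real classes) of what the explicit first argument buys: a missing argument now fails with a clear TypeError naming `submittable` instead of an IndexError on args[0], and keyword calls become possible:

class Group:
    def __init__(self):
        self.activities = []

    def append(self, submittable, *args, **kwargs):
        # Extra *args/**kwargs are still forwarded, but the required
        # argument now has a name of its own.
        self.activities.append((submittable, args, kwargs))

g = Group()
g.append("send_email", "[email protected]", retries=3)   # extra args still accepted
g.append(submittable="cleanup")                     # keyword call now works
try:
    g.append()
except TypeError as exc:
    print(exc)   # append() missing 1 required positional argument: 'submittable'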
Documentation: Add a logo to the REST API docs The REST API should use the awesome Rucio logo.
@@ -101,6 +101,11 @@ spec = APISpec( "name": "Apache 2.0", "url": "https://www.apache.org/licenses/LICENSE-2.0.html", }, + "x-logo": { + "url": "http://rucio.cern.ch/documentation/img/rucio_horizontaled_black_cropped.svg", + "backgroundColor": "#FFFFFF", + "altText": "Rucio logo" + }, }, )
Fix copy/paste error in docs The correct method for group listing should be `list_groups_by_name`
@@ -354,7 +354,7 @@ List Groups By Name import hvac client = hvac.Client() - list_response = client.secrets.identity.list_entities_by_name() + list_response = client.secrets.identity.list_groups_by_name() group_keys = list_response['data']['keys'] print('The following group names are currently configured: {keys}'.format(keys=group_keys))
revert: revert the original code and remove format_app from the if block
@@ -122,6 +122,14 @@ class FormplayerMain(View): apps = filter(None, apps) apps = filter(lambda app: app.get('cloudcare_enabled') or self.preview, apps) apps = filter(lambda app: app_access.user_can_access_app(user, app), apps) + role = None + try: + role = user.get_role(domain) + except DomainMembershipError: + # User has access via domain mirroring + pass + if role: + apps = [app for app in apps if role.permissions.view_web_app(app)] apps = [_format_app(app) for app in apps if app['_id']] apps = sorted(apps, key=lambda app: app['name']) return apps
DOC adjusted number of continuous distributions in scipy/doc/source/tutorial number of distributions increased by one in stats.rst
@@ -102,7 +102,7 @@ introspection: >>> dist_discrete = [d for d in dir(stats) if ... isinstance(getattr(stats, d), stats.rv_discrete)] >>> print('number of continuous distributions: %d' % len(dist_continu)) - number of continuous distributions: 96 + number of continuous distributions: 97 >>> print('number of discrete distributions: %d' % len(dist_discrete)) number of discrete distributions: 13
Add Udemy (instructor) API Fix auth value
@@ -1073,6 +1073,7 @@ API | Description | Auth | HTTPS | CORS | | [Quotes on Design](https://quotesondesign.com/api/) | Inspirational Quotes | No | Yes | Unknown | | [Stoicism Quote](https://github.com/tlcheah2/stoic-quote-lambda-public-api) | Quotes about Stoicism | No | Yes | Unknown | | [Traitify](https://app.traitify.com/developer) | Assess, collect and analyze Personality | No | Yes | Unknown | +| [Udemy(instructor)](https://www.udemy.com/developers/instructor/) | API for instructors on Udemy | `apiKey` | Yes | Unknown | | [Vadivelu HTTP Codes](https://vadivelu.anoram.com/) | On demand HTTP Codes with images | No | Yes | No | | [Zen Quotes](https://zenquotes.io/) | Large collection of Zen quotes for inspiration | No | Yes | Yes |
Disabled embed test from the docs on Py3.4 It requires Py_DecodeLocale which appears in 3.5. This is causing it to fail on Windows. It's somehow passing on Linux for reasons that I don't understand (but it really shouldn't be)
@@ -484,6 +484,7 @@ VER_DEP_MODULES = { 'run.pep526_variable_annotations', # typing module 'run.test_exceptions', # copied from Py3.7+ 'run.time_pxd', # _PyTime_GetSystemClock doesn't exist in 3.4 + 'embedding.embedded', # From the docs, needs Py_DecodeLocale ]), (3,7): (operator.lt, lambda x: x in ['run.pycontextvar', 'run.pep557_dataclasses', # dataclasses module
Address minor Black issues Fixes a couple of very minor docstring nits raised by an updated version of Black.
@@ -26,7 +26,7 @@ class SoCoPlugin: @property def name(self): - """ human-readable name of the plugin """ + """Human-readable name of the plugin""" raise NotImplementedError("Plugins should overwrite the name property") @classmethod
docs: Use 1Lbb DOI in contrib download docstring * Use DOI in docstring example for pyhf contrib download - c.f.
@@ -46,7 +46,7 @@ def download(archive_url, output_directory, verbose, force, compress): .. code-block:: shell - $ pyhf contrib download --verbose https://www.hepdata.net/record/resource/1408476?view=true 1Lbb-likelihoods + $ pyhf contrib download --verbose https://doi.org/10.17182/hepdata.90607.v3/r3 1Lbb-likelihoods \b 1Lbb-likelihoods/patchset.json
Lazily create Button custom_ids in decorator interface The previous code would make two separate instances share the custom_id which might have been undesirable behaviour
@@ -267,11 +267,9 @@ def button( """ def decorator(func: ItemCallbackType) -> ItemCallbackType: - nonlocal custom_id if not inspect.iscoroutinefunction(func): raise TypeError('button function must be a coroutine function') - custom_id = custom_id or os.urandom(32).hex() func.__discord_ui_model_type__ = Button func.__discord_ui_model_kwargs__ = { 'style': style,
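A self-contained sketch of the eager-versus-lazy default at play here (the decorator below is illustrative, not discord.py's actual implementation): generating the random ID when the decorator runs bakes a single value into the function, so every object later built from it shares that ID; keeping None and generating per instance avoids the sharing.

import os

def button(custom_id=None):
    """Toy decorator: records the desired custom_id on the function."""
    def decorator(func):
        # Keep None here; doing `custom_id = custom_id or os.urandom(...)` would
        # run once at decoration time and the value would be shared forever.
        func.model_kwargs = {"custom_id": custom_id}
        return func
    return decorator

def build_item(func):
    """Toy per-instance construction step: fill in a fresh ID lazily."""
    kwargs = dict(func.model_kwargs)
    if kwargs["custom_id"] is None:
        kwargs["custom_id"] = os.urandom(16).hex()
    return kwargs

@button()
def on_click():
    pass

first, second = build_item(on_click), build_item(on_click)
print(first["custom_id"] != second["custom_id"])   # True: no shared ID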
Use more semantic function name. Add comments.
@@ -644,11 +644,20 @@ function saveAttemptLog(store) { function saveAndStoreAttemptLog(store) { const attemptLogId = store.state.core.logging.attempt.id; const attemptLogItem = store.state.core.logging.attempt.item; - const storeAttemptLog = () => + /* + * Create a 'same item' check instead of same page check, which only allows the resulting save + * payload to be set if two conditions are met: firstly, that at the time the save was + * initiated, the attemptlog did not have an id, we need this id for future updating saves, + * but no other information saved to the server needs to be persisted back into the vuex store; + * secondly, we check that the item id when the save has resolved is the same as when the save + * was initiated, ensuring that we are not overwriting the vuex attemptlog representation for a + * different question. + */ + const sameItemAndNoLogIdCheck = () => !attemptLogId && attemptLogItem === store.state.core.logging.attempt.item; return saveAttemptLog(store).only( - storeAttemptLog, + sameItemAndNoLogIdCheck, newAttemptLog => { // mainly we want to set the attemplot id, so we can PATCH subsequent save on this attemptLog store.dispatch('SET_LOGGING_ATTEMPT_STATE', _attemptLoggingState(newAttemptLog));
Set the default permissions See
@@ -222,6 +222,7 @@ class UserenaBaseProfile(models.Model): """ abstract = True + default_permissions = ('add', 'change', 'delete') permissions = PROFILE_PERMISSIONS def __str__(self): @@ -353,4 +354,5 @@ class UserenaLanguageBaseProfile(UserenaBaseProfile): class Meta: abstract = True + default_permissions = ('add', 'change', 'delete') permissions = PROFILE_PERMISSIONS
Improve logging in building of nova data model Improves logging during the building of the nova data model
@@ -268,6 +268,9 @@ class ModelBuilder(object): # New in nova version 2.53 instances = getattr(node_info, "servers", None) self.add_instance_node(node_info, instances) + else: + LOG.error("compute_node from aggregate / availability_zone " + "could not be found: {0}".format(node_name)) def add_compute_node(self, node): # Build and add base node. @@ -320,7 +323,7 @@ class ModelBuilder(object): def add_instance_node(self, node, instances): if instances is None: - # no instances on this node + LOG.info("no instances on compute_node: {0}".format(node)) return host = node.service["host"] compute_node = self.model.get_node_by_uuid(host)
Removed python_requires="<4" Python 4 doesn't exist, so this requirement is redundant
@@ -133,7 +133,7 @@ setup_args = dict( ), 'Issues': 'https://github.com/nedbat/coveragepy/issues', }, - python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4", + python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*", ) # A replacement for the build_ext command which raises a single exception
Remove the parameter with_activation from _ApplyActivationFunction. If this is False, just.. don't call the function.
@@ -1169,19 +1169,17 @@ class ProjectionLayer(quant_utils.QuantizableLayer): if not p.is_inference: out = py_utils.CheckNumerics(out) out = activations.GetFn(p.activation)(out) - out = self._ApplyProjectionKernel( - w, b, out, with_activation=False, **proj_kwargs) + out = self._ApplyProjectionKernel(w, b, out, **proj_kwargs) else: # Normal ordered projection. if self._is_bn_folded or not p.batch_norm: - # Everything folded together. This is the only variant that supports - # quantization. + # This is the only variant that supports quantization. out = self._ApplyProjectionKernel(w, b, inputs, **proj_kwargs) + out = self._ApplyActivationFunction(out) out = self.QAct(self._output_qact_name, out) else: - # Projection kernel(no activation fn) -> BN -> Activation fn. - out = self._ApplyProjectionKernel( - w, b, inputs, with_activation=False, **proj_kwargs) + # Projection kernel -> BN -> Activation fn. + out = self._ApplyProjectionKernel(w, b, inputs, **proj_kwargs) if p.batch_norm: out = self.bn.FProp(theta.bn, out, paddings) if p.activation != 'NONE': @@ -1260,8 +1258,7 @@ class ProjectionLayer(quant_utils.QuantizableLayer): proj_kwargs = { 'mix_kernel': theta.mix_kernel } if p.use_block_diagonal_matmul and p.use_bd_mix else {} - raw_output = self._ApplyProjectionKernel( - w, b, inputs, with_activation=False, **proj_kwargs) + raw_output = self._ApplyProjectionKernel(w, b, inputs, **proj_kwargs) mean, variance, beta, gamma = self.bn.ComputeAndUpdateMoments( theta.bn, raw_output, paddings) @@ -1276,24 +1273,17 @@ class ProjectionLayer(quant_utils.QuantizableLayer): w, b, inputs, - with_activation=True, mix_kernel=None): - """Applies matmul/bias/activation in one step. - - Note that it is important that these three ops be computed in this way as - downstream inference engines (esp. for quantized inference) can recognize - and fuse them. For floating point, this is an optimization, but for - quantization, it is required. + """Applies projection. Args: w: Weight matrix. b: Bias vector (or None). inputs: FProp inputs. - with_activation: Whether to also compute the activation function. mix_kernel: (optional) mix_kernel for block diagonal matmul. Returns: - Output tensor reshaped. + Output tensor with projection applied. """ p = self.params @@ -1338,22 +1328,20 @@ class ProjectionLayer(quant_utils.QuantizableLayer): if b is not None: out += b # NOTE: Bias on matmul is never quantized. - out = gshard_utils.MeshSplit(out, p.device_mesh, + return gshard_utils.MeshSplit(out, p.device_mesh, p.activation_split_dims_mapping) - return self._ApplyActivationFunction(out, with_activation) - def _ApplyActivationFunction(self, out, with_activation=True): + def _ApplyActivationFunction(self, out): """Applies the activation function in one step. Args: out: The result of applying the weight matrix (and bias) to the inputs. - with_activation: Whether to also compute the activation function. Returns: - Output tensor reshaped. + Output tensor with activation applied. """ p = self.params - if with_activation and p.activation != 'NONE': + if p.activation != 'NONE': if self._pre_activation_qt_name: # Track quantization for unfused activation function. out = self.QAct(self._pre_activation_qt_name, out)
Fixes a bug in the status command where the inspect module is not supported in Cython - use asyncio.iscoroutinefunction instead
import asyncio -import inspect import time -from collections import deque, OrderedDict -from typing import Dict, List -from typing import TYPE_CHECKING +from collections import OrderedDict, deque +from typing import TYPE_CHECKING, Dict, List import pandas as pd @@ -11,7 +9,7 @@ from hummingbot import check_dev_mode from hummingbot.client.config.config_helpers import get_strategy_config_map, missing_required_configs from hummingbot.client.config.global_config_map import global_config_map from hummingbot.client.config.security import Security -from hummingbot.client.settings import required_exchanges, ethereum_wallet_required +from hummingbot.client.settings import ethereum_wallet_required, required_exchanges from hummingbot.connector.connector_base import ConnectorBase from hummingbot.core.network_iterator import NetworkStatus from hummingbot.core.utils.async_utils import safe_ensure_future @@ -71,7 +69,7 @@ class StatusCommand: else "" app_warning = self.application_warning() app_warning = "" if app_warning is None else app_warning - if inspect.iscoroutinefunction(self.strategy.format_status): + if asyncio.iscoroutinefunction(self.strategy.format_status): st_status = await self.strategy.format_status() else: st_status = self.strategy.format_status()
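A minimal sketch of the dispatch pattern the fix keeps, with asyncio.iscoroutinefunction doing the check instead of inspect (the strategy classes here are illustrative):

import asyncio

class SyncStrategy:
    def format_status(self):
        return "sync status"

class AsyncStrategy:
    async def format_status(self):
        return "async status"

async def report(strategy):
    # asyncio.iscoroutinefunction also recognises bound `async def` methods,
    # so it can replace inspect.iscoroutinefunction for this check.
    if asyncio.iscoroutinefunction(strategy.format_status):
        return await strategy.format_status()
    return strategy.format_status()

print(asyncio.run(report(SyncStrategy())))    # sync status
print(asyncio.run(report(AsyncStrategy())))   # async status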
deposit: group required fields together closes
@@ -310,15 +310,19 @@ export class RDMDepositForm extends Component { options={this.vocabularies.metadata.titles} required /> + <PublicationDateField required /> <CreatibutorsField label={"Creators"} + labelIcon={"user"} fieldPath={"metadata.creators"} roleOptions={this.vocabularies.metadata.creators.role} schema="creators" + required /> <CreatibutorsField addButtonLabel={"Add contributor"} label={"Contributors"} + labelIcon={"user plus"} fieldPath={"metadata.contributors"} roleOptions={this.vocabularies.metadata.contributors.role} schema="contributors" @@ -330,7 +334,6 @@ export class RDMDepositForm extends Component { <DescriptionsField options={this.vocabularies.metadata.descriptions} /> - <PublicationDateField required /> <LicenseField // TODO: configure the searchEndpoint searchConfig={{
Update pythonapp.yml [formerly 87cbf4ea25d9033e766d2901c9e25a6749e25abe] [formerly e26c3ee9db40ca63225908252ed718889bd38c8d] [formerly 858b96bb5f666e232bd5cbd0caeedf58a07886fd]
@@ -31,7 +31,8 @@ jobs: flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - - name: Test with pytest - run: | - pip install pytest - pytest + + - name: Python Style Checker + uses: andymckay/[email protected] + +
Update bot_photo.py There is no reason to stop downloading photos if one of them didn't download successfully.
@@ -45,5 +45,4 @@ def download_photos(self, medias, path, description=False): if not self.download_photo(media, path, description=description): delay.error_delay(self) broken_items = medias[medias.index(media):] - break return broken_items
Intersection env: driving offroad gives 0 reward and is optionally terminal Fix
@@ -58,7 +58,8 @@ class IntersectionEnv(AbstractEnv): "high_speed_reward": 1, "arrived_reward": 1, "reward_speed_range": [7.0, 9.0], - "normalize_reward": False + "normalize_reward": False, + "offroad_terminal": True }) return config @@ -75,12 +76,14 @@ class IntersectionEnv(AbstractEnv): reward = self.config["arrived_reward"] if self.has_arrived(vehicle) else reward if self.config["normalize_reward"]: reward = utils.lmap(reward, [self.config["collision_reward"], self.config["arrived_reward"]], [0, 1]) + reward = 0 if not vehicle.on_road else reward return reward def _is_terminal(self) -> bool: return any(vehicle.crashed for vehicle in self.controlled_vehicles) \ or all(self.has_arrived(vehicle) for vehicle in self.controlled_vehicles) \ - or self.steps >= self.config["duration"] * self.config["policy_frequency"] + or self.steps >= self.config["duration"] * self.config["policy_frequency"] \ + or (self.config["offroad_terminal"] and not self.vehicle.on_road) def _agent_is_terminal(self, vehicle: Vehicle) -> bool: """The episode is over when a collision occurs or when the access ramp has been passed."""
tests: increase number of threads for testinfra from 4 to 8 to make testing faster.
@@ -220,7 +220,7 @@ commands= # wait 5 minutes for services to be ready sleep 300 # test cluster state using ceph-ansible tests - testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests + testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests # reboot all vms vagrant reload --no-provision @@ -228,7 +228,7 @@ commands= # wait 5 minutes for services to be ready sleep 300 # retest to ensure cluster came back up correctly after rebooting - testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests + testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests # handlers/idempotency test ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} \
Add code to clean up GIS timestamp for dchousing Added code to base.py that converts the GIS-DTTM value into a readable date timestamp. The data provides the timestamp as milliseconds, so added code to do the conversion as the raw data is ingested and written to file.
@@ -184,6 +184,13 @@ class BaseApiConn(object): data[field] = None else: data[field] = line[value] + + # clean opendata 'GIS_DTTM' formatting - convert milliseconds + if value == 'GIS_LAST_MOD_DTTM': + milli_sec = int(line[value]) + data[field] = \ + datetime.fromtimestamp(milli_sec / 1000.0).strftime( + '%m/%d/%Y') return data def _get_nlihc_id_from_db(self, db_conn, address_id): @@ -205,7 +212,6 @@ class BaseApiConn(object): else: return str(uuid4()), False - def create_project_subsidy_csv(self, uid, project_fields_map, subsidy_fields_map, database_choice=None): """
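The conversion above in isolation, as a small helper (function name and sample value are illustrative): GIS_LAST_MOD_DTTM arrives as milliseconds since the Unix epoch, so divide by 1000 before handing it to datetime.

from datetime import datetime

def gis_dttm_to_date(raw_value: str) -> str:
    """Convert a millisecond epoch string into an MM/DD/YYYY date string."""
    milli_sec = int(raw_value)
    return datetime.fromtimestamp(milli_sec / 1000.0).strftime('%m/%d/%Y')

# Output depends on the local timezone, e.g. '06/01/2017' for this value.
print(gis_dttm_to_date("1496275200000"))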
Add arm energy probe instrument Arm Energy Probe with arm_probe has recently been added to devlib's instruments. Add the arm_energy_probe to the WA list of energy measurement instruments.
from __future__ import division from collections import defaultdict import os +import shutil from devlib import DerivedEnergyMeasurements from devlib.instrument import CONTINUOUS from devlib.instrument.energy_probe import EnergyProbeInstrument +from devlib.instrument.arm_energy_probe import ArmEnergyProbeInstrument from devlib.instrument.daq import DaqInstrument from devlib.instrument.acmecape import AcmeCapeInstrument from devlib.instrument.monsoon import MonsoonInstrument @@ -162,6 +164,37 @@ class EnergyProbeBackend(EnergyInstrumentBackend): msg = 'Number of Energy Probe port labels does not match the number of resistor values.' raise ConfigError(msg) +class ArmEnergyProbeBackend(EnergyInstrumentBackend): + + name = 'arm_energy_probe' + + parameters = [ + Parameter('config_file', kind=str, + description=""" + Path to config file of the AEP + """), + ] + + instrument = ArmEnergyProbeInstrument + + def get_instruments(self, target, metadir, **kwargs): + """ + Get a dict mapping device keys to an Instruments + + Typically there is just a single device/instrument, in which case the + device key is arbitrary. + """ + + shutil.copy(self.config_file, metadirr) + + return {None: self.instrument(target, **kwargs)} + + def validate_parameters(self, params): + if not params.get('config_file'): + raise ConfigError('Mandatory parameter "config_file" is not set.') + self.config_file = params.get('config_file') + if not os.path.exists(self.config_file): + raise ConfigError('"config_file" does not exist.') class AcmeCapeBackend(EnergyInstrumentBackend): @@ -249,7 +282,7 @@ class EnergyMeasurement(Instrument): parameters = [ Parameter('instrument', kind=str, mandatory=True, - allowed_values=['daq', 'energy_probe', 'acme_cape', 'monsoon', 'juno_readenergy'], + allowed_values=['daq', 'energy_probe', 'acme_cape', 'monsoon', 'juno_readenergy', 'arm_energy_probe'], description=""" Specify the energy instruments to be enabled. """),
Update locales.py Italian locale is missing translation for "week" and "weeks"
@@ -331,6 +331,8 @@ class ItalianLocale(Locale): "hours": "{0} ore", "day": "un giorno", "days": "{0} giorni", + "week": "una settimana,", + "weeks": "{0} settimane", "month": "un mese", "months": "{0} mesi", "year": "un anno",
Pin signedjson to <= 1.1.1 as a temporary workaround for To be reverted after the Synapse 1.56 release.
@@ -48,7 +48,7 @@ REQUIREMENTS = [ "unpaddedbase64>=1.1.0", "canonicaljson>=1.4.0", # we use the type definitions added in signedjson 1.1. - "signedjson>=1.1.0", + "signedjson>=1.1.0,<=1.1.1", "pynacl>=1.2.1", "idna>=2.5", # validating SSL certs for IP addresses requires service_identity 18.1.
Group charts by training/reward/value Summary: Group charts by training/reward/value
@@ -1034,25 +1034,34 @@ class Evaluator(object): return x for name, value in [ - ("data/td_loss", self.get_recent_td_loss()), - ("data/mc_loss", self.get_recent_mc_loss()), - ("Direct Method Reward", self.get_recent_reward_direct_method().normalized), + ("Training/td_loss", self.get_recent_td_loss()), + ("Training/mc_loss", self.get_recent_mc_loss()), ( - "IPS Reward", + "Reward_CPE/Direct Method Reward", + self.get_recent_reward_direct_method().normalized, + ), + ( + "Reward_CPE/IPS Reward", self.get_recent_reward_inverse_propensity_score().normalized, ), - ("Doubly Robust Reward", self.get_recent_reward_doubly_robust().normalized), - ("MAGIC Estimator", self.get_recent_value_magic_doubly_robust().normalized), ( - "Doubly Robust One Step", + "Reward_CPE/Doubly Robust Reward", + self.get_recent_reward_doubly_robust().normalized, + ), + ( + "Value_CPE/MAGIC Estimator", + self.get_recent_value_magic_doubly_robust().normalized, + ), + ( + "Value_CPE/Doubly Robust One Step", self.get_recent_value_one_step_doubly_robust().normalized, ), ( - "Weighted Doubly Robust", + "Value_CPE/Weighted Doubly Robust", self.get_recent_value_weighted_doubly_robust().normalized, ), ( - "Sequential Doubly Robust", + "Value_CPE/Sequential Doubly Robust", self.get_recent_value_sequential_doubly_robust().normalized, ), ]:
GraphNode improvements. Constructor accepts an optional children parameter. is_leaf() and find_root() methods. __getitem__() and __iter__() methods. Moved graph dumping into the class.
# limitations under the License. class GraphNode(object): - """! @brief Simple graph node.""" + """! @brief Simple graph node. - def __init__(self): + All nodes have a parent, which is None for a root node, and zero or more children. + + Supports indexing and iteration over children. + """ + + def __init__(self, children=None): """! @brief Constructor.""" super(GraphNode, self).__init__() self._parent = None self._children = [] + if children is not None: + for c in children: + self.add_child(c) @property def parent(self): @@ -33,11 +41,23 @@ class GraphNode(object): """! @brief Child nodes in the object graph.""" return self._children + @property + def is_leaf(self): + """! @brief Returns true if the node has no children.""" + return len(self.children) == 0 + def add_child(self, node): """! @brief Link a child node onto this object.""" node._parent = self self._children.append(node) + def find_root(self): + """! @brief Returns the root node of the object graph.""" + root = self + while root.parent is not None: + root = root.parent + return root + def find_children(self, predicate, breadth_first=True): """! @brief Recursively search for children that match a given predicate. @param self @@ -80,12 +100,32 @@ class GraphNode(object): else: return None -def dump_graph(node): - """! @brief Draw the object graph.""" + def __getitem__(self, key): + """! @brief Returns the indexed child. + + Slicing is supported. + """ + return self._children[key] + + def __iter__(self): + """! @brief Iterate over the node's children.""" + return iter(self.children) + + def _dump_desc(self): + """! @brief Similar to __repr__ by used for dump_to_str().""" + return str(self) + + def dump_to_str(node): + """! @brief Returns a string describing the object graph.""" def _dump(node, level): - print(" " * level + "- " + str(node)) + result = (" " * level) + "- " + node._dump_desc() + "\n" for child in node.children: - _dump(child, level + 1) + result += _dump(child, level + 1) + return result + + return _dump(node, 0) - _dump(node, 0) + def dump(self): + """! @brief Pretty print the object graph to stdout.""" + print(self.dump_to_str())
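A short usage sketch of the API described above, given the GraphNode class defined in the patch (no import path is assumed here):

root = GraphNode()
branch = GraphNode(children=[GraphNode()])    # constructor now accepts children
root.add_child(branch)

print(root.is_leaf)                   # False: it has children
print(branch[0].is_leaf)              # True: indexing reaches the grandchild
print(branch[0].find_root() is root)  # True: walks parents back to the root
print([child for child in root])      # iteration goes over direct children
root.dump()                           # pretty-prints the whole graph to stdout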
Give a call out to other event sources in the README Hopefully this raises awareness of other event sources chalice can integrate with.
@@ -15,20 +15,18 @@ Python Serverless Microframework for AWS :target: https://codecov.io/github/aws/chalice :alt: codecov.io -Chalice is a python serverless microframework for AWS. It allows you to quickly -create and deploy applications that use Amazon API Gateway and AWS Lambda. -It provides: +Chalice is a microframework for writing serverless apps in python. It allows +you to quickly create and deploy applications that use AWS Lambda. It provides: * A command line tool for creating, deploying, and managing your app -* A familiar and easy to use API for declaring views in python code +* A decorator based API for integrating with Amazon API Gateway, Amazon S3, + Amazon SNS, Amazon SQS, and other AWS services. * Automatic IAM policy generation -:: +You can create Rest APIs: - $ pip install chalice - $ chalice new-project helloworld && cd helloworld - $ cat app.py +.. code-block:: python from chalice import Chalice @@ -38,6 +36,60 @@ It provides: def index(): return {"hello": "world"} +Tasks that run on a periodic basis: + +.. code-block:: python + + from chalice import Chalice, Rate + + app = Chalice(app_name="helloworld") + + # Automatically runs every 5 minutes + @app.schedule(Rate(5, unit=Rate.MINUTES)) + def periodic_task(): + return {"hello": "world"} + + +You can connect a lambda function to an S3 event: + +.. code-block:: python + + from chalice import Chalice + + app = Chalice(app_name="helloworld") + + # Whenver an object is uploaded to 'mybucket' + # this lambda function will be invoked. + + @app.on_s3_event(bucket='mybucket') + def handler(event): + print("Object uploaded for bucket: %s, key: %s" + % (event.bucket, event.key)) + +As well as an SQS queue: + +.. code-block:: python + + from chalice import Chalice + + app = Chalice(app_name="helloworld") + + # Whenver an object is uploaded to 'mybucket' + # this lambda function will be invoked. + + @app.on_sqs_message(queue='my-queue-name') + def handler(event): + for record in event: + print("Message body: %s" % record.body) + + +And several other AWS resources. + +Once you've written your code, you just run ``chalice deploy`` +and Chalice takes take of deploying your app. + +:: + $ chalice deploy ... https://endpoint/dev @@ -46,7 +98,6 @@ It provides: {"hello": "world"} Up and running in less than 30 seconds. - Give this project a try and share your feedback with us here on Github. The documentation is available
Remove unnecessary ObservedData constraint first_observed and last_observed are both required, so this co-constraint was removed from WD04.
@@ -369,10 +369,6 @@ class ObservedData(STIXDomainObject): def _check_object_constraints(self): super(self.__class__, self)._check_object_constraints() - if self.get('number_observed', 1) == 1: - self._check_properties_dependency(['first_observed'], ['last_observed']) - self._check_properties_dependency(['last_observed'], ['first_observed']) - first_observed = self.get('first_observed') last_observed = self.get('last_observed')
MAINT: Fixup quantile tests to not use `np.float` This is deprecated usage, also small code style fixups since I was looking at it anyway. YMMV, but I think it's a bit nicer/more compact now.
@@ -3116,12 +3116,11 @@ def test_quantile_monotonic(self): 8, 8, 7]) * 0.1, p0) assert_equal(np.sort(quantile), quantile) - @hypothesis.given(arr=arrays(dtype=np.float, shape=st.integers(min_value=3, - max_value=1000), - elements=st.floats(allow_infinity=False, - allow_nan=False, - min_value=-1e300, - max_value=1e300))) + @hypothesis.given( + arr=arrays(dtype=np.float64, + shape=st.integers(min_value=3, max_value=1000), + elements=st.floats(allow_infinity=False, allow_nan=False, + min_value=-1e300, max_value=1e300))) def test_quantile_monotonic_hypo(self, arr): p0 = np.arange(0, 1, 0.01) quantile = np.quantile(arr, p0) @@ -3166,8 +3165,10 @@ def test_lerp_bounded(self, t, a, b): b=st.floats(allow_nan=False, allow_infinity=False, min_value=-1e300, max_value=1e300)) def test_lerp_symmetric(self, t, a, b): - # double subtraction is needed to remove the extra precision that t < 0.5 has - assert np.lib.function_base._lerp(a, b, 1 - (1 - t)) == np.lib.function_base._lerp(b, a, 1 - t) + # double subtraction is needed to remove the extra precision of t < 0.5 + left = np.lib.function_base._lerp(a, b, 1 - (1 - t)) + right = np.lib.function_base._lerp(b, a, 1 - t) + assert left == right def test_lerp_0d_inputs(self): a = np.array(2)
Bump package requirements to latest versions pydocstyle: flake8: coverage: This came about because the version of coverage we were using was broken for python3. It would report 100% coverage and only report on the __init__.py files. The latest version of coverage fixes this.
# Dev requirements, used for various linting tools -coverage==4.0.3 -flake8==2.5.0 +coverage==4.3.4 +flake8==3.3.0 tox==2.2.1 wheel==0.26.0 doc8==0.7.0 # Pylint will fail on py3. Locking to a commit on master # until pylint2 is released. -e git://github.com/PyCQA/pylint.git@7cb3ffddfd96f5e099ca697f6b1e30e727544627#egg=pylint -pytest-cov==2.3.1 -pydocstyle==1.0.0 +pytest-cov==2.4.0 +pydocstyle==2.0.0 # Test requirements -pytest==3.0.3 -py==1.4.31 +pytest==3.0.7 +py==1.4.33 pygments==2.1.3 mock==2.0.0 requests==2.11.1
[modules/contrib/dnf] fix undefined "widget" error While refactoring, I overlooked that the variable "widget" doesn't exist anymore. see
@@ -29,6 +29,7 @@ class Module(core.module.Module): return "/".join(result) def update(self): + widget = self.widget() res = util.cli.execute("dnf updateinfo", ignore_errors=True) security = 0
Installation commands python3, pip3 & mkdir ~/.mythril
@@ -9,7 +9,7 @@ Mythril is a security analysis tool for Ethereum smart contracts. It uses concol Install from Pypi: ```bash -$ pip install mythril +$ pip3 install mythril ``` Or, clone the GitHub repo to install the newest master branch: @@ -17,7 +17,7 @@ Or, clone the GitHub repo to install the newest master branch: ```bash $ git clone https://github.com/b-mueller/mythril/ $ cd mythril -$ python setup.py install +$ python3 setup.py install ``` Note that Mythril requires Python 3.5 to work. @@ -27,6 +27,7 @@ Note that Mythril requires Python 3.5 to work. Whenever you disassemble or analyze binary code, Mythril will try to resolve function names using its local signature database. The database must be provided at `~/.mythril/signatures.json`. You can start out with the [default file](signatures.json) as follows: ``` +$ mkdir ~/.mythril $ cd ~/.mythril $ wget https://raw.githubusercontent.com/b-mueller/mythril/master/signatures.json ```
Fixed E271 flake8 errors multiple spaces after keyword
@@ -196,6 +196,6 @@ filterwarnings = ignore:.*inspect.getargspec.*deprecated, use inspect.signature.*:DeprecationWarning [flake8] -ignore = E271,E272,E293,E301,E302,E303,E401,E402,E501,E701,E702,E704,E712,E731 +ignore = E272,E293,E301,E302,E303,E401,E402,E501,E701,E702,E704,E712,E731 max-line-length = 120 exclude = _pytest/vendored_packages/pluggy.py
TUTORIAL: minor cleanup An attempt to make part of the tutorial less jarring, and reformatting of the surrounding text to keep the lines <80 characters.
@@ -62,14 +62,14 @@ and verify metadata files. To begin, cryptographic keys are generated with the repository tool. However, before metadata files can be validated by clients and target files fetched in a secure manner, public keys must be pinned to particular metadata roles and -metadata signed by role's private keys. After covering keys, the four required -top-level metadata are created next. Examples are given demonstrating the -expected work flow, where the metadata roles are created in a specific order, -keys imported and loaded, and metadata signed and written to disk. Lastly, -target files are added to the repository, and a custom delegation performed to -extend the default roles of the repository. By the end, a fully populated TUF -repository is generated that can be used by clients to securely download -updates. +metadata signed by the role's private keys. After covering keys, the four +required top-level metadata are created next. Examples are given demonstrating +the expected work flow, where the metadata roles are created in a specific +order, keys imported and loaded, and metadata signed and written to disk. +Lastly, target files are added to the repository, and a custom delegation +performed to extend the default roles of the repository. By the end, a fully +populated TUF repository is generated that can be used by clients to securely +download updates. ### Keys ### The repository tool supports multiple public-key algorithms, such as
Improvement - Changing button to show engine name and version Engine patch version being properly shown
<div class="control-label"><label>Engine migration:</label></div> <div class="controls"> {% if retry_migrate_plan %} - <button data-toggle="modal" class="btn btn-warning" id="migrate_plan_retry_btn" data-target="#migrate_plan_retry">Retry Migrating Oracle to Percona</button> + <button data-toggle="modal" class="btn btn-warning" id="migrate_plan_retry_btn" data-target="#migrate_plan_retry">Retry Oracle {{ database.infra.engine_patch.engine.name|title }} {{ database.infra.engine_patch.full_version }} to {{ database.infra.plan.migrate_engine_equivalent_plan.engine.name|title }} {{ database.infra.plan.migrate_engine_equivalent_plan.engine.full_inicial_version }}</button> {% else %} - <button data-toggle="modal" class="btn btn-primary" id="migrate_plan_btn" data-target="#migrate_plan">Migrate Oracle to Percona</button> + <button data-toggle="modal" class="btn btn-primary" id="migrate_plan_btn" data-target="#migrate_plan">Oracle {{ database.infra.engine_patch.engine.name|title }} {{ database.infra.engine_patch.full_version }} to {{ database.infra.plan.migrate_engine_equivalent_plan.engine.name|title }} {{ database.infra.plan.migrate_engine_equivalent_plan.engine.full_inicial_version }} </button> {% endif %} </div> </div>
Move `omit` to `run` section in .coveragerc Coverage used to be configured to omit certain directories while reporting. This commit slightly optimizes coverage to already omit those directories while measuring coverage.
[run] branch = True -[report] -exclude_lines = - pragma: no cover - def __str__ - if __name__ == .__main__.: - omit = # Command-line scripts. */tuf/scripts/client.py */tuf/scripts/repo.py */tests/* */site-packages/* + +[report] +exclude_lines = + pragma: no cover + def __str__ + if __name__ == .__main__.:
Fix error when running scripts This fixes the error Can't pickle local object 'LDAPBackend.__new__.<locals>.NBLDAPBackend'
@@ -140,11 +140,25 @@ class RemoteUserBackend(_RemoteUserBackend): return False +# Create a new instance of django-auth-ldap's LDAPBackend with our own ObjectPermissions +try: + from django_auth_ldap.backend import LDAPBackend as LDAPBackend_ + + class NBLDAPBackend(ObjectPermissionMixin, LDAPBackend_): + def get_permission_filter(self, user_obj): + permission_filter = super().get_permission_filter(user_obj) + if self.settings.FIND_GROUP_PERMS: + permission_filter = permission_filter | Q(groups__name__in=user_obj.ldap_user.group_names) + return permission_filter +except ModuleNotFoundError: + pass + + class LDAPBackend: def __new__(cls, *args, **kwargs): try: - from django_auth_ldap.backend import LDAPBackend as LDAPBackend_, LDAPSettings + from django_auth_ldap.backend import LDAPSettings import ldap except ModuleNotFoundError as e: if getattr(e, 'name') == 'django_auth_ldap': @@ -170,14 +184,6 @@ class LDAPBackend: "Required parameter AUTH_LDAP_SERVER_URI is missing from ldap_config.py." ) - # Create a new instance of django-auth-ldap's LDAPBackend with our own ObjectPermissions - class NBLDAPBackend(ObjectPermissionMixin, LDAPBackend_): - def get_permission_filter(self, user_obj): - permission_filter = super().get_permission_filter(user_obj) - if self.settings.FIND_GROUP_PERMS: - permission_filter = permission_filter | Q(groups__name__in=user_obj.ldap_user.group_names) - return permission_filter - obj = NBLDAPBackend() # Read LDAP configuration parameters from ldap_config.py instead of settings.py
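The underlying pickle restriction, shown standalone (class names are illustrative): instances of classes defined inside a function body cannot be pickled, because the class cannot be located by its qualified name, which is why NBLDAPBackend had to move to module level.

import pickle

class ModuleLevel:
    pass

def make_local_instance():
    class LocalLevel:          # defined inside a function body
        pass
    return LocalLevel()

pickle.dumps(ModuleLevel())    # fine: the class is importable by qualified name

try:
    pickle.dumps(make_local_instance())
except (AttributeError, pickle.PicklingError) as exc:
    # e.g. "Can't pickle local object 'make_local_instance.<locals>.LocalLevel'"
    print(exc)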
[bugfix] Fix default alias for "thumb" "mini" is the new default alias for "thumb" in German after
@@ -264,18 +264,18 @@ class TestLiveCosmeticChanges(TestCosmeticChanges): def test_translateMagicWords(self): """Test translateMagicWords method.""" self.assertEqual( - '[[File:Foo.bar|miniatur]]', + '[[File:Foo.bar|mini]]', self.cct.translateMagicWords('[[File:Foo.bar|thumb]]')) self.assertEqual( - '[[File:Foo.bar|miniatur]]', - self.cct.translateMagicWords('[[File:Foo.bar|mini]]')) + '[[File:Foo.bar|mini]]', + self.cct.translateMagicWords('[[File:Foo.bar|miniatur]]')) # test local namespace self.assertEqual( - '[[Datei:Foo.bar|miniatur]]', + '[[Datei:Foo.bar|mini]]', self.cct.translateMagicWords('[[Datei:Foo.bar|thumb]]')) # test multiple magic words self.assertEqual( - '[[File:Foo.bar|links|miniatur]]', + '[[File:Foo.bar|links|mini]]', self.cct.translateMagicWords('[[File:Foo.bar|left|thumb]]')) # test magic words at the end self.assertEqual(
Removed button from form_error_message.html There's already a link, and the button looks weird when there are multiple errors. This was added for new users making their first app, but we streamlined that flow in other ways (redirecting to reg form, adding a default question to followup form).
{# Poor spacing in this file because this template is used in the middle of sentences #}{% load xforms_extras %}{% load i18n %}{% if not not_actual_build %} "<a href="{% url "form_source" domain app.id error.form.unique_id %}">{{ error.form.name|trans:langs }}</a>" Form - in the "{{ error.module.name|trans:langs }}" Menu.<br /> - <a class="btn btn-primary btn-xs" href="{% url "form_source" domain app.id error.form.unique_id %}">{% trans 'Go to' %} {{ error.form.name|trans:langs }} {% trans 'Form' %}</a> + in the "{{ error.module.name|trans:langs }}" Menu {% else %}{{ no_form }}{% endif %}
[cleanup] reduce code complexity of generate_user_files.create_user_config Saving botpassword becomes its own function
# -*- coding: utf-8 -*- """Script to create user-config.py.""" # -# (C) Pywikibot team, 2010-2018 +# (C) Pywikibot team, 2010-2019 # # Distributed under the terms of the MIT license. # @@ -371,6 +371,11 @@ def create_user_config(main_family, main_code, main_username, force=False): os.remove(_fnc) raise + save_botpasswords(botpasswords, _fncpass) + + +def save_botpasswords(botpasswords, _fncpass): + """Write botpasswords to file.""" if botpasswords: # Save user-password.py if necessary # user-config.py is already created at this point
Generalise prefer forward burn Previously it would not prefer a forward burn after a real travel.
@@ -1622,21 +1622,33 @@ def short_travel_cutcode(context: CutCode, channel=None): closest = cut backwards = True if d <= 0.1: # Distance in px is zero, we cannot improve. - # Need to swap to next segment forward if it is coincident and permitted - if ( - cut.next - and cut.next.permitted - and cut.next.burns_remaining >= 1 - and cut.next.start == cut.end - ): - closest = cut.next - backwards = False break distance = d closest_length = l if closest is None: break + + # Change direction if other direction is coincident and has more burns remaining + if backwards: + if ( + closest.next + and closest.next.permitted + and closent.next.burns_remaining >= closest.burns_remaining + and closest.next.start == closest.end + ): + closest = closest.next + backwards = False + else: + if ( + closest.prev + and closest.prev.permitted + and closent.prev.burns_remaining > closest.burns_remaining + and closest.prev.end == closest.start + ): + closest = closest.next + backwards = False + closest.burns_remaining -= 1 if closest.burns_remaining == 0: closest.permitted = False
add log action to plugin loader this is introduced for debugging and monitoring purposes
@@ -2,6 +2,9 @@ from mythril.laser.ethereum.svm import LaserEVM from mythril.laser.ethereum.plugins.plugin import LaserPlugin from typing import List +import logging + +log = logging.getLogger(__name__) class LaserPluginLoader: @@ -23,6 +26,7 @@ class LaserPluginLoader: :param laser_plugin: plugin that will be loaded in the symbolic virtual machine """ + log.info("Loading plugin: {}".format(str(laser_plugin))) laser_plugin.initialize(self.symbolic_vm) self.laser_plugins.append(laser_plugin)
Allow for more normalisations in compute_rms Added "abs" and "none" normalisation to compute_rms function.
@@ -204,7 +204,7 @@ class Powerspectrum(Crossspectrum): if self.norm.lower() == 'leahy': powers_leahy = powers.copy() - elif self.norm.lower() == "frac": + elif self.norm.lower() in ["frac", "abs", "none"]: powers_leahy = \ self.unnorm_power[minind:maxind].real * 2 / nphots else:
STY: updated prep_dir function Removed unused kwarg catch and added potentially useful informative output.
@@ -20,25 +20,26 @@ import pysat from pysat.tests.registration_test_class import TestWithRegistration -def prep_dir(inst=None): +def prep_dir(inst): """Prepare the directory to provide netCDF export file support Parameters ---------- - inst : pysat.Instrument or NoneType - Instrument class object or None to use 'pysat_testing.py' (default=None) + inst : pysat.Instrument + Instrument class object - """ - if inst is None: - inst = pysat.Instrument(platform='pysat', name='testing') + Returns + ------- + bool + True if directories create, False if not + """ # Create data directories try: os.makedirs(inst.files.data_path) + return True except OSError: - pass - - return + return False def remove_files(inst):
Mem isolation Documentation addition
@@ -30,7 +30,7 @@ Required agent options ------------------------------ - ``containerizers=mesos`` - to enable PID based cgroup discovery, -- ``isolation=cgroups/cpu,cgroups/perf_event`` - to enable CPU shares management and perf event monitoring, +- ``isolation=cgroups/cpu,cgroups/perf_event,cgroups/mem`` - to enable CPU shares management and perf event monitoring, - ``perf_events=cycles`` and ``perf_interval=360days`` - to enable perf event subsystem cgroup management without actual counter collection. Following exact setup was verified to work with provided `workloads </workloads>`_:
[BUG] Fix override/defaulting of "prediction intervals" adders This overrides the `_predict_quantiles` method of `conformal.py` with the base class default to ensure that the `_predict_quantiles` method for the forecaster that is wrapped with `conformal.py` is consistent with `_predict_interval`.
@@ -244,6 +244,36 @@ class ConformalIntervals(BaseForecaster): return pred_int.convert_dtypes() + def _predict_quantiles(self, fh, X, alpha): + """Compute/return prediction quantiles for a forecast. + + private _predict_quantiles containing the core logic, + called from predict_quantiles and default _predict_interval + + Parameters + ---------- + fh : guaranteed to be ForecastingHorizon + The forecasting horizon with the steps ahead to to predict. + X : optional (default=None) + guaranteed to be of a type in self.get_tag("X_inner_mtype") + Exogeneous time series to predict from. + alpha : list of float, optional (default=[0.5]) + A list of probabilities at which quantile forecasts are computed. + + Returns + ------- + quantiles : pd.DataFrame + Column has multi-index: first level is variable name from y in fit, + second level being the values of alpha passed to the function. + Row index is fh, with additional (upper) levels equal to instance levels, + from y seen in fit, if y_inner_mtype is Panel or Hierarchical. + Entries are quantile forecasts, for var in col index, + at quantile probability in second col index, for the row index. + """ + pred_int = BaseForecaster._predict_quantiles(self, fh, X, alpha) + + return pred_int + def _compute_sliding_residuals(self, y, X, forecaster, initial_window, sample_frac): """Compute sliding residuals used in uncertainty estimates.
Remove mox from nova.tests.unit.virt.xenapi.test_vm_utils.py Convert self.stubs.Set to a mock decorator. Part of blueprint remove-mox-pike
@@ -842,16 +842,13 @@ class VDIOtherConfigTestCase(VMUtilsTestBase): self.assertEqual(expected, self.session.args[0]['other_config']) - def test_create_image(self): + @mock.patch.object(vm_utils, '_fetch_image', + return_value={'root': {'uuid': 'fake-uuid'}}) + def test_create_image(self, mock_vm_utils): # Other images are registered implicitly when they are dropped into # the SR by a dom0 plugin or some other process self.flags(cache_images='none', group='xenserver') - def fake_fetch_image(*args): - return {'root': {'uuid': 'fake-uuid'}} - - self.stubs.Set(vm_utils, '_fetch_image', fake_fetch_image) - other_config = {} def VDI_add_to_other_config(ref, key, value):
startup: Prettify video depth dump Use the right unit. For rgb10x2, this prints "30bpp" instead of "0x1E".
@@ -59,7 +59,7 @@ void dump_boot_args(struct boot_args *ba) printf(" stride: 0x%lx\n", ba->video.stride); printf(" width: %lu\n", ba->video.width); printf(" height: %lu\n", ba->video.height); - printf(" depth: 0x%lx\n", ba->video.depth); + printf(" depth: %lubpp\n", ba->video.depth); printf(" machine_type: %d\n", ba->machine_type); printf(" devtree: %p\n", ba->devtree); printf(" devtree_size: 0x%x\n", ba->devtree_size);
project_file.mako: add -DDEBUG=1 to C compile switches in debug mode This will repair automatic loading of GDB helpers (previous commit on this topic actually missed that). TN:
@@ -149,9 +149,24 @@ library project ${lib_name} is Ada_Mode_Args := ("-gnatp", "-gnatn2", "-fnon-call-exceptions"); end case; + ----------------- + -- C_Mode_Args -- + ----------------- + + -- Compilation switches for C that depend on the build mode + + C_Mode_Args := (); + case Build_Mode is + when "dev" => + C_Mode_Args := ("-DDEBUG=1"); + + when "prod" => + null; + end case; + for Default_Switches ("Ada") use Mode_Args & Ada_Mode_Args & Common_Ada_Cargs; - for Default_Switches ("C") use Mode_Args; + for Default_Switches ("C") use Mode_Args & C_Mode_Args; case Build_Mode is when "prod" =>
Cleanup, don't patch traceback dealloc. * This was never really necessary, and most probably only served to hide bugs. * Also, doing it on the fly only wasted cycles per frame creation.
@@ -408,24 +408,10 @@ void _initCompiledFrameType( void ) } -static void tb_dealloc( PyTracebackObject *tb ) -{ - // printf( "dealloc TB %ld %lx FR %ld %lx\n", Py_REFCNT( tb ), (long)tb, Py_REFCNT( tb->tb_frame ), (long)tb->tb_frame ); - - Nuitka_GC_UnTrack( tb ); - // Py_TRASHCAN_SAFE_BEGIN(tb) - Py_XDECREF( tb->tb_next ); - Py_XDECREF( tb->tb_frame ); - PyObject_GC_Del( tb ); - // Py_TRASHCAN_SAFE_END(tb) -} - extern PyObject *const_str_plain___module__; static PyFrameObject *MAKE_FRAME( PyCodeObject *code, PyObject *module, bool is_module ) { - PyTraceBack_Type.tp_dealloc = (destructor)tb_dealloc; - assertCodeObject( code ); PyObject *globals = ((PyModuleObject *)module)->md_dict;
Only update rays if they are not blocked. We actually stop propagating the ray if it is blocked; this way we can keep track of where it was actually blocked.
@@ -332,9 +332,10 @@ class Matrix(object): """ outputRay = Ray() + + if rightSideRay.isNotBlocked: outputRay.y = self.A * rightSideRay.y + self.B * rightSideRay.theta outputRay.theta = self.C * rightSideRay.y + self.D * rightSideRay.theta - outputRay.z = self.L + rightSideRay.z outputRay.apertureDiameter = self.apertureDiameter @@ -342,6 +343,8 @@ class Matrix(object): outputRay.isBlocked = True else: outputRay.isBlocked = rightSideRay.isBlocked + else: + outputRay = rightSideRay return outputRay
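A minimal, self-contained sketch of the propagation rule described in the change above (hypothetical `Ray`/`ABCD` classes, not the library's actual API): a blocked ray is returned unchanged, so its coordinates keep marking the point where it was blocked instead of being overwritten by later elements.

```python
from dataclasses import dataclass


@dataclass
class Ray:
    y: float = 0.0          # ray height
    theta: float = 0.0      # ray angle
    z: float = 0.0          # position along the optical axis
    is_blocked: bool = False


@dataclass
class ABCD:
    A: float
    B: float
    C: float
    D: float
    L: float = 0.0                  # physical length of the element
    aperture: float = float("inf")  # aperture diameter

    def trace(self, ray: Ray) -> Ray:
        if ray.is_blocked:
            return ray  # stop propagating; keep the blocking position
        out = Ray(
            y=self.A * ray.y + self.B * ray.theta,
            theta=self.C * ray.y + self.D * ray.theta,
            z=self.L + ray.z,
        )
        out.is_blocked = abs(out.y) > self.aperture / 2
        return out


print(ABCD(A=1, B=10, C=0, D=1, L=10).trace(Ray(y=0.5, theta=0.1)))
```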
Store epoch timestamps instead of strings. We're also switching from datetime.now() to datetime.utcnow().
@@ -9,7 +9,6 @@ from contextlib import suppress from datetime import datetime from pathlib import Path -import dateutil import discord import discord.abc from discord.ext import commands @@ -550,9 +549,9 @@ class HelpChannels(Scheduler, commands.Cog): self.bot.stats.incr(f"help.dormant_calls.{caller}") if await self.claim_times.contains(channel.id): - claimed_datestring = await self.claim_times.get(channel.id) - claimed = dateutil.parser.parse(claimed_datestring) - in_use_time = datetime.now() - claimed + claimed_timestamp = await self.claim_times.get(channel.id) + claimed = datetime.fromtimestamp(claimed_timestamp) + in_use_time = datetime.utcnow() - claimed self.bot.stats.timing("help.in_use_time", in_use_time) if await self.unanswered.contains(channel.id): @@ -688,7 +687,7 @@ class HelpChannels(Scheduler, commands.Cog): self.bot.stats.incr("help.claimed") - await self.claim_times.set(channel.id, str(datetime.now())) + await self.claim_times.set(channel.id, datetime.utcnow().timestamp()) await self.unanswered.set(channel.id, True) log.trace(f"Releasing on_message lock for {message.id}.")
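A small sketch of the storage change above, with a plain dict standing in for the bot's persistent cache: the claim time is written as an epoch float (`datetime.utcnow().timestamp()`), so reading it back only needs `datetime.fromtimestamp` instead of `dateutil` string parsing.

```python
from datetime import datetime

claim_times = {}  # channel_id -> epoch float; stand-in for the persistent cache


def claim(channel_id):
    # Store a plain epoch float instead of str(datetime.now())
    claim_times[channel_id] = datetime.utcnow().timestamp()


def in_use_seconds(channel_id):
    claimed = datetime.fromtimestamp(claim_times[channel_id])
    return (datetime.utcnow() - claimed).total_seconds()


claim(123)
print(f"in use for {in_use_seconds(123):.3f}s")
```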
Fix cover: `src` was a placeholder. Use `data-src` instead
@@ -54,7 +54,7 @@ class DaoNovelCrawler(Crawler): possible_image = soup.select_one(".summary_image a img") if isinstance(possible_image, Tag): - self.novel_cover = self.absolute_url(possible_image["src"]) + self.novel_cover = self.absolute_url(possible_image["data-src"]) logger.info("Novel cover: %s", self.novel_cover) self.novel_author = " ".join(
ceph-container-engine: lvm2 on OSD nodes only The lvm2 package installation has been moved from the ceph-osd role to the ceph-container-engine role, but its scope wasn't limited to the OSD nodes only. This commit fixes this behaviour.
tags: with_pkg -- name: install container and lvm2 packages +- name: install container packages package: - name: ['{{ container_package_name }}', '{{ container_binding_name }}', 'lvm2'] + name: ['{{ container_package_name }}', '{{ container_binding_name }}'] update_cache: true register: result until: result is succeeded tags: with_pkg +- name: install lvm2 package + package: + name: lvm2 + register: result + until: result is succeeded + tags: with_pkg + when: inventory_hostname in groups.get(osd_group_name, []) + - name: start container service service: name: '{{ container_service_name }}'
refactor: use frappe.get_system_settings Not sure why this needs YET ANOTHER separate cache.
@@ -1157,10 +1157,7 @@ class Database: return INDEX_PATTERN.sub(r"", index_name) def get_system_setting(self, key): - def _load_system_settings(): - return self.get_singles_dict("System Settings") - - return frappe.cache().get_value("system_settings", _load_system_settings).get(key) + return frappe.get_system_settings(key) def close(self): """Close database connection."""
Try workbox's NetworkFirst strategy Might "just work" for the online parts when available (firebase etc), while falling back to cache ...
// Otherwise webpack can fail silently // https://github.com/facebook/create-react-app/issues/8014 -// <TODO-DELETE> -// import {serviceWorkerFetchListener} from "sync-message"; -// -// console.log(self.__WB_MANIFEST); -// -// const fetchListener = serviceWorkerFetchListener(); -// -// addEventListener('fetch', fetchListener); -// -// addEventListener('install', function (e) { -// e.waitUntil(self.skipWaiting()); -// }); -// -// addEventListener('activate', function (e) { -// e.waitUntil(self.clients.claim()); -// }); -// </TODO-DELETE> - - // This service worker can be customized! // See https://developers.google.com/web/tools/workbox/modules // for the list of available Workbox modules, or add any other @@ -32,7 +13,7 @@ import { clientsClaim } from 'workbox-core'; import { ExpirationPlugin } from 'workbox-expiration'; import { precacheAndRoute, createHandlerBoundToURL } from 'workbox-precaching'; import { registerRoute } from 'workbox-routing'; -import { StaleWhileRevalidate } from 'workbox-strategies'; +import {NetworkFirst, StaleWhileRevalidate} from 'workbox-strategies'; clientsClaim(); @@ -67,23 +48,19 @@ registerRoute( createHandlerBoundToURL(process.env.PUBLIC_URL + '/index.html') ); -// An example runtime caching route for requests that aren't handled by the -// precache, in this case same-origin .png requests like those from in public/ registerRoute( - // Add in any other file extensions or routing criteria as needed. ({ url }) => true, // XXX Caching everything for now. It breaks firebase, but it makes most of the PWA work offline. We can narrow it down later. - new StaleWhileRevalidate({ + new NetworkFirst({ cacheName: 'everything', plugins: [ - // Ensure that once this runtime cache reaches a maximum size the - // least-recently used stuff is removed. + // "Ensure that once this runtime cache reaches a maximum size the + // least-recently used stuff is removed." new ExpirationPlugin({ maxEntries: 50 }), ], }) ); -// This allows the web app to trigger skipWaiting via -// registration.waiting.postMessage({type: 'SKIP_WAITING'}) +// "This allows the web app to trigger skipWaiting via registration.waiting.postMessage({type: 'SKIP_WAITING'})" self.addEventListener('message', (event) => { if (event.data && event.data.type === 'SKIP_WAITING') { self.skipWaiting();
Realized SSA isn't a full app; it's just a directory with detections. Made changes so that it won't run through slim or appinspect. Will manually verify that it is correct by looking at the artifacts.
@@ -272,7 +272,7 @@ jobs: slim package -o upload DA-ESS-ContentUpdate slim package -o upload DA-ESS_AmazonWebServices_Content slim package -o upload dev_sec_ops_analytics - slim package -o upload SSA_Content + $slim package -o upload SSA_Content cp upload/DA-ESS-ContentUpdate-*.tar.gz DA-ESS-ContentUpdate-latest.tar.gz sha256sum DA-ESS-ContentUpdate-latest.tar.gz > checksum.txt @@ -283,10 +283,14 @@ jobs: cp upload/dev_sec_ops_analytics-*tar.gz dev_sec_ops_analytics-latest.tar.gz sha256sum dev_sec_ops_analytics-latest.tar.gz >> checksum.txt + #Do this copy so that we conform as much as possible, and have to make + #as few changes as possible, once we start generating this as a real, + #properly packaged app + tar -zcf upload/SSA_Content-NO_SLIM.tar.gz SSA_Content cp upload/SSA_Content-*.tar.gz SSA_Content-latest.tar.gz sha256sum SSA_Content-latest.tar.gz >> checksum.txt - touch tag-canary.txt + - name: store_artifacts uses: actions/upload-artifact@v2 @@ -347,7 +351,8 @@ jobs: ./appinspect.sh ../ DA-ESS-ContentUpdate-latest.tar.gz "$APPINSPECT_USERNAME" "$APPINSPECT_PASSWORD" ./appinspect.sh ../ DA-ESS_AmazonWebServices_Content-latest.tar.gz "$APPINSPECT_USERNAME" "$APPINSPECT_PASSWORD" ./appinspect.sh ../ dev_sec_ops_analytics-latest.tar.gz "$APPINSPECT_USERNAME" "$APPINSPECT_PASSWORD" - ./appinspect.sh ../ SSA_Content-latest.tar.gz "$APPINSPECT_USERNAME" "$APPINSPECT_PASSWORD" + #Not a real app yet, so it will fail appinspect - remove the comment when it's a real app generated with SLIM + #./appinspect.sh ../ SSA_Content-latest.tar.gz "$APPINSPECT_USERNAME" "$APPINSPECT_PASSWORD" - name: Create report artifact if: always()
Explicitly don't support nbytes in Series As discussed in issue , nbytes should not be supported on Series.
@@ -32,7 +32,6 @@ class _MissingPandasLikeSeries(object): # Properties axes = unsupported_property('axes') iat = unsupported_property('iat') - nbytes = unsupported_property('nbytes') # Deprecated properties blocks = unsupported_property('blocks', deprecated=True) @@ -156,6 +155,11 @@ class _MissingPandasLikeSeries(object): real = unsupported_property( 'real', reason="If you want to collect your data as an NumPy array, use 'to_numpy()' instead.") + nbytes = unsupported_property( + 'nbytes', + reason="'nbytes' requires to compute whole dataset. You can calculate manually it, " + "with its 'itemsize', by explicitly executing its count. Use Spark's web UI " + "to monitor disk and memory usage of your application in general.") # Functions we won't support. memory_usage = common.memory_usage(unsupported_function)
Fix attribute error during rapid disconnects in VoiceClient. Fix
@@ -220,6 +220,7 @@ class VoiceClient(VoiceProtocol): self._player = None self.encoder = None self._lite_nonce = 0 + self.ws = None warn_nacl = not has_nacl supported_modes = (
Order Loss functions alphabetically in nn.rst Summary: Pull Request resolved:
@@ -1182,6 +1182,11 @@ Loss functions .. autofunction:: binary_cross_entropy +:hidden:`binary_cross_entropy_with_logits` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: binary_cross_entropy_with_logits + :hidden:`poisson_nll_loss` ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1247,11 +1252,6 @@ Loss functions .. autofunction:: nll_loss -:hidden:`binary_cross_entropy_with_logits` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: binary_cross_entropy_with_logits - :hidden:`smooth_l1_loss` ~~~~~~~~~~~~~~~~~~~~~~~~
Update environments.py Added a more informative error message for possible failures.
@@ -383,7 +383,7 @@ class HolodeckEnvironment(object):
         try:
             loading_semaphore.acquire(100)
         except posix_ipc.BusyError:
-            raise HolodeckException("Timed out waiting for binary to load")
+            raise HolodeckException("Timed out waiting for binary to load. Ensure that holodeck is not being run with root privileges.")
         loading_semaphore.unlink()
 
     def __windows_start_process__(self, binary_path, task_key, verbose):
Change log setup in deployd This should be better than the previous option. It logs to both stderr and syslog with a message that includes the log level, module/class name, and the message.
@@ -4,6 +4,8 @@ from __future__ import unicode_literals import inspect import logging +import logging.handlers +import os import socket import time @@ -81,13 +83,19 @@ class DeployDaemon(PaastaThread): super(DeployDaemon, self).__init__() self.started = False self.daemon = True + self.config = load_system_paasta_config() + root_logger = logging.getLogger() + root_logger.setLevel(getattr(logging, self.config.get_deployd_log_level())) + log_handlers = [logging.StreamHandler()] + if os.path.exists('/dev/log'): + log_handlers.append(logging.handlers.SysLogHandler('/dev/log')) + for handler in log_handlers: + root_logger.addHandler(handler) + handler.setFormatter(logging.Formatter('%(levelname)s:%(name)s:%(message)s')) self.bounce_q = PaastaQueue("BounceQueue") self.inbox_q = PaastaQueue("InboxQueue") self.control = PaastaQueue("ControlQueue") self.inbox = Inbox(self.inbox_q, self.bounce_q) - self.config = load_system_paasta_config() - log_format = '%(asctime)s:%(levelname)s:%(name)s:%(message)s' - logging.basicConfig(level=getattr(logging, self.config.get_deployd_log_level()), format=log_format) def run(self): self.log.info("paasta-deployd starting up...")
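A standalone sketch of the logging setup introduced above (the level name is hard-coded here rather than read from the PaaSTA config): the root logger gets a stderr handler and, when `/dev/log` exists, a syslog handler, each with a formatter carrying the level and logger name.

```python
import logging
import logging.handlers
import os


def setup_logging(level_name="INFO"):
    root_logger = logging.getLogger()
    root_logger.setLevel(getattr(logging, level_name))
    handlers = [logging.StreamHandler()]  # stderr
    if os.path.exists('/dev/log'):
        handlers.append(logging.handlers.SysLogHandler('/dev/log'))
    for handler in handlers:
        handler.setFormatter(logging.Formatter('%(levelname)s:%(name)s:%(message)s'))
        root_logger.addHandler(handler)


setup_logging("DEBUG")
logging.getLogger("deployd.example").info("logging configured")
```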
first patch: allow using the model's own metamodel when loading a file with an unknown extension
@@ -142,16 +142,24 @@ class GlobalModelRepository(object): Returns: the list of loaded models """ - from textx import metamodel_for_file + from textx import metamodel_for_file, get_metamodel if model: self.update_model_in_repo_based_on_filename(model) + the_metamodel = get_metamodel(model) # default metamodel + else: + the_metamodel = None filenames = glob.glob(filename_pattern, **glob_args) if len(filenames) == 0: raise IOError( errno.ENOENT, os.strerror(errno.ENOENT), filename_pattern) loaded_models = [] for filename in filenames: + try: the_metamodel = metamodel_for_file(filename) + # TODO, I would prefer to query if the language was found... + except: + if the_metamodel is None: # no metamodel defined... + raise loaded_models.append( self.load_model(the_metamodel, filename, is_main_model, encoding=encoding,
Fix typos in flask_rest_api_tutorial dependenices -> dependencies, documentaion -> documentation, and "and and" -> "and"
@@ -42,7 +42,7 @@ with high performance requirements. For that: # Dependencies # ------------ # -# Install the required dependenices by running the following command: +# Install the required dependencies by running the following command: # # :: # @@ -53,7 +53,7 @@ with high performance requirements. For that: # Simple Web Server # ----------------- # -# Following is a simple webserver, taken from Flask's documentaion +# Following is a simple webserver, taken from Flask's documentation from flask import Flask @@ -335,7 +335,7 @@ with open("../_static/img/sample_file.jpeg", 'rb') as f: # Next steps # -------------- # -# The server we wrote is quite trivial and and may not do everything +# The server we wrote is quite trivial and may not do everything # you need for your production application. So, here are some things you # can do to make it better: #
Add new troubleshooting case Add a case where an older version of python3 interferes with the installation of poetry.
@@ -7,6 +7,8 @@ Troubleshooting - `Many missing packages <#many-missing-packages>`__ - `Error: Poetry could not find a pyproject.toml file <#error-poetry-could-not-find-a-pyproject-toml-file>`__ + - `Error: Poetry \"The virtual environment seems to be broken\" + <#error-poetry-the-virtual-environment-seems-to-be-broken>`__ - `Using VSCode <#using-vscode>`__ @@ -50,6 +52,12 @@ Poetry provides different packages according to the folder, and depends on the ``pyproject.toml`` file in the current folder. Make sure to run ``poetry`` in the root folder of LISA. +Error: Poetry "The virtual environment seems to be broken" +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Ensure that ``python3 --version`` returns python3.8 before trying to install poetry. If the command points to an older version of python3, you must uninstall then reinstall poetry after ensuring that virtualenv is installed with pip3 using python3.8. + + Using VSCode ------------
Documentation LU Decomposition: deriving L, U, and P Summary: Add note to LU decomposition to use `lu_unpack` to get `L`, `U`, and `P`. Fixes Pull Request resolved:
@@ -984,6 +984,9 @@ def _lu_impl(A, pivot=True, get_infos=False, out=None): for singular matrices due to the bug in the MAGMA library (see magma issue 13). + .. note:: + ``L``, ``U``, and ``P`` can be derived using :func:`torch.lu_unpack`. + Arguments: A (Tensor): the tensor to factor of size :math:`(*, m, n)` pivot (bool, optional): controls whether pivoting is done. Default: ``True``
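A brief example of the note added above, using the 1.x-era API the docstring refers to (assumed still available in that form): `Tensor.lu()` returns the packed factorization and pivots, and `torch.lu_unpack` recovers `P`, `L`, and `U` explicitly.

```python
import torch

A = torch.randn(3, 3)
LU, pivots = A.lu()                    # packed LU factorization with pivots
P, L, U = torch.lu_unpack(LU, pivots)  # explicit permutation and triangular factors
print(torch.allclose(P @ L @ U, A, atol=1e-5))
```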
ENH: added NaN testing Added NaN testing to list evaluation functions.
import numpy as np -def assert_list_contains(small_list, big_list): +def assert_list_contains(small_list, big_list, test_nan=False): """ Assert all elements of one list exist within the other list Parameters @@ -18,6 +18,8 @@ def assert_list_contains(small_list, big_list): List whose values must all be present within big_list big_list : list List that must contain all the values in small_list + test_nan : bool + Test the lists for the presence of NaN values Raises ------ @@ -25,14 +27,25 @@ def assert_list_contains(small_list, big_list): If a small_list value is missing from big_list """ + if test_nan: + big_num_nan = np.isnan(big_list).sum() + small_num_nan = 0 + # Test the presence of non-NaN values from `small_list` in `big_list` and + # determine the number of NaN values in `small_list` for value in small_list: + if test_nan and np.isnan(value): + small_num_nan += 1 + else: assert value in big_list, "{:} not in {:}".format(value, big_list) + if test_nan: + # Ensure `small_list` does not have more NaNs than `big_list` + assert small_num_nan <= big_num_nan return -def assert_lists_equal(list1, list2): +def assert_lists_equal(list1, list2, test_nan=False): """Assert that the lists contain the same elements Parameters @@ -41,6 +54,8 @@ def assert_lists_equal(list1, list2): Input list one list2 : list Input list two + test_nan : bool + Test the lists for the presence of NaN values Raises ------ @@ -55,7 +70,7 @@ def assert_lists_equal(list1, list2): """ assert len(list1) == len(list2) - assert_list_contains(list1, list2) + assert_list_contains(list1, list2, test_nan=test_nan) return
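A self-contained sketch of the NaN handling added above (simplified, not the project's actual helpers): since `NaN != NaN`, a plain `in` check cannot match NaN entries, so NaNs are counted separately and only the non-NaN values go through the membership test.

```python
import numpy as np


def list_contains(small_list, big_list, test_nan=False):
    """Return True if every element of small_list is present in big_list."""
    if test_nan:
        # big_list must have at least as many NaNs as small_list
        if np.isnan(small_list).sum() > np.isnan(big_list).sum():
            return False
        small_list = [value for value in small_list if not np.isnan(value)]
    return all(value in big_list for value in small_list)


print(list_contains([1.0, np.nan], [np.nan, 2.0, 1.0], test_nan=True))  # True
```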
server: Drop redundant `VettedPeer.__init__` server: Drop unused `VettedPeer.__init__`
@@ -26,10 +26,6 @@ class VettedPeer: last_attempt: uint64 = uint64(0) time_added: uint64 = uint64(0) - def __init__(self, h: str, p: uint16): - self.host = h - self.port = p - def __eq__(self, rhs: object) -> bool: return self.host == rhs.host and self.port == rhs.port # type: ignore[no-any-return, attr-defined]
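For context, a hedged sketch assuming `VettedPeer` is a dataclass-style class: the decorator already generates an `__init__` that accepts `host` and `port`, which is why the hand-written constructor removed above was redundant.

```python
from dataclasses import dataclass


@dataclass
class Peer:
    host: str
    port: int
    last_attempt: int = 0
    time_added: int = 0


# The generated __init__ already accepts host and port; no manual __init__ needed.
print(Peer("127.0.0.1", 8444))
```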
[tune/release] Demote xgboost_sweep to weekly testing XGBoost functionality is tested daily in the xgboost release test suite. The expensive XGBoost sweep test can thus be run weekly.
@@ -198,7 +198,6 @@ NIGHTLY_TESTS = { SmokeTest("network_overhead"), "result_throughput_cluster", "result_throughput_single_node", - "xgboost_sweep", ], "~/ray/release/xgboost_tests/xgboost_tests.yaml": [ "train_small", @@ -252,6 +251,7 @@ WEEKLY_TESTS = { "~/ray/release/tune_tests/scalability_tests/tune_tests.yaml": [ "network_overhead", "long_running_large_checkpoints", + "xgboost_sweep", ], "~/ray/release/rllib_tests/rllib_tests.yaml": [ "learning_tests",
remove not from vault utils If the role is 'master' then the vault configs should just be pulled from the opts dictionary
@@ -98,7 +98,7 @@ def _get_vault_connection(): Get the connection details for calling Vault, from local configuration if it exists, or from the master otherwise ''' - if 'vault' in __opts__ and not __opts__.get('__role', 'minion') == 'master': + if 'vault' in __opts__ and __opts__.get('__role', 'minion') == 'master': log.debug('Using Vault connection details from local config') try: return {
Fix precommit pyupgrade noticed that with `from __future__ import annotations` I no longer needed to import List etc. from typing, so it made the change. But then pylint was not happy because an import was no longer used.
@@ -27,16 +27,14 @@ import random import time from abc import ABC from enum import Enum -from typing import Callable, Deque, Dict, List +from typing import Callable, Deque from esrally import exceptions from esrally.track import track from esrally.utils import io -# pylint: disable=used-before-assignment - -__PARAM_SOURCES_BY_OP: Dict[track.OperationType, ParamSource] = {} -__PARAM_SOURCES_BY_NAME: Dict[str, ParamSource] = {} +__PARAM_SOURCES_BY_OP: dict[track.OperationType, ParamSource] = {} +__PARAM_SOURCES_BY_NAME: dict[str, ParamSource] = {} def param_source_for_operation(op_type, track, params, task_name): @@ -947,7 +945,7 @@ def create_readers( num_clients: int, start_client_index: int, end_client_index: int, - corpora: List[track.DocumentCorpus], + corpora: list[track.DocumentCorpus], batch_size: int, bulk_size: int, id_conflicts: IndexIdConflict, @@ -955,7 +953,7 @@ def create_readers( on_conflict: str, recency: str, create_reader: Callable[..., IndexDataReader], -) -> List[IndexDataReader]: +) -> list[IndexDataReader]: """ Return a list of IndexDataReader instances to allow a range of clients to read their share of corpora. @@ -970,7 +968,7 @@ def create_readers( of the first corpus. Then I move on to the first partition of the first file of the second corpus, and so on. """ - corpora_readers: List[Deque[IndexDataReader]] = [] + corpora_readers: list[Deque[IndexDataReader]] = [] total_readers = 0 # stagger which corpus each client starts with for better parallelism (see 1. above) start_corpora_id = start_client_index % len(corpora) @@ -991,7 +989,7 @@ def create_readers( corpora_readers.append(reader_queue) # Stagger which files will be read (see 2. above) - staggered_readers: List[IndexDataReader] = [] + staggered_readers: list[IndexDataReader] = [] while total_readers > 0: for reader_queue in corpora_readers: # Since corpora don't necessarily contain the same number of documents, we
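A minimal illustration of what pyupgrade changed here: with `from __future__ import annotations`, annotations are not evaluated at runtime, so builtin generics such as `list[...]` and `dict[...]` work directly and the matching `typing` imports become unused, which is exactly what pylint then flagged.

```python
from __future__ import annotations


# No `from typing import Dict, List` needed for these annotations.
def count_words(words: list[str]) -> dict[str, int]:
    counts: dict[str, int] = {}
    for word in words:
        counts[word] = counts.get(word, 0) + 1
    return counts


print(count_words(["a", "b", "a"]))
```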
refactor: rename ZEPHYR_MIRROR_BUGDOWN_KEY and DEFAULT_BUGDOWN_KEY rename ZEPHYR_MIRROR_BUGDOWN_KEY to ZEPHYR_MIRROR_MARKDOWN_KEY and DEFAULT_BUGDOWN_KEY to DEFAULT_MARKDOWN_KEY. This commit is part of a series of commits aimed at renaming bugdown to markdown.
@@ -1828,8 +1828,8 @@ def get_sub_registry(r: markdown.util.Registry, keys: List[str]) -> markdown.uti # These are used as keys ("realm_filters_keys") to md_engines and the respective # realm filter caches -DEFAULT_BUGDOWN_KEY = -1 -ZEPHYR_MIRROR_BUGDOWN_KEY = -2 +DEFAULT_MARKDOWN_KEY = -1 +ZEPHYR_MIRROR_MARKDOWN_KEY = -2 class Markdown(markdown.Markdown): def __init__(self, *args: Any, **kwargs: Union[bool, int, List[Any]]) -> None: @@ -1988,7 +1988,7 @@ class Markdown(markdown.Markdown): return default def handle_zephyr_mirror(self) -> None: - if self.getConfig("realm") == ZEPHYR_MIRROR_BUGDOWN_KEY: + if self.getConfig("realm") == ZEPHYR_MIRROR_MARKDOWN_KEY: # Disable almost all inline patterns for zephyr mirror # users' traffic that is mirrored. Note that # inline_interesting_links is a treeprocessor and thus is @@ -2069,13 +2069,13 @@ def maybe_update_markdown_engines(realm_filters_key: Optional[int], email_gatewa global realm_filter_data if realm_filters_key is None: all_filters = all_realm_filters() - all_filters[DEFAULT_BUGDOWN_KEY] = [] + all_filters[DEFAULT_MARKDOWN_KEY] = [] for realm_filters_key, filters in all_filters.items(): realm_filter_data[realm_filters_key] = filters make_md_engine(realm_filters_key, email_gateway) # Hack to ensure that getConfig("realm") is right for mirrored Zephyrs - realm_filter_data[ZEPHYR_MIRROR_BUGDOWN_KEY] = [] - make_md_engine(ZEPHYR_MIRROR_BUGDOWN_KEY, False) + realm_filter_data[ZEPHYR_MIRROR_MARKDOWN_KEY] = [] + make_md_engine(ZEPHYR_MIRROR_MARKDOWN_KEY, False) else: realm_filters = realm_filters_for_realm(realm_filters_key) if realm_filters_key not in realm_filter_data or \ @@ -2278,7 +2278,7 @@ def do_convert(content: str, if message_realm is None: message_realm = message.get_realm() if message_realm is None: - realm_filters_key = DEFAULT_BUGDOWN_KEY + realm_filters_key = DEFAULT_MARKDOWN_KEY else: realm_filters_key = message_realm.id @@ -2292,7 +2292,7 @@ def do_convert(content: str, if message.sending_client.name == "zephyr_mirror": # Use slightly customized Markdown processor for content # delivered via zephyr_mirror - realm_filters_key = ZEPHYR_MIRROR_BUGDOWN_KEY + realm_filters_key = ZEPHYR_MIRROR_MARKDOWN_KEY maybe_update_markdown_engines(realm_filters_key, email_gateway) md_engine_key = (realm_filters_key, email_gateway) @@ -2300,10 +2300,10 @@ def do_convert(content: str, if md_engine_key in md_engines: _md_engine = md_engines[md_engine_key] else: - if DEFAULT_BUGDOWN_KEY not in md_engines: + if DEFAULT_MARKDOWN_KEY not in md_engines: maybe_update_markdown_engines(realm_filters_key=None, email_gateway=False) - _md_engine = md_engines[(DEFAULT_BUGDOWN_KEY, email_gateway)] + _md_engine = md_engines[(DEFAULT_MARKDOWN_KEY, email_gateway)] # Reset the parser; otherwise it will get slower over time. _md_engine.reset()
Correct usb_status rather than status Correct the signal: the status bar should show usb info, not the last info sent from the board.
@@ -857,7 +857,7 @@ class MeerK40t(wx.Frame, Module, Job): def on_active_change(self, old_active, context_active): if old_active is not None: old_active.unlisten('pipe;error', self.on_usb_error) - old_active.unlisten("pipe;status", self.on_usb_state_text) + old_active.unlisten("pipe;usb_status", self.on_usb_state_text) old_active.unlisten('pipe;thread', self.on_pipe_state) old_active.unlisten('spooler;thread', self.on_spooler_state) old_active.unlisten('interpreter;position', self.update_position) @@ -865,7 +865,7 @@ class MeerK40t(wx.Frame, Module, Job): old_active.unlisten('bed_size', self.bed_changed) if context_active is not None: context_active.listen('pipe;error', self.on_usb_error) - context_active.listen("pipe;status", self.on_usb_state_text) + context_active.listen("pipe;usb_status", self.on_usb_state_text) context_active.listen('pipe;thread', self.on_pipe_state) context_active.listen('spooler;thread', self.on_spooler_state) context_active.listen('interpreter;position', self.update_position)
Update ensemble_copula_coupling_constants.py 10 km is a silly upper bound for visibility
@@ -62,5 +62,5 @@ bounds_for_ecdf = { "rainfall_rate_in_vicinity": bounds((0, 0.00003), "m s-1"), "lwe_snowfall_rate": bounds((0, 0.00001), "m s-1"), "lwe_snowfall_rate_in_vicinity": bounds((0, 0.00001), "m s-1"), - "visibility_in_air": bounds((0, 10000), "m") + "visibility_in_air": bounds((0, 100000), "m") }
Add --trials alias for --max-trials In many cases it makes more sense to spell max trials simply as `--trials` rather than the more pedantic `--max-trials`. These are equivalent.
@@ -278,7 +278,7 @@ def run_params(fn): help="Flag for OPTIMIZER. May be used multiple times.", ), click.Option( - ("-m", "--max-trials"), + ("-m", "--max-trials", "--trials"), metavar="N", type=click.IntRange(1, None), help=(
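A hedged, standalone sketch of why this works in click (not Guild's actual CLI wiring): every option string declared for a parameter is just another name for the same option, so `--trials` and `--max-trials` both feed the `max_trials` value.

```python
import click


@click.command()
@click.option("-m", "--max-trials", "--trials", "max_trials",
              type=click.IntRange(1, None), default=None,
              help="Maximum number of trials to run.")
def run(max_trials):
    click.echo(f"max_trials={max_trials}")


if __name__ == "__main__":
    run()  # `prog --trials 5` and `prog --max-trials 5` are equivalent
```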
MAINT: Revise comment in numpy.core._dtype.py Replace append_metastr_to_string by metastr_to_unicode.
@@ -176,7 +176,7 @@ def _byte_order_str(dtype): def _datetime_metadata_str(dtype): - # TODO: this duplicates the C append_metastr_to_string + # TODO: this duplicates the C metastr_to_unicode functionality unit, count = np.datetime_data(dtype) if unit == 'generic': return ''
[dagit] Add optional authorization header to HTTP requests Summary: Used in conjunction with D7805 Test Plan: manual run in dagit & network tab confirms that the "authorization" header is present Reviewers: dgibson, dish, bengotow
@@ -4,7 +4,7 @@ import '@blueprintjs/select/lib/css/blueprint-select.css'; import '@blueprintjs/table/lib/css/table.css'; import '@blueprintjs/popover2/lib/css/blueprint-popover2.css'; -import {split, ApolloLink, ApolloClient, ApolloProvider, HttpLink} from '@apollo/client'; +import {concat, split, ApolloLink, ApolloClient, ApolloProvider, HttpLink} from '@apollo/client'; import {WebSocketLink} from '@apollo/client/link/ws'; import {getMainDefinition} from '@apollo/client/utilities'; import {Colors} from '@blueprintjs/core'; @@ -73,11 +73,12 @@ interface Props { graphqlURI: string; basePath?: string; subscriptionParams?: {[key: string]: string}; + headerAuthToken?: string; }; } export const AppProvider: React.FC<Props> = (props) => { - const {basePath = '', subscriptionParams = {}, graphqlURI} = props.config; + const {headerAuthToken = '', basePath = '', subscriptionParams = {}, graphqlURI} = props.config; const httpGraphqlURI = graphqlURI.replace('wss://', 'https://').replace('ws://', 'http://'); @@ -110,7 +111,22 @@ export const AppProvider: React.FC<Props> = (props) => { return forward(operation); }); - const httpLink = new HttpLink({uri: httpURI}); + let httpLink: ApolloLink = new HttpLink({uri: httpURI}); + + // add an auth header to the HTTP requests made by the app + // note that the websocket-based subscriptions will not carry this header + if (headerAuthToken) { + const authMiddleware = new ApolloLink((operation, forward) => { + operation.setContext({ + headers: { + authorization: headerAuthToken, + }, + }); + + return forward(operation); + }); + httpLink = concat(authMiddleware, httpLink); + } const websocketLink = new WebSocketLink(websocketClient);
also get scaler and training data path from dlhub_predictor_dict. Temporary until we can fix errors with getting them from the model servable on DLHub
@@ -97,15 +97,17 @@ def make_prediction(dlhub_servable, prediction_data, scaler_path, training_data_ """ # Featurize the prediction data + print('Starting featurizing') compositions, X_test = featurize_mastml(prediction_data, scaler_path, training_data_path, exclude_columns) - + print('Done featurizing') # Run the predictions on the DLHub server dl = DLHubClient() # Ryan Chard: it seems this needs to be changed to something like what is commented below: # model = joblib.load(servable['dlhub']['files']['model']) #y_pred_new = model.predict(X_test) + print('Running predictions') y_pred_new = dl.run(name=dlhub_servable, inputs=X_test.tolist()) - + print('Done getting predictions') pred_dict = dict() for comp, pred in zip(compositions, y_pred_new.tolist()): pred_dict[comp] = pred @@ -115,7 +117,8 @@ def make_prediction(dlhub_servable, prediction_data, scaler_path, training_data_ df_pred.to_excel('new_material_predictions.xlsx') return pred_dict -def run(dlhub_servable, prediction_data): +def run(dlhub_predictor_dict): + # dlhub_predictor_dict: dict containing the following two keys: # dlhub_servable: the servable name. This is needed because it runs dlhub.run() internally to make the model inference. # For this example, use 'rjacobs3_wisc/Bandgap_GW_2020_04_20' # prediction_data: the material composition to be featurized and predicted. This is what we would like the new input @@ -129,7 +132,13 @@ def run(dlhub_servable, prediction_data): # exclude_columns: Other column names that are in the "selected.csv" file but not used in featurization. Just hard # coded for now, will make general later if this works as expected + dlhub_servable = dlhub_predictor_dict['dlhub_servable'] + prediction_data = dlhub_predictor_dict['prediction_data'] + scaler_path = dlhub_predictor_dict['scaler_path'] + training_data_path = dlhub_predictor_dict['training_data_path'] servable = DLHubClient().describe_servable(dlhub_servable) - scaler_path = servable['dlhub']['files']['other'][0] - training_data_path = servable['dlhub']['files']['other'][1] + + # TODO: need to get preprocessor and training data info from servable, but this is currently giving FileNotFound errors + #scaler_path = '/Users/ryanjacobs/'+servable['dlhub']['files']['other'][0] + #training_data_path = '/Users/ryanjacobs/'+servable['dlhub']['files']['other'][1] pred_dict = make_prediction(dlhub_servable, prediction_data, scaler_path, training_data_path, exclude_columns=['composition', 'band_gap']) \ No newline at end of file
Adding timeout to windows and local ubuntu jobs. Adding timeout.
@@ -465,6 +465,7 @@ jobs: build_test_ubuntu: name: Local Unit Testing on Ubuntu runs-on: ubuntu-latest + timeout-minutes: 20 container: image: ghcr.io/pyansys/mapdl:v22.2-ubuntu options: "--entrypoint /bin/bash" @@ -529,6 +530,7 @@ jobs: test_windows: name: Unit Testing on Windows runs-on: [self-hosted, Windows, pymapdl] + timeout-minutes: 30 steps: - uses: actions/checkout@v3
Add SlidingFeaturesNodeGenerator to documentation See:
@@ -22,7 +22,7 @@ Generators ----------- .. automodule:: stellargraph.mapper - :members: Generator, FullBatchNodeGenerator, FullBatchLinkGenerator, GraphSAGENodeGenerator, DirectedGraphSAGENodeGenerator, DirectedGraphSAGELinkGenerator, ClusterNodeGenerator, GraphSAGELinkGenerator, HinSAGENodeGenerator, HinSAGELinkGenerator, Attri2VecNodeGenerator, Attri2VecLinkGenerator, Node2VecNodeGenerator, Node2VecLinkGenerator, RelationalFullBatchNodeGenerator, AdjacencyPowerGenerator, GraphWaveGenerator, CorruptedGenerator, PaddedGraphGenerator, KGTripleGenerator + :members: Generator, FullBatchNodeGenerator, FullBatchLinkGenerator, GraphSAGENodeGenerator, DirectedGraphSAGENodeGenerator, DirectedGraphSAGELinkGenerator, ClusterNodeGenerator, GraphSAGELinkGenerator, HinSAGENodeGenerator, HinSAGELinkGenerator, Attri2VecNodeGenerator, Attri2VecLinkGenerator, Node2VecNodeGenerator, Node2VecLinkGenerator, RelationalFullBatchNodeGenerator, AdjacencyPowerGenerator, GraphWaveGenerator, CorruptedGenerator, PaddedGraphGenerator, KGTripleGenerator, SlidingFeaturesNodeGenerator Layers and models
allow broadcasting rules in F+0 operation if dim(F)=1 and dim(Zero)=D, then F+Zero returns F concatenated D times (via SumT operation)
@@ -138,22 +138,22 @@ struct Add_Alias { // A + 0 = A template < class FA, int DIM > struct Add_Alias< FA, Zero< DIM>> { - static_assert(DIM == FA::DIM, "Dimensions must be the same for Add"); - using type = FA; + static_assert((DIM == FA::DIM)||(DIM==1)||(FA::DIM==1), "Incompatible dimensions for Add"); + using type = CondType<SumT<FA,DIM>,FA,FA::DIM==1>; }; // 0 + B = B template < class FB, int DIM > struct Add_Alias< Zero< DIM >, FB > { - static_assert(DIM == FB::DIM, "Dimensions must be the same for Add"); - using type = FB; + static_assert((DIM == FB::DIM)||(DIM==1)||(FB::DIM==1), "Incompatible dimensions for Add"); + using type = CondType<SumT<FB,DIM>,FB,FB::DIM==1>; }; // 0 + 0 = la tete a Toto template < int DIM1, int DIM2 > struct Add_Alias< Zero< DIM1 >, Zero< DIM2>> { - static_assert(DIM1 == DIM2, "Dimensions must be the same for Add"); - using type = Zero< DIM1 >; + static_assert((DIM1 == DIM1)||(DIM1==1)||(DIM2==1), "Incompatible dimensions for Add"); + using type = Zero < ::std::max(DIM1,DIM2) >; }; // m+n = m+n
Lazy load param_dict and use symbol name Only load a param_hash if we actually need it. Also, use a symbol's name rather than its str(), which invokes a bunch of custom sympy Printer nonsense. Speeds up a sample param resolution by almost 100x. (see comment below for details)
@@ -46,13 +46,13 @@ class ParamResolver(object): return super().__new__(cls) def __init__(self, param_dict: ParamResolverOrSimilarType = None) -> None: - if hasattr(self, '_param_hash'): + if hasattr(self, 'param_dict'): return # Already initialized. Got wrapped as part of the __new__. + self._param_hash = None self.param_dict = cast( Dict[Union[str, sympy.Symbol], Union[float, str, sympy.Symbol]], {} if param_dict is None else param_dict) - self._param_hash = hash(frozenset(self.param_dict.items())) def value_of(self, value: Union[sympy.Basic, float, str]) -> value.TParamVal: @@ -79,7 +79,6 @@ class ParamResolver(object): Returns: The value of the parameter as resolved by this resolver. """ - # Input is a float, no resolution needed: return early if isinstance(value, float): return value @@ -102,8 +101,8 @@ class ParamResolver(object): # Input is a symbol (sympy.Symbol('a')) and its string maps to a number # in the dictionary ({'a': 1.0}). Return it. - if isinstance(value, sympy.Symbol) and str(value) in self.param_dict: - param_value = self.param_dict[str(value)] + if (isinstance(value, sympy.Symbol) and value.name in self.param_dict): + param_value = self.param_dict[value.name] if isinstance(param_value, (float, int)): return param_value @@ -133,6 +132,8 @@ class ParamResolver(object): return self.value_of(key) def __hash__(self): + if self._param_hash is None: + self._param_hash = hash(frozenset(self.param_dict.items())) return self._param_hash def __eq__(self, other):
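A stripped-down sketch of the two optimizations above (not the full ParamResolver): the frozenset-based hash is computed only the first time `__hash__` is called, and symbol lookups use the cheap `.name` attribute instead of `str()`, which would route through sympy's printer machinery.

```python
import sympy


class Resolver:
    def __init__(self, param_dict=None):
        self.param_dict = dict(param_dict or {})
        self._param_hash = None  # filled in lazily on first __hash__

    def value_of(self, value):
        if isinstance(value, sympy.Symbol) and value.name in self.param_dict:
            return self.param_dict[value.name]
        return value

    def __hash__(self):
        if self._param_hash is None:
            self._param_hash = hash(frozenset(self.param_dict.items()))
        return self._param_hash


resolver = Resolver({"a": 1.0})
print(resolver.value_of(sympy.Symbol("a")), hash(resolver))
```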
Fix - simple remote README isn't using remote private keys Fix Changes: use remote private keys; and rename user's config.ini -> myconfig.ini
@@ -26,7 +26,7 @@ From [get-test-MATIC](get-test-MATIC.md), do: ### Create Config File for Services -In your working directory, create a file `config.ini` and fill it with the following. It will use pre-existing services running for mumbai testnet. +In your working directory, create a file `myconfig.ini` and fill it with the following. It will use pre-existing services running for mumbai testnet. ```text [eth-network] @@ -45,14 +45,14 @@ provider.url = https://v4.provider.mumbai.oceanprotocol.com In the console: ```console # For services: point to config file -export OCEAN_CONFIG_FILE=config.ini +export OCEAN_CONFIG_FILE=myconfig.ini # For services: ensure no other envvars that override config file values unset OCEAN_NETWORK_URL METADATA_CACHE_URI AQUARIUS_URL PROVIDER_URL # For accounts: set private keys -export TEST_PRIVATE_KEY1=<your TEST_PRIVATE_KEY1> -export TEST_PRIVATE_KEY2=<your TEST_PRIVATE_KEY2> +export REMOTE_TEST_PRIVATE_KEY1=<your REMOTE_TEST_PRIVATE_KEY1> +export REMOTE_TEST_PRIVATE_KEY2=<your REMOTE_TEST_PRIVATE_KEY2> ``` ### Setup in Python @@ -76,7 +76,7 @@ ocean = Ocean(config) # Create Alice's wallet import os from ocean_lib.web3_internal.wallet import Wallet -alice_private_key = os.getenv('TEST_PRIVATE_KEY1') +alice_private_key = os.getenv('REMOTE_TEST_PRIVATE_KEY1') alice_wallet = Wallet(ocean.web3, alice_private_key, config.block_confirmations, config.transaction_timeout) ```
llvm/execution: Force 'additional_tags' argument to be keyword only Fixes:
@@ -241,7 +241,7 @@ class MechExecution(FuncExecution): class CompExecution(CUDAExecution): - def __init__(self, composition, execution_ids=[None], additional_tags=frozenset()): + def __init__(self, composition, execution_ids=[None], *, additional_tags=frozenset()): super().__init__(buffers=['state_struct', 'param_struct', 'data_struct', 'conditions']) self._composition = composition self._execution_contexts = [
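A quick illustration of the signature change above: a bare `*` makes every parameter after it keyword-only, so `additional_tags` can no longer be passed positionally by accident.

```python
def execute(composition, execution_ids=None, *, additional_tags=frozenset()):
    return composition, execution_ids, additional_tags


print(execute("comp", [None], additional_tags={"cuda"}))  # OK
try:
    execute("comp", [None], {"cuda"})  # now rejected
except TypeError as err:
    print(err)
```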
CI: another try to fix code cov Put the file back into the same state that was used when the pipeline passed.
@@ -32,7 +32,6 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - python -m pip install -r requirements.txt python -m pip install -r requirements/ci.txt python -m pip install -e .
lint: flake8: B020 issue flake8 output: lona/html/attribute_dict.py:93:22: B020 Found for loop that reassigns the iterable it is iterating with each iterable value.
@@ -90,8 +90,8 @@ class AttributeDict: raise ValueError('dict required') with self._node.lock: - for key, value in value.items(): - self[key] = value + for key, _value in value.items(): + self[key] = _value def __getitem__(self, name): with self._node.lock:
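A small, generic reproduction of what flake8-bugbear's B020 flags (not the Lona class itself): the loop rebinds the very name being iterated over; giving the per-item variable a distinct name, as the patch does, removes the ambiguity.

```python
def update(target, value):
    # B020-style bug (commented out): `value` is both the dict being iterated
    # and the per-item loop variable.
    # for key, value in value.items():
    #     target[key] = value

    # Fixed, mirroring the patch: use a distinct name for the per-item value.
    for key, _value in value.items():
        target[key] = _value
    return target


print(update({}, {"id": "main", "class": "row"}))
```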
Update README.rst This PR will: correct invalid module qualifiers with `pika.connection.Connection`; use high-level module qualifiers `pika.BaseConnection`; embed URL in AsyncIO, Tornado and Twisted; remove unnecessary reST markup for plain URL
@@ -21,8 +21,7 @@ RabbitMQ's extensions. Documentation ------------- -Pika's documentation can be found at -`https://pika.readthedocs.io <https://pika.readthedocs.io>`_. +Pika's documentation can be found at https://pika.readthedocs.io. Example ------- @@ -66,15 +65,16 @@ Pika provides the following adapters ------------------------------------ - ``pika.adapters.asyncio_connection.AsyncioConnection`` - asynchronous adapter - for the Python 3 AsyncIO I/O loop. + for Python 3 `AsyncIO <https://docs.python.org/3/library/asyncio.html>`_'s + I/O loop. - ``pika.BlockingConnection`` - synchronous adapter on top of library for simple usage. - ``pika.SelectConnection`` - asynchronous adapter without third-party dependencies. - ``pika.adapters.tornado_connection.TornadoConnection`` - asynchronous adapter - for use with the Tornado I/O loop http://tornadoweb.org. + for use with `Tornado <http://tornadoweb.org>`_'s I/O loop. - ``pika.adapters.twisted_connection.TwistedProtocolConnection`` - asynchronous - adapter for use with the Twisted I/O loop http://twistedmatrix.com. + adapter for use with `Twisted <http://twistedmatrix.com>`_'s I/O loop. Multiple connection parameters ------------------------------ @@ -178,7 +178,7 @@ for connection errors. Here is a very basic example: while True: try: - connection = pika.BlockingConnection(parameters) + connection = pika.BlockingConnection() channel = connection.channel() channel.basic_consume('test', on_message_callback) channel.start_consuming() @@ -206,7 +206,7 @@ retries and limiting the number of retries: @retry(pika.exceptions.AMQPConnectionError, delay=5, jitter=(1, 3)) def consume(): - connection = pika.BlockingConnection(parameters) + connection = pika.BlockingConnection() channel = connection.channel() channel.basic_consume('test', on_message_callback) @@ -246,17 +246,16 @@ Extending to support additional I/O frameworks ---------------------------------------------- New non-blocking adapters may be implemented in either of the following ways: -- By subclassing ``pika.adapters.base_connection.BaseConnection`` and - implementing its abstract method(s) and passing ``pika.BaseConnection``'s - constructor an implementation of +- By subclassing ``pika.BaseConnection``, implementing its abstract method and + passing its constructor an implementation of ``pika.adapters.utils.nbio_interface.AbstractIOServices``. - ``pika.BaseConnection`` implements `pika.connection.connection.Connection`'s - pure virtual methods, including internally-initiated connection logic. For + ``pika.BaseConnection`` implements ``pika.connection.Connection``'s pure + virtual methods, including internally-initiated connection logic. For examples, refer to the implementations of ``pika.adapters.asyncio_connection.AsyncioConnection`` and ``pika.adapters.tornado_connection.TornadoConnection``. -- By subclassing ``pika.connection.connection.Connection`` and implementing its - abstract method(s). This approach facilitates implementation of custom +- By subclassing ``pika.connection.Connection`` and implementing its abstract + methods. This approach facilitates implementation of custom connection-establishment and transport mechanisms. For an example, refer to the implementation of ``pika.adapters.twisted_connection.TwistedProtocolConnection``.