ENH: app.io.write_db now accepts plain Python data structures. [NEW] Built-in types that can be converted to JSON are allowed.
@@ -505,8 +505,11 @@ class write_db(_checkpointable):
     def write(self, data, identifier=None):
         if identifier is None:
             identifier = self._make_output_identifier(data)
-        out = data.to_json()
-        stored = self.data_store.write(identifier, out)
+        try:
+            data = data.to_json()
+        except AttributeError:
+            data = json.dumps(data)
+        stored = self.data_store.write(identifier, data)
         try:
             data.info.stored = stored
         except AttributeError:
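The fallback works because plain built-ins have no to_json method, so the AttributeError branch hands them to the standard-library encoder instead. A minimal sketch of the same pattern (the to_json method here is a stand-in for whatever the data objects provide):

    import json

    def to_json_text(data):
        # Objects that expose to_json() serialize themselves; plain
        # built-ins (dict, list, str, ...) fall back to json.dumps.
        try:
            return data.to_json()
        except AttributeError:
            return json.dumps(data)

    print(to_json_text({"a": 1}))  # {"a": 1}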
Track global failures flag for YAML validations Closes-bug:
@@ -831,6 +831,7 @@ def validate(filename, param_map):
            },
            ...
        ]}
+    Returns a global retval that indicates any failures had been in the check progress.
    """
    if args.quiet < 1:
        print('Validating %s' % filename)
@@ -867,23 +868,23 @@ def validate(filename, param_map):
         if VALIDATE_PUPPET_OVERRIDE.get(filename, False) or (
                 filename.startswith('./puppet/services/') and
                 VALIDATE_PUPPET_OVERRIDE.get(filename, True)):
-            retval = validate_service(filename, tpl)
+            retval |= validate_service(filename, tpl)

         if re.search(r'(puppet|docker)\/services', filename):
-            retval = validate_service_hiera_interpol(filename, tpl)
+            retval |= validate_service_hiera_interpol(filename, tpl)

         if filename.startswith('./docker/services/logging/'):
-            retval = validate_docker_logging_template(filename, tpl)
+            retval |= validate_docker_logging_template(filename, tpl)
         elif VALIDATE_DOCKER_OVERRIDE.get(filename, False) or (
                 filename.startswith('./docker/services/') and
                 VALIDATE_DOCKER_OVERRIDE.get(filename, True)):
-            retval = validate_docker_service(filename, tpl)
+            retval |= validate_docker_service(filename, tpl)

         if filename.endswith('hyperconverged-ceph.yaml'):
-            retval = validate_hci_compute_services_default(filename, tpl)
+            retval |= validate_hci_compute_services_default(filename, tpl)

         if filename.startswith('./roles/ComputeHCI.yaml'):
-            retval = validate_hci_computehci_role(filename, tpl)
+            retval |= validate_hci_computehci_role(filename, tpl)

         if filename.startswith('./roles/ComputeOvsDpdk.yaml') or \
                 filename.startswith('./roles/ComputeSriov.yaml') or \
@@ -896,29 +897,32 @@ def validate(filename, param_map):
                        'OS::TripleO::Services::NeutronVppAgent',
                        'OS::TripleO::Services::Vpp',
                        'OS::TripleO::Services::NeutronLinuxbridgeAgent']
-            retval = validate_with_compute_role_services(filename, tpl, exclude)
+            retval |= validate_with_compute_role_services(filename, tpl, exclude)

         if filename.startswith('./roles/ComputeRealTime.yaml'):
             exclude = [
                 'OS::TripleO::Services::Tuned',
             ]
-            retval = validate_with_compute_role_services(filename, tpl, exclude)
+            retval |= validate_with_compute_role_services(filename, tpl, exclude)

         if filename.startswith('./roles/Hci'):
-            retval = validate_hci_role(filename, tpl)
+            retval |= validate_hci_role(filename, tpl)

         if filename.startswith('./roles/Ceph'):
-            retval = validate_ceph_role(filename, tpl)
+            retval |= validate_ceph_role(filename, tpl)

         if filename.startswith('./roles/ControllerNoCeph.yaml'):
-            retval = validate_controller_no_ceph_role(filename, tpl)
+            retval |= validate_controller_no_ceph_role(filename, tpl)

         if filename.startswith('./network_data_'):
-            retval = validate_network_data_file(filename)
+            result = validate_network_data_file(filename)
+            retval |= result
+        else:
+            result = retval

-        if retval == 0 and is_heat_template:
+        if result == 0 and is_heat_template:
             # check for old style nic config files
-            retval = validate_nic_config_file(filename, tpl)
+            retval |= validate_nic_config_file(filename, tpl)

     except Exception:
         print(traceback.format_exc())
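Switching from = to |= makes retval a sticky failure flag: once any validator returns a non-zero result, a later successful check can no longer overwrite it. A tiny illustration of the accumulation (the checks are made up):

    retval = 0
    checks = [lambda: 1, lambda: 0]  # first validator fails, second passes
    for check in checks:
        retval |= check()
    assert retval == 1  # the early failure survives to the end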
Remove future plan from portgroup document This commit removes "future plan" for portgroup to avoid misunderstanding.
@@ -16,9 +16,7 @@
 configured on the switch have to correspond to the mode and properties that
 will be configured on the ironic side, as bonding mode and properties may be
 named differently on your switch, or have possible values different from the
 ones described in `kernel documentation on bonding`_. Please refer to your
-switch configuration documentation for more details. In the future, we may
-pass the port group mode and properties to ML2 drivers so that they can do the
-configuration automatically, but it is not supported as of now.
+switch configuration documentation for more details.

 Provisioning and cleaning cannot make use of port groups if they need to boot
 the deployment ramdisk via (i)PXE. If your switches or desired port group
Use instance attribute for terminators. If `terminators` is left at its default, `None`, the function fails because the local variable was never updated.
@@ -163,7 +163,7 @@ class StatementParser:
         invalid_command_chars = []
         invalid_command_chars.extend(constants.QUOTES)
         invalid_command_chars.extend(constants.REDIRECTION_CHARS)
-        invalid_command_chars.extend(terminators)
+        invalid_command_chars.extend(self.terminators)
         # escape each item so it will for sure get treated as a literal
         second_group_items = [re.escape(x) for x in invalid_command_chars]
         # add the whitespace and end of string, not escaped because they
AutoPropertiesDSL: strip :param:-like directives from user documentation TN:
@@ -17,6 +17,18 @@ class AutoPropertiesDSL(docutils.parsers.rst.Directive):
     Directive to generate a definition list for all DSL constructors.
     """

+    def _prepare_docstring(self, docstring):
+        """
+        Remove anything that appears after a line that starts with ":". This
+        makes it possible to remove directives like ":param XXX:" which are
+        intended for Langkit developpers, not for users.
+        """
+        result = prepare_docstring(docstring)
+        for i, line in enumerate(result):
+            if line.startswith(':'):
+                return '\n'.join(result[:i]).rstrip().split('\n')
+        return result
+
     def _parse(self, strlist, dest_block):
         self.state.nested_parse(StringList(strlist), 0, dest_block)

@@ -50,7 +62,7 @@ class AutoPropertiesDSL(docutils.parsers.rst.Directive):
         definition = nodes.definition()

         doc = attr_expr.doc or '*Not yet documented*'
-        self._parse(prepare_docstring(doc), definition)
+        self._parse(self._prepare_docstring(doc), definition)

         def_list_item.append(target_node)
         def_list_item.append(term)
Reference logo by full URL Apparently, readthedocs.io is unable to use an automatically generated link to the logo if we use a relative reference. Using the full URL should work for both GH and readthedocs.io.
 HeAT - Helmholtz Analytics Toolkit
 ==================================

-![HeAT Logo](doc/images/logo_HeAT.png)
+![HeAT Logo](https://raw.githubusercontent.com/helmholtz-analytics/heat/master/doc/images/logo_HeAT.png)

 HeAT is a distributed tensor framework for high performance data analytics.
Handle arrays filled with identical values Avoid possible repeated function call
@@ -82,7 +82,7 @@ def analyze(problem, X, Y, num_resamples=10,
     except np.linalg.LinAlgError as e:
         msg = "Singular matrix detected\n"
         msg += "This may be due to the sample size ({}) being too small\n".format(Y.size)
-        msg += "If this is not the case, please raise an issue with the\n"
+        msg += "If this is not the case, check Y values or raise an issue with the\n"
        msg += "SALib team"

        raise np.linalg.LinAlgError(msg)
@@ -95,6 +95,7 @@ def analyze(problem, X, Y, num_resamples=10,
 def calc_delta(Y, Ygrid, X, m):
     N = len(Y)
     fy = gaussian_kde(Y, bw_method='silverman')(Ygrid)
+    abs_fy = np.abs(fy)
     xr = rankdata(X, method='ordinal')

     d_hat = 0
@@ -102,13 +103,14 @@ def calc_delta(Y, Ygrid, X, m):
         ix = np.where((xr > m[j]) & (xr <= m[j + 1]))[0]
         nm = len(ix)

-        if Y[ix].any():
-            fyc = gaussian_kde(Y[ix], bw_method='silverman')(Ygrid)
-            abs_fy = np.abs(fy - fyc)
+        Y_ix = Y[ix]
+        if not np.all(np.equal(Y_ix, Y_ix[0])):
+            fyc = gaussian_kde(Y_ix, bw_method='silverman')(Ygrid)
+            fy_ = np.abs(fy - fyc)
         else:
-            abs_fy = np.abs(fy)
+            fy_ = abs_fy

-        d_hat += (nm / (2 * N)) * np.trapz(abs_fy, Ygrid)
+        d_hat += (nm / (2 * N)) * np.trapz(fy_, Ygrid)

     return d_hat
@@ -119,8 +121,9 @@ def bias_reduced_delta(Y, Ygrid, X, m, num_resamples, conf_level):
     d = np.zeros(num_resamples)
     d_hat = calc_delta(Y, Ygrid, X, m)

+    N = len(Y)
     for i in range(num_resamples):
-        r = np.random.randint(len(Y), size=len(Y))
+        r = np.random.randint(N, size=N)
         d[i] = calc_delta(Y[r], Ygrid, X[r], m)

     d = 2 * d_hat - d
@@ -141,8 +144,9 @@ def sobol_first(Y, X, m):

 def sobol_first_conf(Y, X, m, num_resamples, conf_level):
     s = np.zeros(num_resamples)

+    N = len(Y)
     for i in range(num_resamples):
-        r = np.random.randint(len(Y), size=len(Y))
+        r = np.random.randint(N, size=N)
         s[i] = sobol_first(Y[r], X[r], m)

     return norm.ppf(0.5 + conf_level / 2) * s.std(ddof=1)
Fixed a typo in CubicSpline docstring. Original text in the note section of the CubicSpline docstring: "Parameters `bc_type` and ``interpolate`` work independently". Note that CubicSpline does not have a parameter `interpolate`, but `extrapolate`. This appears to be a typo.
@@ -548,7 +548,7 @@ class CubicSpline(CubicHermiteSpline):

     Notes
     -----
-    Parameters `bc_type` and ``interpolate`` work independently, i.e. the
+    Parameters `bc_type` and ``extrapolate`` work independently, i.e. the
     former controls only construction of a spline, and the latter only
     evaluation.
fix: Set owner & creation if new Document via
@@ -396,6 +396,7 @@ class Document(BaseDocument):
                 "parenttype": self.doctype,
                 "parentfield": fieldname
             })
+
     def get_doc_before_save(self):
         return getattr(self, '_doc_before_save', None)

@@ -468,9 +469,11 @@ class Document(BaseDocument):
         self._original_modified = self.modified
         self.modified = now()
         self.modified_by = frappe.session.user
-        if not self.creation:
+
+        # We'd probably want the creation and owner to be set via API
+        # or Data import at some point, that'd have to be handled here
+        if self.is_new():
             self.creation = self.modified
-        if not self.owner:
             self.owner = self.modified_by

         for d in self.get_all_children():
Fix tests which now rely on the "view" tables Shouldn't the same tables that exist in production exist everywhere in our tests?
@@ -7,6 +7,7 @@ from pyquery import PyQuery as pq

 from django.conf import settings
 from django.core import mail
+from django.db import connection
 from django.http import QueryDict
 from django.test import TestCase

@@ -340,6 +341,15 @@ class TestFrontendHomepageViews(TestCase):
             category='prescribing',
             current_at='2015-09-01'
         )
+        view_create = 'frontend/management/commands/replace_matviews.sql'
+        fixture = 'frontend/tests/fixtures/api_test_data.sql'
+        with connection.cursor() as cursor:
+            with open(view_create, 'r') as f:
+                # Creates the view tables
+                cursor.execute(f.read())
+            with open(fixture, 'r') as f:
+                # Fills them with test data
+                cursor.execute(f.read())

     def test_call_view_ccg_homepage(self):
         response = self.client.get('/ccg/02Q/')
fix: remove tab \t and newlines \n from start of query and remove from middle # Conflicts: # frappe/database/database.py
@@ -116,8 +116,14 @@ class Database(object):
         """
         query = str(query)

+<<<<<<< HEAD
         if not run:
             return query
+=======
+
+        # remove \n \t from start of query and replace them with space anywhere in middle
+        query = re.sub(r'\s', ' ', query).lstrip()
+>>>>>>> ac5effc7dd (fix: remove tab \t and newlines \n from start of query and remove from middle)

         if re.search(r'ifnull\(', query, flags=re.IGNORECASE):
             # replaces ifnull in query with coalesce
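Setting aside the unresolved conflict markers this record carries, the new line normalizes whitespace in one pass: every tab or newline becomes a space, and any leading run is then stripped. A quick sketch of its effect on a sample query (the query text is illustrative):

    import re

    query = "\n\tselect name\n\tfrom tabUser"
    print(repr(re.sub(r'\s', ' ', query).lstrip()))
    # 'select name  from tabUser' -- leading whitespace gone, interior
    # tabs/newlines replaced by spaces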
Update data export for new Heroku CLI heroku pg:backups capture vs. heroku pg:backups:capture
@@ -614,13 +614,22 @@ def dump_database(id):
         os.makedirs(dump_dir)

     try:
-        subprocess.check_call([
+        FNULL = open(os.devnull, 'w')
+        subprocess.call([
             "heroku",
             "pg:backups",
             "capture"
             "--app",
             app_name(id)
-        ])
+        ], stdout=FNULL, stderr=FNULL)
+
+        subprocess.call([  # for more recent versions of Heroku CLI.
+            "heroku",
+            "pg:backups:capture",
+            "--app",
+            app_name(id)
+        ], stdout=FNULL, stderr=FNULL)
+
     except Exception:
         pass
Fix a system test TODO. This should prevent the (as yet unencountered) error case where the API deletes the acked messages before we have a chance to seek to them.
@@ -360,7 +360,8 @@ class TestPubsub(unittest.TestCase):
         self.to_delete.append(topic)

         SUBSCRIPTION_NAME = 'subscribing-to-seek' + unique_resource_id('-')
-        subscription = topic.subscription(SUBSCRIPTION_NAME)
+        subscription = topic.subscription(
+            SUBSCRIPTION_NAME, retain_acked_messages=True)
         self.assertFalse(subscription.exists())
         subscription.create()
         self.to_delete.append(subscription)
Update cli messages module Add new error messages. Change 'TestRun.fail' to 'TestRun.LOGGER.error'.
@@ -25,10 +25,23 @@ stop_cache_incomplete = [
     r"Cache is in incomplete state - at least one core is inactive"
 ]

+add_cached_core = [
+    r"Error while adding core device to cache instance \d+",
+    r"Core device \'/dev/\S+\' is already cached\."
+]
+
+remove_mounted_core = [
+    r"Can\'t remove core \d+ from cache \d+\. Device /dev/cas\d+-\d+ is mounted\!"
+]
+
+stop_cache_mounted_core = [
+    r"Can\'t stop cache instance \d+\. Device /dev/cas\d+-\d+ is mounted\!"
+]
+

 def check_msg(output: Output, expected_messages):
     result = '\n'.join([output.stdout, output.stderr])
     for msg in expected_messages:
         matches = re.search(msg, result)
         if not matches:
-            TestRun.fail(f"Message is incorrect, expected: {msg}\n actual: {result}.")
+            TestRun.LOGGER.error(f"Message is incorrect, expected: {msg}\n actual: {result}.")
Fix Wrong cache call for objects parented to bones
@@ -225,7 +225,7 @@ def __gather_children(blender_object, blender_scene, export_settings):
                 parent_joint = find_parent_joint(root_joints, child.parent_bone)
                 if not parent_joint:
                     continue
-                child_node = gather_node(child, None, None, None, export_settings)
+                child_node = gather_node(child, None, blender_scene, None, export_settings)
                 if child_node is None:
                     continue
                 blender_bone = blender_object.pose.bones[parent_joint.name]
Match: replace complex variables fiddling with SavedExpr/SequenceExpr TN:
@@ -10,8 +10,8 @@ from langkit.diagnostics import Severity, check_source_language
 from langkit.expressions import (
     AbstractExpression, AbstractVariable, BasicExpr, BindingScope,
     ComputingExpr, Let, NullCheckExpr, NullExpr, PropertyDef,
-    ResolvedExpression, UnreachableExpr, attr_call, attr_expr, construct,
-    render
+    ResolvedExpression, SavedExpr, SequenceExpr, UnreachableExpr, attr_call,
+    attr_expr, construct, render
 )
 from langkit.expressions.boolean import Eq, If, Not
 from langkit.expressions.envs import Env
@@ -983,9 +983,7 @@ class Match(AbstractExpression):
         # appropriate order, so that in the end the first matchers are tested
         # first.
         for match_var, expr, inner_scope in reversed(constructed_matchers):
-            casted = Cast.Expr(matched_var,
-                               match_var.type,
-                               result_var=match_var)
+            casted = SavedExpr('Match', Cast.Expr(matched_var, match_var.type))
             guard = Not.make_expr(Eq.make_expr(casted, NullExpr(casted.type)))
             if expr.type != rtype:
                 # We already checked that type matches, so only way this is
@@ -993,12 +991,13 @@ class Match(AbstractExpression):
                 # rtype. In that case, we need an explicity upcast.
                 expr = Cast.Expr(expr, rtype)

-            expr_with_scope = BindingScope(expr, [match_var],
-                                           scope=inner_scope)
+            expr_with_scope = BindingScope(
+                Let.Expr([match_var],
+                         [casted.result_var.ref_expr],
+                         expr),
+                [],
+                scope=inner_scope
+            )
             result = If.Expr(guard, expr_with_scope, result, rtype)

-        dummy_var = PropertyDef.get().vars.create('Dummy', matched_expr.type)
-        dummy_resolved_var = dummy_var.ref_expr
-        dummy_resolved_var.set_ignored()
-        return Let.Expr([dummy_resolved_var], [matched_expr], result,
-                        abstract_expr=self)
+        return SequenceExpr(matched_expr, result, abstract_expr=self)
Remove check for list type minorminer has already been changed to always return dicts
@@ -182,10 +182,6 @@ class EmbeddingComposite(dimod.Sampler, dimod.Composite):
         if bqm and not embedding:
             raise ValueError("no embedding found")

-        # this should change in later versions
-        if isinstance(embedding, list):
-            embedding = dict(enumerate(embedding))
-
         bqm_embedded = dimod.embed_bqm(bqm, embedding, target_adjacency)

         response = child.sample(bqm_embedded, **parameters)
feat: triggering a skill with alexa media player Final code for triggering a skill with alexa media player
@@ -1110,7 +1110,9 @@ class AlexaClient(MediaPlayerDevice):
         elif media_type == "skill":
             await self.alexa_api.run_skill(
                 media_id,
-                queue_delay=0,
+                queue_delay=self.hass.data[DATA_ALEXAMEDIA]["accounts"][self.email][
+                    "options"
+                ][CONF_QUEUE_DELAY],
             )
         else:
             await self.alexa_api.play_music(
streams-modal: Fix styling around stream accessibility option. This fixes the faulty spacing around the various icons in stream accessibility option under the create new stream modal. This regression was introduced in 7e71bf.
@@ -584,13 +584,13 @@ form#add_new_subscription {
     margin-bottom: 20px;
 }

-.stream-creation-body #make-invite-only label span.icon-vector-globe {
+.stream-creation-body #make-invite-only label span.fa-globe {
     margin-left: 14px;
     margin-right: 10px;
 }

-.stream-creation-body #make-invite-only label span.icon-vector-lock {
+.stream-creation-body #make-invite-only label span.fa-lock {
     margin-left: 15px;
     margin-right: 11px;
 }

@@ -599,7 +599,7 @@ form#add_new_subscription {
     margin-top: 5px;
 }

-.stream-creation-body #announce-new-stream div[class^="icon"] {
+.stream-creation-body #announce-new-stream div[class^="fa"] {
     margin-left: 3px;
     margin-right: 8px;
 }
Retry dialing a peer if the connection is refused Employs exponential backoff to schedule the retries
 import asyncio
+import random
 from typing import (
     Dict,
     Iterable,
@@ -212,6 +213,8 @@ class PeerPool:
     def get_best_head_slot_peer(self) -> Peer:
         return self.get_best("head_slot")

+DIAL_RETRY_COUNT = 10
+

 class Node(BaseService):

@@ -333,6 +336,20 @@ class Node(BaseService):
             )
         )

+    async def dial_peer_with_retries(self, ip: str, port: int, peer_id: ID) -> None:
+        """
+        Dial the peer ``peer_id`` through the IPv4 protocol
+        """
+        for i in range(DIAL_RETRY_COUNT):
+            try:
+                # exponential backoff...
+                await asyncio.sleep(2**i + random.random())
+                await self.dial_peer(ip, port, peer_id)
+                return
+            except ConnectionRefusedError:
+                continue
+        raise ConnectionRefusedError
+
     async def dial_peer_maddr(self, maddr: Multiaddr) -> None:
         """
         Parse `maddr`, get the ip:port and PeerID, and call `dial_peer` with the parameters.
@@ -340,7 +357,7 @@ class Node(BaseService):
         ip = maddr.value_for_protocol(protocols.P_IP4)
         port = maddr.value_for_protocol(protocols.P_TCP)
         peer_id = ID.from_base58(maddr.value_for_protocol(protocols.P_P2P))
-        await self.dial_peer(ip=ip, port=port, peer_id=peer_id)
+        await self.dial_peer_with_retries(ip=ip, port=port, peer_id=peer_id)

     async def connect_preferred_nodes(self) -> None:
         if self.preferred_nodes:
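The delay before attempt i is 2**i seconds plus up to one second of jitter, so the ten attempts allowed by DIAL_RETRY_COUNT can span roughly 17 minutes before the final ConnectionRefusedError is raised. A sketch of the schedule:

    import random

    DIAL_RETRY_COUNT = 10  # value from the diff above
    delays = [2 ** i + random.random() for i in range(DIAL_RETRY_COUNT)]
    print(round(sum(delays)))  # ~1028 seconds in the worst case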
Fix documentation of MySQLPageGenerator The comma made the query invalid. Also fix grammar.
@@ -2754,7 +2754,7 @@ def MySQLPageGenerator(query, site=None, verbose=None):

        SELECT
           page_namespace,
-          page_title,
+          page_title
        FROM page
        WHERE page_namespace = 0;

@@ -2766,7 +2766,7 @@ def MySQLPageGenerator(query, site=None, verbose=None):
    @param verbose: if True, print query to be executed;
        if None, config.verbose_output will be used.
    @type verbose: None or bool
-   @return: generator which yield pywikibot.Page
+   @return: generator which yields pywikibot.Page
    """
    from pywikibot.data import mysql
help: Add "Edit a custom profile field" section. Adds a section on editing to the Custom profile fields help article. Fixes part of
@@ -28,6 +28,19 @@ methods][authentication-production] documentation for details.

 {end_tabs}

+## Edit a custom profile field
+
+{start_tabs}
+
+{settings_tab|profile-field-settings}
+
+1. In the **Actions** column, click the **pencil** (<i class="fa fa-pencil"></i>)
+   icon for the profile field you want to edit.
+
+1. Edit profile field information as desired, and click **Save changes**.
+
+{end_tabs}
+
 ## Profile field types

 There are several different types of fields available.

@@ -58,8 +71,8 @@ checkboxes will be disabled.

 {settings_tab|profile-field-settings}

-1. Click the **pencil** (<i class="fa fa-pencil"></i>) icon on the profile field
-   you want to edit.
+1. In the **Actions** column, click the **pencil** (<i class="fa fa-pencil"></i>)
+   icon for the profile field you want to edit.

 1. Toggle **Display in profile summary**.
simplify InRange based on intbounds This patch simplifies `InRange` to its argument if the bounds of the argument imply that `InRange` is a noop.
@@ -3276,6 +3276,12 @@ class InRange(Array):
         assert index.size == 0 or 0 <= index.min() and index.max() < length
         return index

+    def _simplified(self):
+        lower_length, upper_length = self.length._intbounds
+        lower_index, upper_index = self.index._intbounds
+        if 0 <= lower_index <= upper_index < lower_length:
+            return self.index
+
     def _intbounds_impl(self):
         lower_index, upper_index = self.index._intbounds
         lower_length, upper_length = self.length._intbounds
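The condition is plain interval arithmetic: if every value the index can take already lies inside [0, smallest possible length), the runtime check can never trigger, so InRange may return its argument unchanged. A standalone sketch of the predicate (the function name is made up):

    def inrange_is_noop(index_bounds, length_bounds):
        lower_index, upper_index = index_bounds
        lower_length, upper_length = length_bounds  # upper bound unused, as in the patch
        # Noop iff the whole index interval fits below the smallest length.
        return 0 <= lower_index <= upper_index < lower_length

    print(inrange_is_noop((0, 3), (4, 8)))  # True: check can be dropped
    print(inrange_is_noop((0, 3), (3, 8)))  # False: length may be as small as 3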
"FakeDeltaGenerator" and "MockQueue" are old test utilities that are no longer used. Remove them!
@@ -24,7 +24,6 @@ from streamlit.delta_generator import DeltaGenerator
 from streamlit.cursor import LockedCursor, make_delta_path
 from streamlit.errors import DuplicateWidgetID
 from streamlit.errors import StreamlitAPIException
-from streamlit.proto.Delta_pb2 import Delta
 from streamlit.proto.Element_pb2 import Element
 from streamlit.proto.TextArea_pb2 import TextArea
 from streamlit.proto.TextInput_pb2 import TextInput
@@ -45,82 +44,6 @@ register_widget = functools.partial(
 )


-class FakeDeltaGenerator(object):
-    """Fake DeltaGenerator class.
-
-    The methods in this class are specifically here as to not use the
-    one in the actual delta generator. This purely exists just to test the
-    DeltaGenerator Decorators without relying on the actual
-    DeltaGenerator methods.
-    """
-
-    def __init__(self):
-        """Constructor."""
-        pass
-
-    def __getattr__(self, name):
-        streamlit_methods = [
-            method_name for method_name in dir(st) if callable(getattr(st, method_name))
-        ]
-
-        def wrapper(*args, **kwargs):
-            if name in streamlit_methods:
-                if self._container == "sidebar":
-                    message = (
-                        "Method `%(name)s()` does not exist for "
-                        "`st.sidebar`. Did you mean `st.%(name)s()`?" % {"name": name}
-                    )
-                else:
-                    message = (
-                        "Method `%(name)s()` does not exist for "
-                        "`DeltaGenerator` objects. Did you mean "
-                        "`st.%(name)s()`?" % {"name": name}
-                    )
-            else:
-                message = "`%(name)s()` is not a valid Streamlit command." % {
-                    "name": name
-                }
-
-            raise AttributeError(message)
-
-        return wrapper
-
-    def fake_text(self, element, body):
-        """Fake text delta generator."""
-        element.text.body = str(body)
-
-    def fake_dataframe(self, arg0, data=None):
-        """Fake dataframe."""
-        return (arg0, data)
-
-    def fake_text_raise_exception(self, element, body):
-        """Fake text that raises exception."""
-        raise Exception("Exception in fake_text_raise_exception")
-
-    def exception(self, e):
-        """Create fake exception handler.
-
-        The real DeltaGenerator exception is more complicated. We use
-        this so _with_element can find the exception method. The real
-        exception method will be tested later on.
-        """
-        self._exception_msg = str(e)
-
-    def _enqueue(self, delta_type, element_proto):
-        delta = Delta()
-        el_proto = getattr(delta.new_element, delta_type)
-        el_proto.CopyFrom(element_proto)
-        return delta
-
-
-class MockQueue(object):
-    def __init__(self):
-        self._deltas = []
-
-    def __call__(self, data):
-        self._deltas.append(data)
-
-
 class DeltaGeneratorTest(testutil.DeltaGeneratorTestCase):
     """Test streamlit.delta_generator methods."""
[doc] enhance admin/configuration/api.rst. Enhance the doc: remove the reference to the 'nova-api' daemon, which is deprecated in favour of running the API under WSGI, and add some operations to the password response section.
 Compute API configuration
 =========================

-The Compute API, run by the ``nova-api`` daemon, is the component of OpenStack
-Compute that receives and responds to user requests, whether they be direct API
-calls, or via the CLI tools or dashboard.
+The Compute API, is the component of OpenStack Compute that receives and
+responds to user requests, whether they be direct API calls, or via the CLI
+tools or dashboard.

 Configure Compute API password handling
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 The OpenStack Compute API enables users to specify an administrative password
-when they create or rebuild a server instance. If the user does not specify a
-password, a random password is generated and returned in the API response.
+when they create, rebuild, rescue or evacuate a server instance.
+If the user does not specify a password, a random password is generated
+and returned in the API response.

 In practice, how the admin password is handled depends on the hypervisor in
 use and might require additional configuration of the instance. For example, you
@@ -20,6 +21,6 @@
 hypervisor and instance configuration do not support setting a password at
 server create time, the password that is returned by the create API call is
 misleading because it was ignored.

-To prevent this confusion, use the ``enable_instance_password`` configuration
-option to disable the return of the admin password for installations that do
-not support setting instance passwords.
+To prevent this confusion, set the ``enable_instance_password`` configuration
+to ``False`` to disable the return of the admin password for installations that
+do not support setting instance passwords.
Refactor SendKeysTests.setUp() Use Application.start() consistently across linux/windows platforms
@@ -41,11 +41,11 @@ import unittest
 import subprocess
 import time
 sys.path.append(".")
+from pywinauto.application import Application
 if sys.platform == 'win32':
     from pywinauto.keyboard import send_keys, parse_keys, KeySequenceError
     from pywinauto.keyboard import KeyAction, VirtualKeyAction, PauseAction
     from pywinauto.sysinfo import is_x64_Python, is_x64_OS
-    from pywinauto.application import Application
 else:
     from pywinauto import mouse
     from pywinauto.linux.keyboard import send_keys, KeySequenceError, KeyAction
@@ -77,13 +77,13 @@ class SendKeysTests(unittest.TestCase):
     def setUp(self):
         """Start the application set some data and ensure the application
        is in the state we want it."""
-        if sys.platform == 'win32':
         self.app = Application()
+        if sys.platform == 'win32':
             self.app.start(_notepad_exe())
             self.dlg = self.app.UntitledNotepad
             self.ctrl = self.dlg.Edit
         else:
-            self.app = subprocess.Popen("exec " + _test_app(), shell=True)
+            self.app.start(_test_app())
             time.sleep(0.1)
             mouse.click(coords=(300, 300))
             time.sleep(0.1)
Partial fix for github issue. This fixes an error message when canceling the Save dialog for the Cuts plugin under Qt5.
@@ -1002,32 +1002,28 @@ class Cuts(GingaPlugin.LocalPlugin):
         target = Widgets.SaveDialog(
             title='Save {0} data'.format(mode)).get_path()

+        if isinstance(target, tuple):
+            # is this always a tuple?
+            filename = target[0]
+            if filename == '':
+                # user canceled dialog
+                return
+        else:
+            filename = target
+
         # Save cancelled
-        if not target:
+        if not filename:
             return

         # TODO: This can be a user preference?
         fig_dpi = 100

         if mode == 'cuts':
-            # Save as fits file
-            image = self.fitsimage.get_image()
-            self.fv.error_wrap(image.save_as_file, target + '.fits')
-
             fig, xarr, yarr = self.cuts_plot.get_data()

         elif mode == 'slit':
             fig, xarr, yarr = self.slit_plot.get_data()

-        if isinstance(target, tuple):
-            # is this always a tuple?
-            filename = target[0]
-            if filename == '':
-                # user canceled dialog
-                return False
-        else:
-            filename = target
-
         figname = filename + '.png'
         self.logger.info("saving figure as: %s" % (figname))
         fig.savefig(figname, dpi=fig_dpi)
cephadm-adopt: set application on ganesha pool Set the nfs application to the ganesha pool. Closes:
       ceph_pool:
         name: "{{ nfs_ganesha_export_pool_name | default('nfs-ganesha') }}"
         cluster: "{{ cluster }}"
+        application: nfs
       delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
       environment:
Blacklist sphinxcontrib-bibtex v2.1.0 See mcmtroffaes/sphinxcontrib-bibtex#221
@@ -52,7 +52,7 @@ plot = matplotlib!=2.1.1
 interactive = ipykernel
 docs =
     sphinx>=1.6.7,!=2.1.0,!=3.2.0
-    sphinxcontrib-bibtex
+    sphinxcontrib-bibtex!=2.1.0
     sphinx_rtd_theme>=0.2.4
 tests =
     pytest>=4.3
     hypothesis
Bugfix: Fix out of bounds error for getting z notes Fixes this out of bounds error by reducing the index number by 1 when the index produced by searchsorted is equal to the last index of t_instruments
@@ -62,6 +62,10 @@ def get_z_notes(start_times, z_instruments, t_instruments):
     z_notes = []
     for t in start_times:
         idx = np.searchsorted(t_instruments, t, side='left') - 1
+
+        if idx.item() == t_instruments.size - 1:
+            idx -= 1
+
         t_left = t_instruments[idx]
         t_right = t_instruments[idx + 1]
         interp = (t - t_left) / (t_right - t_left)
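The boundary case is easy to reproduce with numpy directly: when t is at or beyond the last instrument time, searchsorted points at the final element, and the later t_instruments[idx + 1] lookup would run off the end of the array:

    import numpy as np

    t_instruments = np.array([0.0, 1.0, 2.0])
    print(np.searchsorted(t_instruments, 1.5, side='left') - 1)  # 1, interior case
    print(np.searchsorted(t_instruments, 3.0, side='left') - 1)  # 2, the last index;
    # t_instruments[idx + 1] would raise IndexError, hence the new idx -= 1 guard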
Remove stale test code line This line was producing a fake error message when trying to load user modules with the `-m` option.
@@ -497,7 +497,6 @@ def main():
         for m in options.user_modules:
             try:
                 rt.modules_system.load_module(m, force=True)
-                raise EnvironError("test")
             except EnvironError as e:
                 printer.warning("could not load module '%s' correctly: "
                                 "Skipping..." % m)
Refactor get_relevant_case_updates_from_form_json Add type hints to show why we can remove an unnecessary assertion. Invert an `if` clause and use `continue`.
+from typing import List, Optional
+
 import attr

 from casexml.apps.case.xform import extract_case_blocks

@@ -18,19 +20,24 @@ class RepeaterResponse:
     retry = attr.ib(default=True)


-def get_relevant_case_updates_from_form_json(domain, form_json, case_types, extra_fields,
-                                             form_question_values=None):
+def get_relevant_case_updates_from_form_json(
+    domain: str,
+    form_json: dict,
+    case_types: list,
+    extra_fields: list,
+    form_question_values: Optional[dict] = None,
+) -> List[CaseTriggerInfo]:
     result = []
     case_blocks = extract_case_blocks(form_json)
-    cases = CaseAccessors(domain).get_cases(
-        [case_block['@case_id'] for case_block in case_blocks], ordered=True)
-
+    case_ids = [case_block['@case_id'] for case_block in case_blocks]
+    cases = CaseAccessors(domain).get_cases(case_ids, ordered=True)
     db_case_dict = {case.case_id: case for case in cases}

     for case_block in case_blocks:
         case = db_case_dict[case_block['@case_id']]
+        if case_types and case.type not in case_types:
+            continue

-        if not case_types or case.type in case_types:
         case_create = case_block.get('create') or {}
         case_update = case_block.get('update') or {}

@@ -44,7 +51,7 @@ def get_relevant_case_updates_from_form_json(
                     updates={**case_create, **case_update},
                     created='create' in case_block,
                     closed='close' in case_block,
-                    extra_fields={field: case.get_case_property(field) for field in extra_fields},
+                    extra_fields={f: case.get_case_property(f) for f in extra_fields},
                     form_question_values=form_question_values or {},
                 ))
Make spatial depthwise convolution warp size aware Summary: Use new macro and remove hard-coded path. Pull Request resolved:
 #include <THCUNN/SharedMem.cuh>
 #include <THCUNN/common.h>
 #include <algorithm>
+#include <c10/macros/Macros.h>

-const int WARP_SIZE = 32;
 // Crude benchmarks suggest 256 is better than 512 and 1024
 // TODO: Autotune/use better heuristics, improve speed more.
 const int MAX_BLOCK_SIZE = 256;

 static int getGradParamsNumThreads(int batchSize){
   //warp per item in a batch, up to a maximum
-  return std::min(batchSize * WARP_SIZE, MAX_BLOCK_SIZE);
+  return std::min(batchSize * C10_WARP_SIZE, MAX_BLOCK_SIZE);
 }

@@ -213,9 +213,9 @@ __global__ void spatialDepthwiseConvolutionAccGradParameters(

   AccT grad = ScalarConvert<float, AccT>::to(0.0);

-  const int laneId = threadIdx.x % WARP_SIZE;
-  const int batch = threadIdx.x / WARP_SIZE;
-  const int nwarps = blockDim.x / WARP_SIZE;
+  const int laneId = threadIdx.x % C10_WARP_SIZE;
+  const int batch = threadIdx.x / C10_WARP_SIZE;
+  const int nwarps = blockDim.x / C10_WARP_SIZE;
   const int imageElements = outputWidth * outputHeight;
   // Use warp per item. In the original kernel, a threadblock was used to sum over NHW.
   // Here, we use a warp to sum values over HW dimension, and if batchSize is larger than the
@@ -227,7 +227,7 @@ __global__ void spatialDepthwiseConvolutionAccGradParameters(
   // bring a nice speed-up.
   for (int batchIdx = batch; batchIdx < batchSize; batchIdx += nwarps){
       // Warp-stride loop over elements in a batch item
-      for (IndexType idx = laneId; idx < imageElements; idx += WARP_SIZE) {
+      for (IndexType idx = laneId; idx < imageElements; idx += C10_WARP_SIZE) {
         // Need to calculate the following: batch position, and offset into the gradOutput
         // in height, and width. We can intuit the corresponding position in the input from
         // the other parameters we have
Fixed the __len__, __setitem__ and __iter__ functions of sequence with default value. Now, sequence emulates the __len__ and __iter__ behaviour of list without actually implementing a list. The highest index is now stored in case the list is changed at a later point. Then, the list is initialized with the highest known index.
@@ -7,6 +7,7 @@ __copyright__ = "oemof developer group"
 __license__ = "GPLv3"

 from collections import abc, UserList
+from itertools import repeat


 def sequence(sequence_or_scalar):
@@ -66,9 +67,11 @@ class _Sequence(UserList):
     def __init__(self, *args, **kwargs):
         self.default = kwargs["default"]
         self.default_changed = False
+        self.highest_index = -1
         super().__init__(*args)

     def __getitem__(self, key):
+        self.highest_index = max(self.highest_index, key)
         if not self.default_changed:
             return self.default
         try:
@@ -78,9 +81,23 @@ class _Sequence(UserList):
             return self.data[key]

     def __setitem__(self, key, value):
+        if not self.default_changed:
             self.default_changed = True
+            self.__init_list()
         try:
             self.data[key] = value
         except IndexError:
             self.data.extend([self.default] * (key - len(self.data) + 1))
             self.data[key] = value
+
+    def __init_list(self):
+        self.data = [self.default] * (self.highest_index + 1)
+
+    def __len__(self):
+        return max(len(self.data), self.highest_index + 1)
+
+    def __iter__(self):
+        if self.default_changed:
+            super(_Sequence, self).__iter__()
+        else:
+            return repeat(self.default, self.highest_index + 1)
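The lazy branch relies on itertools.repeat, which yields the default a fixed number of times without materializing a list:

    from itertools import repeat

    print(list(repeat(42, 3)))  # [42, 42, 42]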
[pytorch] Minor: boilerplate to propagate errors in request_callback_impl Summary: Pull Request resolved: Out of caution, avoid assuming that there's never a failure in a couple of request_calback_impl case handlers, but rather propagate the error. ghstack-source-id: Test Plan: buck test mode/dev-nosan caffe2/test/...
@@ -143,10 +143,14 @@ std::shared_ptr<FutureMessage> RequestCallbackImpl::processRpc(
       whenValueSet->addCallback(
           [responseFuture, messageId, rref](
               const rpc::Message& /* unused */,
-              const c10::optional<utils::FutureError>& /* unused */) {
+              const c10::optional<utils::FutureError>& error) {
+            if (!error) {
               Message m = ScriptRRefFetchRet({rref->getValue()}).toMessage();
               m.setId(messageId);
-              responseFuture->markCompleted(m);
+              responseFuture->markCompleted(std::move(m));
+            } else {
+              responseFuture->setError(error->what());
+            }
           });
       return responseFuture;
     }
@@ -167,12 +171,17 @@ std::shared_ptr<FutureMessage> RequestCallbackImpl::processRpc(
       whenValueSet->addCallback(
           [responseFuture, messageId, rref](
               const rpc::Message& /* unused */,
-              const c10::optional<utils::FutureError>& /* unused */) {
-            SerializedPyObj result = PythonRpcHandler::getInstance().serialize(
+              const c10::optional<utils::FutureError>& error) {
+            if (!error) {
+              SerializedPyObj result =
+                  PythonRpcHandler::getInstance().serialize(
                       jit::toPyObject(rref->getValue()));
               Message m = PythonRRefFetchRet(result.toIValues()).toMessage();
               m.setId(messageId);
-              responseFuture->markCompleted(m);
+              responseFuture->markCompleted(std::move(m));
+            } else {
+              responseFuture->setError(error->what());
+            }
           });
       return responseFuture;
     }
Change flite command to a variable for Codacy Changing the ["flite","-lv"] command being passed to subprocess into a variable to see if that makes Codacy happier.
@@ -1226,8 +1226,9 @@ def get_tts_engine(profile):
         )
     elif(get_profile_var(profile, ["tts_engine"]) == "flite-tts"):
         try:
+            flite_cmd = ['flite', '-lv']
             voices = subprocess.check_output(
-                ['flite', '-lv'],
+                flite_cmd,
                 shell=False
             ).decode('utf-8').split(" ")[2:-1]
             print(
docs: Replace A icon with help in format using markdown. In the format-your-message-using-markdown in-app help section, the A icon is replaced by a help button. This updates the docs.
@@ -273,7 +273,7 @@ A summary of the formatting syntax is available in-app.

 {!start-composing.md!}

-1. Click the A (<i class="fa fa-font"></i>) icon at the bottom of the compose box.
+1. Click help at the bottom of the compose box.

 {end_tabs}
[refactor] Just format error in format_error Don't set the error, just return the formatted error and let the caller handle setting.
@@ -370,7 +370,7 @@ class JsPrettierCommand(sublime_plugin.TextCommand):
                 shell=self.is_windows())
             stdout, stderr = proc.communicate(input=source.encode('utf-8'))
             if stderr or proc.returncode != 0:
-                self.format_error_message(stderr.decode('utf-8'), str(proc.returncode))
+                self.error_message = self.format_error_message(stderr.decode('utf-8'), str(proc.returncode))
                 return None
             return stdout.decode('utf-8')
         except OSError as ex:
@@ -579,7 +579,7 @@ class JsPrettierCommand(sublime_plugin.TextCommand):
                 '{1}'.format(PLUGIN_NAME, self.error_message))

     def format_error_message(self, error_message, error_code):
-        self.error_message = 'Prettier reported the following ' \
+        return 'Prettier reported the following ' \
                              'error:\n\n{0}\n' \
                              'Process finished with exit code {1}\n'\
             .format(error_message, '{0}'
Replace `from numpy.random import poisson` with `import numpy as np` in `cirq-core/cirq/contrib/acquaintance/gates_test.py` Fixes:
@@ -17,7 +17,7 @@ from random import randint
 from string import ascii_lowercase as alphabet
 from typing import Optional, Sequence, Tuple

-from numpy.random import poisson
+import numpy as np
 import pytest

 import cirq
@@ -233,7 +233,7 @@ def test_swap_network_init_error():


 part_lens_and_acquaintance_sizes = [
-    [[l + 1 for l in poisson(size=n_parts, lam=lam)], poisson(4)]
+    [[l + 1 for l in np.random.poisson(size=n_parts, lam=lam)], np.random.poisson(4)]
     for n_parts, lam in product(range(2, 20, 3), range(1, 4))
 ]
fix: Show alert message Append indicator element instead of adding class to parent div to avoid css bleed
@@ -283,12 +283,12 @@ frappe.show_alert = function(message, seconds=7, actions={}) {
			<a class="close">&times;</a>
		</div>`);

-	div.find('.alert-message').append(message.message);
-
	if(message.indicator) {
-		div.find('.alert-message').addClass('indicator '+ message.indicator);
+		div.find('.alert-message').append(`<span class="indicator ${message.indicator}"></span>`);
	}

+	div.find('.alert-message').append(message.message);
+
	if (body_html) {
		div.find('.alert-body').show().html(body_html);
	}
Put the TODO comment in a YAML array. The `contains:` map needs an array of strings. By placing the TODO comment between brackets I hope that the user will be more likely to produce a valid file. Closes:
@@ -241,7 +241,7 @@ class ModulesTestYmlBuilder(object):
                 test_files[i].pop("md5sum")
                 test_files[i][
                     "contains"
-                ] = "# TODO nf-core: file md5sum was variable, please replace this text with a string found in the file instead"
+                ] = "[ # TODO nf-core: file md5sum was variable, please replace this text with a string found in the file instead ]"

         if len(test_files) == 0:
             raise UserWarning(f"Could not find any test result files in '{results_dir}'")
upload: respect --yes with large upload confirmation If the user passes in --yes, don't prompt them to confirm large uploads. Tested-by: Mike Frysinger
@@ -262,7 +262,7 @@ Gerrit Code Review: https://www.gerritcodereview.com/
       answer = sys.stdin.readline().strip().lower()
       answer = answer in ('y', 'yes', '1', 'true', 't')

-      if answer:
+      if not opt.yes and answer:
         if len(branch.commits) > UNUSUAL_COMMIT_THRESHOLD:
           answer = _ConfirmManyUploads()

@@ -335,6 +335,7 @@ Gerrit Code Review: https://www.gerritcodereview.com/
     if not todo:
       _die("nothing uncommented for upload")

+    if not opt.yes:
       many_commits = False
       for branch in todo:
         if len(branch.commits) > UNUSUAL_COMMIT_THRESHOLD:
removeLocations with a Path object addLocations with a Path object
@@ -125,6 +125,7 @@ def test_library_add_edit_delete(plex, movies, photos):
     # Create Other Videos library = No external metadata scanning
     section_name = "plexapi_test_section"
     movie_location = movies.locations[0]
+    movie_path = plex.browse(path=movie_location)
     photo_location = photos.locations[0]
     plex.library.add(
         name=section_name,
@@ -172,6 +173,12 @@ def test_library_add_edit_delete(plex, movies, photos):
     section.addLocations(photo_location)
     section.reload()
     assert len(section.locations) == 2
+    section.removeLocations(movie_path)
+    section.reload()
+    assert len(section.locations) == 1
+    section.addLocations(movie_path)
+    section.reload()
+    assert len(section.locations) == 2
     # Attempt to remove all locations
     with pytest.raises(BadRequest):
         plex.library.removeLocations(section.locations)
[commands] Fix cog eject behaviour with application commands This was using the old attribute I forgot to change.
@@ -514,9 +514,8 @@ class Cog(metaclass=CogMeta):

         if not cls.__cog_is_app_commands_group__:
             for command in self.__cog_app_commands__:
-                try:
-                    guild_ids = command.__discord_app_commands_default_guilds__
-                except AttributeError:
+                guild_ids = command._guild_ids
+                if guild_ids is None:
                     bot.tree.remove_command(command.name)
                 else:
                     for guild_id in guild_ids:
add check for pointer to string Check if memory referenced is a pointer to a string. Fixes mimikatz string test.
 import re
 import string
+import struct

 from smda.common.SmdaReport import SmdaReport

@@ -172,6 +173,18 @@ def extract_insn_string_features(f, bb, insn):
         string_read = read_string(f.smda_report, data_ref)
         if string_read:
             yield String(string_read.rstrip("\x00")), insn.offset
+            continue
+
+        # test to see if we're referencing a pointer and that points to a string
+        bytes_ = read_bytes(insn.smda_function.smda_report, data_ref, num_bytes=4)
+        val = struct.unpack("I", bytes_)[0]
+        if val and insn.smda_function.smda_report.isAddrWithinMemoryImage(val):
+            # it is a pointer, check if it points to a string
+            string_read = read_string(f.smda_report, val)
+            if string_read:
+                yield String(string_read.rstrip("\x00")), insn.offset
+                continue
+

 def extract_insn_offset_features(f, bb, insn):
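struct.unpack is what turns the four bytes read at the reference into a candidate pointer value that can then be range-checked against the memory image:

    import struct

    raw = b"\x10\x20\x40\x00"         # four bytes read at data_ref (example value)
    val = struct.unpack("I", raw)[0]  # unsigned 32-bit int, native byte order
    print(hex(val))                   # 0x402010 on a little-endian platform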
Fix border/outline color of inline code in tables Fixes
@@ -219,6 +219,7 @@ a:visited.has-code span {
 }

 /* table fixes */
+
 table,
 article.pytorch-article table,
 article.pytorch-article .wy-table-responsive table {
@@ -237,6 +238,16 @@ article.pytorch-article table td:first-of-type code {
     white-space: nowrap;
 }

+article.pytorch-article .wy-table-responsive table tbody td code {
+    /* move outline to enclosing code tag instead of span.pre, and fix colors */
+    background-color: #ffffff;
+    border: 1px solid #e9e9e9;
+}
+
+article.pytorch-article .wy-table-responsive table tbody td code span.pre {
+    outline: none;
+}
+
 /* fixes for code line numbers */

 article.pytorch-article div[class*="highlight-"] {
[BUG] Wrong icon on the place order button Use paper plane as symbol Fixes
 </div>
 {% if order.status == PurchaseOrderStatus.PENDING %}
 <button type='button' class='btn btn-outline-secondary' id='place-order' title='{% trans "Place order" %}'>
-    <span class='fas fa-shopping-cart icon-blue'></span>
+    <span class='fas fa-paper-plane icon-blue'></span>
 </button>
 {% elif order.status == PurchaseOrderStatus.PLACED %}
 <button type='button' class='btn btn-primary' id='receive-order' title='{% trans "Receive items" %}'>
Update argo_index_pa.py Improved logging
@@ -190,7 +190,7 @@ class indexstore(ArgoIndexStoreProto):
             with self.fs['index'].open(this_path + '.gz', "rb") as fg:
                 with gzip.open(fg) as f:
                     self.index = read_csv(f)
-                    log.debug("Argo index file loaded with pyarrow read_csv from: %s" % f.name)
+                    log.debug("Argo index file loaded with pyarrow read_csv from: '%s'" % f.name)
         else:
             with self.fs['index'].open(this_path, "rb") as f:
                 self.index = read_csv(f)
@@ -207,7 +207,8 @@ class indexstore(ArgoIndexStoreProto):
         else:
             summary.append("Loaded: False")
         if hasattr(self, 'search'):
-            summary.append("Searched: True (%i records, %0.4f%%)" % (self.N_FILES, self.N_FILES * 100 / self.shape[0]))
+            match = 'matches' if self.N_FILES > 1 else 'match'
+            summary.append("Searched: True (%i %s, %0.4f%%)" % (self.N_FILES, match, self.N_FILES * 100 / self.shape[0]))
         else:
             summary.append("Searched: False")
         return "\n".join(summary)
@@ -227,9 +228,10 @@ class indexstore(ArgoIndexStoreProto):
             raise DataNotFound("No Argo data in the index correspond to your search criteria."
                                "Search definition: %s" % self.cname())

-        this_path = self.sha_df
+        # this_path = self.sha_df
+        this_path = self.host + "/" + self.index_file + "/" + self.sha_df
         if self.fs['search'].exists(this_path):
-            log.debug('Search results already in memory as dataframe')
+            log.debug('Search results already in memory as dataframe, loading...')
             with self.fs['search'].fs.open(this_path, "rb") as of:
                 df = pd.read_pickle(of)
         else:
@@ -264,20 +266,21 @@ class indexstore(ArgoIndexStoreProto):

     def run(self):
         """ Filter index with search criteria """
-        search_path = self.sha_pq
+        search_path = self.host + "/" + self.index_file + "/" + self.sha_pq
         if self.fs['search'].exists(search_path):
-            log.debug('Search results already in memory as pyarrow table, loading')
+            log.debug('Search results already in memory as pyarrow table, loading...')
             with self.fs['search'].fs.open(search_path, "rb") as of:
                 self.search = pa.parquet.read_table(of)
         else:
            log.debug('Compute search from scratch')
            self.search = self.index.filter(self.search_filter)
+           log.debug('Found %i matches' % self.search.shape[0])
            if self.cache:
                with self.fs['search'].open(search_path, "wb") as of:
                    pa.parquet.write_table(self.search, of)
                with self.fs['search'].fs.open(search_path, "rb") as of:
                    self.search = pa.parquet.read_table(of)
-               log.debug('Search results as pyarrow table saved in cache')
+               log.debug('Search results saved in cache as pyarrow table')
         return self

     def read_wmo(self):
Minor bugfix for demcz: setting firstcall=True
@@ -179,6 +179,7 @@ class demcz(_algorithm):
             history.add_group('interest', slices)

             ### BURN_IN
+            firstcall = True
             burnInpar = [np.zeros((nChains, dimensions))] * nSeedIterations
             for i in range(nSeedIterations):
                 self._logPs = []
Burn 1 instead of 255 This aligns better to the value of an int-converted boolean image
@@ -56,7 +56,7 @@ def rasterize_file_thin_line(vector_filename_in, reference_file,
     size = reference_file.RasterYSize, reference_file.RasterXSize
     gdal_utils.gdal_save(numpy.zeros(size, dtype=numpy.uint8),
                          reference_file, raster_filename_out, gdal.GDT_Byte)
-    subprocess.run(['gdal_rasterize', '-burn', '255']
+    subprocess.run(['gdal_rasterize', '-burn', '1']
                    + ([] if query is None else ['-where', query])
                    + [vector_filename_in, raster_filename_out],
                    check=True,
Instruments/EnergyMeas: Adds support for DerivedMeasurement from devlib. Devlib is now capable of performing postprocessing of MeasurementCSV files, so instead of calculating additional metrics in WA this will be performed externally. Currently support has been added for calculating average power and cumulative energy.
 # pylint: disable=W0613,E1101
 from __future__ import division
 import os
-from collections import defaultdict

 from devlib.instrument import CONTINUOUS
 from devlib.instrument.energy_probe import EnergyProbeInstrument
 from devlib.instrument.daq import DaqInstrument
 from devlib.instrument.acmecape import AcmeCapeInstrument
 from devlib.utils.misc import which
+from devlib.derived.derived_measurements import DerivedEnergyMeasurements

 from wa import Instrument, Parameter
 from wa.framework import pluginloader

@@ -253,24 +253,7 @@ class EnergyMeasurement(Instrument):
         self.extract_metrics(context)

     def extract_metrics(self, context):
-        measurements = self.measurement_csv.itermeasurements()
-        energy_results = defaultdict(dict)
-        power_results = defaultdict(int)
-
-        for count, row in enumerate(measurements):
-            for entry in row:
-                channel = entry.channel
-                if channel.kind == 'energy':
-                    if count == 0:
-                        energy_results[channel.site]['start'] = entry.value
-                    else:
-                        energy_results[channel.site]['end'] = entry.value
-                elif channel.kind == 'power':
-                    power_results[channel.site] += entry.value
-
-        for site in energy_results:
-            total_energy = energy_results[site]['end'] - energy_results[site]['start']
-            context.add_metric('{}_energy'.format(site), total_energy, 'joules')
-        for site in power_results:
-            power = power_results[site] / count + 1  #pylint: disable=undefined-loop-variable
-            context.add_metric('{}_power'.format(site), power, 'watts')
+        derived_measurements = DerivedEnergyMeasurements.process(self.measurement_csv)
+        for meas in derived_measurements:
+            name = '{}_{}'.format(meas.channel.site, meas.channel.name)
+            context.add_metric(name, meas.value, meas.units)
UPDATE getting_started.rst - improve wording Found a few sentences in the Docs that I thought could be a bit more readable, hopefully improved them.
@@ -20,7 +20,7 @@ If you want to install ``moto`` from source::
 Moto usage
 ----------

-For example we have the following code we want to test:
+For example, we have the following code we want to test:

 .. sourcecode:: python

@@ -39,12 +39,12 @@ For example we have the following code we want to test:
             k.key = self.name
             k.set_contents_from_string(self.value)

-There are several method to do this, just keep in mind Moto creates a full blank environment.
+There are several ways to do this, but you should keep in mind that Moto creates a full, blank environment.

 Decorator
 ~~~~~~~~~

-With a decorator wrapping all the calls to S3 are automatically mocked out.
+With a decorator wrapping, all the calls to S3 are automatically mocked out.

 .. sourcecode:: python

@@ -66,7 +66,7 @@ With a decorator wrapping all the calls to S3 are automatically mocked out.
 Context manager
 ~~~~~~~~~~~~~~~

-Same as decorator, every call inside ``with`` statement are mocked out.
+Same as the Decorator, every call inside the ``with`` statement is mocked out.

 .. sourcecode:: python

@@ -83,7 +83,7 @@ Same as decorator, every call inside ``with`` statement are mocked out.
 Raw
 ~~~

-You can also start and stop manually the mocking.
+You can also start and stop the mocking manually.

 .. sourcecode:: python

@@ -104,11 +104,11 @@ You can also start and stop manually the mocking.
 Stand-alone server mode
 ~~~~~~~~~~~~~~~~~~~~~~~

-Moto comes with a stand-alone server allowing you to mock out an AWS HTTP endpoint. It is very useful to test even if you don't use Python.
+Moto also comes with a stand-alone server allowing you to mock out an AWS HTTP endpoint. For testing purposes, it's extremely useful even if you don't use Python.

 .. sourcecode:: bash

     $ moto_server ec2 -p3000
      * Running on http://127.0.0.1:3000/

-This method isn't encouraged if you're using ``boto``, best is to use decorator method.
+However, this method isn't encouraged if you're using ``boto``, the best solution would be to use a decorator method.
Fix CodeHashingTest.test_external_module altair no longer exports `vegalite.v3` by default. Add an explicit import.
@@ -22,7 +22,7 @@ import tempfile
 import time
 import unittest

-import altair as alt
+import altair.vegalite.v3
 import numpy as np
 import pandas as pd
 import pytest
@@ -406,11 +406,14 @@ class CodeHashTest(unittest.TestCase):
     def test_external_module(self):
         """Test code that references an external module."""

+        # NB: If a future vegalite update removes the v3 API, these functions
+        # will need to be updated!
+
         def call_altair_concat():
-            return alt.vegalite.v3.api.concat()
+            return altair.vegalite.v3.api.concat()

         def call_altair_layer():
-            return alt.vegalite.v3.api.layer()
+            return altair.vegalite.v3.api.layer()

         self.assertNotEqual(get_hash(call_altair_concat), get_hash(call_altair_layer))
RPR subdivision error. The following error could happen:

    File "D:\RadeonProRenderBlenderAddon\src\rprblender\properties\object.py", line 120, in export_subdivision
        factor = int(math.log2(16.0 / self.subdivision_factor))
    ZeroDivisionError: float division by zero

Fixed by setting min=0.01 for the subdivision factor.
@@ -76,7 +76,7 @@ class RPR_ObjectProperites(RPR_Properties):
     subdivision_factor: FloatProperty(
         name="Adaptive Level",
         description="Subdivision factor for mesh, in pixels that it should be subdivided to. For finer subdivision set lower.",
-        min=0.0, soft_max=10.0,
+        min=0.01, soft_max=10.0,
         default=1.0
     )
     subdivision_boundary_type: EnumProperty(
Fix info on policy entries in Identity txn fam spec Each policy is a list of type/key pairs (not a type and key list) Also corrected the proto code to match the contents of protos/identity.proto
@@ -44,8 +44,8 @@ State
 Policies
 --------

 A policy will have a name and a list of entries. Each policy entry will have a
-type and a key list. The type will be either PERMIT_KEY or DENY_KEY and the key
-list will be a list of public keys.
+list of type/key pairs. The type will be either PERMIT_KEY or DENY_KEY.
+Each key in a type/key pair will be a public key.

 .. code-block:: protobuf

     message Policy {
-      enum Type {
-        PERMIT_KEY = 0;
-        DENY_KEY = 1;
+
+      enum EntryType {
+        ENTRY_TYPE_UNSET = 0;
+        PERMIT_KEY = 1;
+        DENY_KEY = 2;
       }

       message Entry {
         // Whether this is a PERMIT_KEY or DENY_KEY entry
-        Type type = 1;
-        // This should be a public key or * to refer to all participants.
-        string key = 2;
+        EntryType type = 1;
+        // This should a public key or * to refer to all participants.
+        string key = 2;
       }

       // name of the policy, this should be unique.
       string name = 1;
+
+      // list of Entries
       // The entries will be processed in order from first to last.
       repeated Entry entries = 2;
     }
Update README.md Removed old forum link Renamed XBMC to Kodi in readme
 ##What is Maraschino?

-I wanted a simple web interface to act as a nice overview/front page for my XBMC HTPC. I couldn't find anything that suited my requirements so I created something myself.
+I wanted a simple web interface to act as a nice overview/front page for my Kodi HTPC. I couldn't find anything that suited my requirements so I created something myself.

 You can find more information and setup instructions on the [project homepage](http://www.maraschinoproject.com/ "Maraschino Project homepage").

-There is now also an [official forum](http://forums.maraschinoproject.com/) which is your best bet for support, feature requests and bug reports.
-
-In addition, there's also a thread on the [XBMC forums](http://forum.xbmc.org/showthread.php?t=113136 "XBMC forums").
+In addition, there's also a thread on the [Kodi forums](http://forum.kodi.tv/showthread.php?tid=113136&highlight=maraschino "Kodi forums").

 ##Screenshots
 <img src="http://www.maraschinoproject.com/static/images/screenshot1.jpg" width="355">&nbsp;&nbsp;<img src="http://www.maraschinoproject.com/static/images/screenshot2.jpg" width="355">

@@ -21,11 +19,11 @@ In addition, there's also a thread on the [XBMC forums](http://forum.xbmc.org/sh
 * **Customisable applications module** providing quick access to your web interfaces (e.g. SABnzb+, SickBeard, or whatever else you want to link to).

-* **Recently added episodes/movies/albums modules** - click an episode or movie to play it in XBMC.
+* **Recently added episodes/movies/albums modules** - click an episode or movie to play it in Kodi.

-* **Media library browser** - browse your entire movie and TV library, and click to play in XBMC. Queue items to play after.
+* **Media library browser** - browse your entire movie and TV library, and click to play in Kodi. Queue items to play after.

-* **Control multiple XBMC servers** - do you have an XBMC server in both the living room and bedroom? Switch between the two instantly and control them using Maraschino. Send notification messages to them remotely!
+* **Control multiple Kodi servers** - do you have an Kodi server in both the living room and bedroom? Switch between the two instantly and control them using Maraschino. Send notification messages to them remotely!

 * **SABnzbd+ and NZBGet modules** manage your nzb queue from within Maraschino - they appear when something is downloading and hide again when finished - shows you what is currently downloading, speed, time/MB remaining, and a progress bar. Control (pause, resume, speed limit) your downloads.
Tornado client now uses asyncio. This means it will only work with Python 3.5+. Closes
@@ -17,12 +17,11 @@ client::

 from functools import partial

 from tornado.httpclient import AsyncHTTPClient
-from tornado.concurrent import Future

-from .client import Client
+from .async_client import AsyncClient


-class TornadoClient(Client):
+class TornadoClient(AsyncClient):
     """
     :param endpoint: The server address.
     :param async_http_client_class: Tornado asynchronous HTTP client class.
@@ -35,23 +34,7 @@ class TornadoClient(AsyncClient):
         super(TornadoClient, self).__init__(*args, **kwargs)
         self.http_client = async_http_client or AsyncHTTPClient()

-    def _request_sent(self, future, response):
-        """Callback when request has been sent"""
-        if response.error:
-            future.set_exception(response.error)
-        else:
-            future.set_result(
-                self.process_response(
-                    response.body.decode(),
-                    {
-                        "http_code": response.code,
-                        "http_reason": response.reason,
-                        "http_headers": response.headers,
-                    },
-                )
-            )
-
-    def send_message(self, request, **kwargs):
+    async def send_message(self, request, **kwargs):
         """
         Transport the message to the server and return the response.
@@ -62,13 +45,14 @@ class TornadoClient(AsyncClient):
         headers = dict(self.DEFAULT_HEADERS)
         headers.update(kwargs.pop("headers", {}))

-        future = Future()
-        self.http_client.fetch(
-            self.endpoint,
-            method="POST",
-            body=request,
-            headers=headers,
-            callback=partial(self._request_sent, future),
-            **kwargs
+        response = await self.http_client.fetch(
+            self.endpoint, method="POST", body=request, headers=headers, **kwargs
+        )
+
+        # Note: Tornado adds it's own logger handlers, so the following log format isn't
+        # used, unless Tornado's handlers are disabled.
+        return self.process_response(
+            response.body.decode(),
+            log_extra={"http_code": response.code, "http_reason": response.reason},
+            log_format="<-- %(message)s (%(http_code)s %(http_reason)s)",
         )
-        return future
Suppress docker-compose image pulling progress bar. This adds a huge amount of logging to our CircleCI logs and forces us in 100% of our tests to download the logs instead of viewing them in CircleCI directly. Might even speed things up a bit.
@@ -89,6 +89,7 @@ jobs: command: | set -x sudo sysctl -w vm.max_map_count=262144 + docker-compose pull --quiet docker-compose up -d sleep 20 docker-compose ps
Change condition in swap module Summary: Pull Request resolved: Test Plan: python test/test_quantization.py Imported from OSS
@@ -275,7 +275,7 @@ def swap_module(mod, mapping): The corresponding quantized module of `mod` """ new_mod = mod - if hasattr(mod, 'observer'): + if hasattr(mod, 'qconfig') and mod.qconfig is not None: if type(mod) in mapping: new_mod = mapping[type(mod)].from_float(mod)
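Why the new condition matters, as a minimal sketch (the module and names here are illustrative, not the commit's test code): a module whose `qconfig` was explicitly set to `None` has opted out of quantization, yet it still passes a bare `hasattr` check.

```python
# Hypothetical sketch: qconfig=None is an explicit opt-out, which an
# attribute-existence check alone cannot distinguish from "configured".
import torch.nn as nn

lin = nn.Linear(4, 4)
lin.qconfig = None                                   # opt out of quantization
assert hasattr(lin, 'qconfig')                       # attribute check alone: would swap
assert not (hasattr(lin, 'qconfig') and lin.qconfig is not None)  # new check: skips
```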
Integ tests: set custom packages config after jinja rendering

When adding custom packages configs we parse the cluster config file, and in case jinja template directives are present this might fail.
@@ -216,12 +216,12 @@ def pcluster_config_reader(test_datadir, vpc_stacks, region, request): def _config_renderer(**kwargs): config_file_path = test_datadir / config_file - _add_custom_packages_configs(config_file_path, request) default_values = _get_default_template_values(vpc_stacks, region, request) file_loader = FileSystemLoader(str(test_datadir)) env = Environment(loader=file_loader) rendered_template = env.get_template(config_file).render(**{**kwargs, **default_values}) config_file_path.write_text(rendered_template) + _add_custom_packages_configs(config_file_path, request) return config_file_path return _config_renderer
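A minimal sketch of the failure mode this ordering avoids, with hypothetical config content: the cluster config is INI-style, and a Jinja block directive is not valid INI, so the file can only be parsed after rendering.

```python
# Hypothetical config content: a Jinja block directive breaks INI parsing.
import configparser

raw = "{% if scheduler == 'slurm' %}\n[cluster default]\nscheduler = slurm\n{% endif %}\n"
try:
    configparser.ConfigParser().read_string(raw)
except configparser.MissingSectionHeaderError:
    print("config must be rendered before it can be parsed")
```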
[DOC] Fix typos in tutorials. Fix some typos.
@@ -38,7 +38,7 @@ import numpy as np # ------------------------------- # The most straight-forward way to call target specific function is via # extern function call construct in tvm. -# In th following example, we use :any:`tvm.call_pure_extern` to call +# In the following example, we use :any:`tvm.call_pure_extern` to call # :code:`__expf` function, which is only available under CUDA. # n = tvm.var("n") @@ -119,7 +119,7 @@ print(fcuda.imported_modules[0].get_source()) ###################################################################### # Add Your Own Intrinsic # ---------------------- -# If there is an instrinsic that is not provided by TVM. +# If there is an intrinsic that is not provided by TVM. # User can easily add new intrinsic by using the intrinsic rule system. # The following example add an intrinsic :code:`mylog` to the system. #
Prepare 2.4.0rc5. [ci skip-rust] [ci skip-build-wheels]
See https://www.pantsbuild.org/v2.4/docs/release-notes-2-4 for an overview of the changes in this release series. +## 2.4.0rc5 (Apr 17, 2021) + +### Bug fixes + +* Wait for all Sessions during pantsd shutdown (cherrypick of #11929) ([#11934](https://github.com/pantsbuild/pants/pull/11934)) + +* Retrieve RunTracker args from the OptionsBootstrapper. (cherrypick of #11931) ([#11932](https://github.com/pantsbuild/pants/pull/11932)) + ## 2.4.0rc4 (Apr 14, 2021) ### Bug fixes
Core & Internals: fix timer names in delete_dids. The same name was used for multiple code blocks, and this makes the timer not very useful.
@@ -1439,7 +1439,7 @@ def _delete_dids( record_counter(name='undertaker.content.rowcount', delta=rowcount) # Remove CollectionReplica - with record_timer_block('undertaker.dids'): + with record_timer_block('undertaker.collection_replicas'): stmt = delete( models.CollectionReplica ).where( @@ -1466,7 +1466,7 @@ def _delete_dids( session.bulk_insert_mappings(temp_table, collection_dids.values()) data_in_temp_table = collection_dids - with record_timer_block('undertaker.dids'): + with record_timer_block('undertaker.dids_followed'): stmt = delete( models.DidsFollowed ).where(
[ci/release] Fix result output in Buildkite pipeline run. The new Buildkite pipeline prints out faulty results due to a confusion of -ge/-gt and -le/-lt in the retry script. This is a cosmetic error (so behavior was still correct) that is resolved with this PR.
@@ -12,15 +12,15 @@ reason() { # Keep in sync with e2e.py ExitCode enum if [ "$1" -eq 0 ]; then REASON="success" - elif [ "$1" -ge 1 ] && [ "$1" -le 10 ]; then + elif [ "$1" -ge 1 ] && [ "$1" -lt 10 ]; then REASON="runtime error" - elif [ "$1" -gt 10 ] && [ "$1" -le 20 ]; then + elif [ "$1" -ge 10 ] && [ "$1" -lt 20 ]; then REASON="infra error" - elif [ "$1" -gt 30 ] && [ "$1" -le 40 ]; then + elif [ "$1" -ge 30 ] && [ "$1" -lt 40 ]; then REASON="infra timeout" elif [ "$1" -eq 42 ]; then REASON="command timeout" - elif [ "$1" -gt 40 ] && [ "$1" -le 50 ]; then + elif [ "$1" -ge 40 ] && [ "$1" -lt 50 ]; then REASON="command error" fi echo "${REASON}" @@ -125,7 +125,7 @@ echo "Final release test exit code is ${EXIT_CODE} (${REASON})" if [ "$EXIT_CODE" -eq 0 ]; then echo "RELEASE MANAGER: This test seems to have passed." -elif [ "$EXIT_CODE" -gt 30 ] && [ "$EXIT_CODE" -le 40 ]; then +elif [ "$EXIT_CODE" -ge 30 ] && [ "$EXIT_CODE" -lt 40 ]; then echo "RELEASE MANAGER: This is likely an infra error that can be solved by RESTARTING this test." else echo "RELEASE MANAGER: This could be an error in the test. Please REVIEW THE LOGS and ping the test owner."
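The same classification, as a short Python sketch using half-open bands [low, high); the band boundaries are taken from the script above, and the function name is an assumption.

```python
def reason(code: int) -> str:
    # Half-open bands: each exit code belongs to exactly one band.
    if code == 0:
        return "success"
    if 1 <= code < 10:
        return "runtime error"
    if 10 <= code < 20:
        return "infra error"
    if 30 <= code < 40:
        return "infra timeout"
    if code == 42:                   # checked before the 40-49 band, as above
        return "command timeout"
    if 40 <= code < 50:
        return "command error"
    return "unknown"

assert reason(10) == "infra error"   # 10 fell into "runtime error" before the fix
```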
[samplers/raysampler.py] change argument ordering to be the same as the other samplers
@@ -163,7 +163,7 @@ class EpiSampler(object): Default (empty node_info) is using ray scheduling policy. """ - def __init__(self, pol, env, num_parallel=8, prepro=None, seed=256, + def __init__(self, env, pol, num_parallel=8, prepro=None, seed=256, node_info={}): pol = copy.deepcopy(pol) pol.to('cpu')
Add regression test Closes sympy/sympy#7171
@@ -170,7 +170,7 @@ def test_sin_rewrite(): assert sin(cot(x)).rewrite( exp).subs(x, 3).n() == sin(x).rewrite(exp).subs(x, cot(3)).n() assert sin(log(x)).rewrite(Pow) == I*x**-I / 2 - I*x**I / 2 - assert sin(x).rewrite(Pow) == sin(x) + assert sin(x).rewrite(Pow) == sin(x) # issue sympy/sympy#7171 assert sin(x).rewrite(csc) == 1/csc(x)
[IMPR] Use SingleSiteBot with states_redirect.py

The Bot class is noted as to be deprecated; use SingleSiteBot instead. StatesRedirectBot.site is set by the super class, but the generator must become a StatesRedirectBot property. Create the abbrev in the setup method.
@@ -25,7 +25,7 @@ import re import pywikibot -from pywikibot.bot import suggest_help +from pywikibot.bot import SingleSiteBot, suggest_help from pywikibot import i18n try: @@ -34,32 +34,35 @@ except ImportError as e: pycountry = e -class StatesRedirectBot(pywikibot.Bot): +class StatesRedirectBot(SingleSiteBot): """Bot class used for implementation of re-direction norms.""" def __init__(self, start, force): """Initializer. - Parameters: - @param start:xxx Specify the place in the alphabet to start - searching. - @param force: Don't ask whether to create pages, just create - them. + @param start:xxx Specify the place in the alphabet to start searching. + @type start: str + @param force: Don't ask whether to create pages, just create them. + @type force: bool """ - site = pywikibot.Site() - generator = site.allpages(start=start) - super(StatesRedirectBot, self).__init__(generator=generator) - + super(StatesRedirectBot, self).__init__() + self.start = start self.force = force - # Created abbrev from pycountry data base + def setup(self): + """Create abbrev from pycountry data base.""" self.abbrev = {} for subd in pycountry.subdivisions: # Used subd.code[3:] to extract the exact code for # subdivisional states(ignoring the country code). self.abbrev[subd.name] = subd.code[3:] + @property + def generator(self): + """Generator used by run() method.""" + return self.site.allpages(start=self.start) + def treat(self, page): """Re-directing process.
test_signup: Test that cloning a system bot's email is not allowed. Just now this is largely redundant with `test_signup_already_active`; but very soon when we allow reusing an email across realms, the logic will diverge.
@@ -1470,6 +1470,16 @@ class UserSignUpTest(ZulipTestCase): result = self.client_get(result.url) self.assert_in_response("You've already registered", result) + def test_signup_system_bot(self) -> None: + email = "[email protected]" + result = self.client_post('/accounts/home/', {'email': email}, subdomain="lear") + self.assertEqual(result.status_code, 302) + self.assertIn('login', result['Location']) + result = self.client_get(result.url) + + # This is not really the right error message, but at least it's an error. + self.assert_in_response("You've already registered", result) + def test_signup_invalid_name(self) -> None: """ Check if an invalid name during signup is handled properly.
Another small modification to replace static ESCU name with the name of your app
@@ -171,7 +171,14 @@ class Initialize: for fname in ["savedsearches_investigations.j2", "savedsearches_detections.j2", "analyticstories_investigations.j2", "analyticstories_detections.j2", "savedsearches_baselines.j2"]: full_path = os.path.join(filename_root, fname) self.simple_replace_line(full_path, original, updated) - #Generate directories? + + raw ='''{app_name} - ''' + original = raw.format(app_name=".escu.") # + updated = raw.format(app_name=f".{self.app_name}.") + filename_root = os.path.join(self.path,"bin/contentctl_project/contentctl_infrastructure/adapter/templates/") + for fname in ["savedsearches_investigations.j2", "savedsearches_detections.j2", "savedsearches_baselines.j2"]: + full_path = os.path.join(filename_root, fname) + self.simple_replace_line(full_path, original, updated) def generate_content_version_file(self): new_content_version = CONTENT_VERSION_FILE.format(version=self.app_version)
Remove min from possible negatives in the schemas. Remove min from dT_Qcs (HVAC) and from Tcs_set_C and Tcs_setb_C (USE TYPES) in the schemas.yml file.
@@ -1802,13 +1802,11 @@ get_building_comfort: type: float unit: '[C]' values: '{0.0...n}' - min: 0.0 Tcs_setb_C: description: Setback point of temperature for cooling system type: float unit: '[C]' values: '{0.0...n}' - min: 0.0 Ths_set_C: description: Setpoint temperature for heating system type: float @@ -2394,7 +2392,6 @@ get_database_air_conditioning_systems: type: float unit: '[C]' values: '{0.0...n}' - min: 0.0 dT_Qhs: description: correction temperature of emission losses due to control system of heating @@ -6950,13 +6947,11 @@ get_database_use_types_properties: type: float unit: '[C]' values: '{0.0...n}' - min: 0.0 Tcs_setb_C: description: Setback point of temperature for cooling system type: float unit: '[C]' values: '{0.0...n}' - min: 0.0 Ths_set_C: description: Setpoint temperature for heating system type: float
Add test whirl_direction(). I used assert_equal for this test since it compares a list of strings, so it doesn't make sense to apply a command which considers tolerance in this case. Besides, it does need to be the exact output.
@@ -2,7 +2,7 @@ import os import numpy as np import pytest -from numpy.testing import assert_almost_equal, assert_allclose +from numpy.testing import assert_almost_equal, assert_allclose, assert_equal from ross.bearing_seal_element import * from ross.disk_element import * @@ -1022,6 +1022,10 @@ def test_whirl_values(rotor7): [1., 0., 1., 0., 1., 0.], atol=0 ) + assert_equal( + rotor7.whirl_direction(), + np.array(['Backward', 'Forward', 'Backward', 'Forward', 'Backward', 'Forward'], dtype='<U8') + ) def test_save_load():
Remove thrust_t from remainder_kernel_cuda. Summary: complex is not supported, so there is no need to use thrust. Pull Request resolved:
@@ -68,9 +68,8 @@ void mul_kernel_cuda(TensorIterator& iter) { void remainder_kernel_cuda(TensorIterator& iter) { if (isIntegralType(iter.dtype(), /*includeBool*/ false)) { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "remainder_cuda", [&]() { - using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; - gpu_kernel_with_scalars(iter, []GPU_LAMBDA(thrust_t a, thrust_t b) -> thrust_t { - thrust_t r = a % b; + gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { + scalar_t r = a % b; if ((r != 0) && ((r < 0) != (b < 0))) { r += b; } @@ -79,10 +78,9 @@ void remainder_kernel_cuda(TensorIterator& iter) { }); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "remainder_cuda", [&]() { - using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel_with_scalars(iter, - []GPU_LAMBDA(thrust_t a, thrust_t b) __ubsan_ignore_float_divide_by_zero__ -> thrust_t { - return a - b * static_cast<thrust_t>(std::floor(a / b)); + []GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t { + return a - b * static_cast<scalar_t>(std::floor(a / b)); }); }); }
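A plain-Python sketch of the arithmetic both branches implement (the helper names are illustrative, not ATen's): the integer path adjusts a C-style truncated remainder into the divisor's sign, and the float path uses the a - b*floor(a/b) identity.

```python
import math

def int_remainder(a: int, b: int) -> int:
    r = a - int(a / b) * b           # C-style %: truncates toward zero
    if r != 0 and (r < 0) != (b < 0):
        r += b                       # shift into the divisor's sign
    return r

def float_remainder(a: float, b: float) -> float:
    return a - b * math.floor(a / b)

# Both conventions agree with Python's own % operator:
assert int_remainder(-7, 3) == -7 % 3 == 2
assert float_remainder(-7.5, 2.0) == -7.5 % 2.0 == 0.5
```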
correct link of flake8
@@ -27,9 +27,9 @@ is 0.22. We periodically update it to newer versions. Linting ------- -.. _`Flake8`: https://github.com/google/yapf +.. _`Flake8`: https://github.com/pycqa/flake8 -We use `Flake8` to check our code syntax. Lint tools basically provide these benefits. +We use `Flake8`_ to check our code syntax. Lint tools basically provide these benefits. - Prevent things like syntax errors or typos - Save our review time (no need to check unused codes or typos)
fix: fix TIM register CCMR1/2_Output. The TIM register CCMR2_Output incorrectly listed the high bits. Fixes the naming of those register bits.
@@ -76,7 +76,7 @@ TIM[45]: - OR1 - OR2 - CCMR?_Output: + CCMR1_Output: _add: OC1M_3: description: Bit 3 of Output compare 1 mode @@ -87,6 +87,17 @@ TIM[45]: bitOffset: 24 bitWidth: 1 + CCMR2_Output: + _add: + OC3M_3: + description: Bit 3 of Output compare 3 mode + bitOffset: 16 + bitWidth: 1 + OC4M_3: + description: Bit 3 of Output compare 4 mode + bitOffset: 24 + bitWidth: 1 + TIM[34]: CNT: _modify:
Disable crontab with empty whitelist. Ansible won't overwrite it in the case of legitimate crontab users being added to the whitelist.
group: root mode: 0644 +- name: Disable user crontab with whitelist + copy: + content: "" + dest: /etc/cron.allow + force: no + owner: root + group: root + mode: 0644 + - name: Copy over sshd configs copy: src: sshd_config
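A rough Python analogue of what `copy` with `force: no` guarantees (an illustration of intent, not Ansible internals): create an empty /etc/cron.allow when it is absent, but never clobber a whitelist an admin has already populated.

```python
import os

def ensure_file(path: str, content: str = "") -> None:
    # Mirror `copy` with `force: no`: only write when the file is absent.
    if not os.path.exists(path):
        with open(path, "w") as fh:
            fh.write(content)

ensure_file("/tmp/cron.allow")       # hypothetical path for illustration
```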
docs: fix typos in tests-pytest-fixtures.rst
@@ -36,7 +36,7 @@ These fixtures provide quick access to Brownie objects that are frequently used .. py:attribute:: chain - Yields an :func:`Chain <brownie.network.state.Chain>` object, used to access block data and interact with the local test chain. + Yields a :func:`Chain <brownie.network.state.Chain>` object, used to access block data and interact with the local test chain. .. code-block:: python :linenos: @@ -136,7 +136,7 @@ For example - if your project contains a contract named ``Token``, there will be Isolation Fixtures ================== -Isolation fixtures are used ensure a clean test environment when running tests, and to prevent the results of a test from affecting subsequent tests. See :ref:`pytest-fixtures-isolation` for information on how to use these fixtures. +Isolation fixtures are used to ensure a clean test environment when running tests, and to prevent the results of a test from affecting subsequent tests. See :ref:`pytest-fixtures-isolation` for information on how to use these fixtures. .. py:attribute:: module_isolation
Support `when_type` for singledispatch. This makes singledispatch backwards compatible enough with simplegeneric.
@@ -5,18 +5,38 @@ Aspects form intermediate items between tools and items. from __future__ import absolute_import import sys +import warnings from builtins import object if sys.version_info.major >= 3: # Modern Python - from functools import singledispatch + from functools import singledispatch as real_singledispatch else: - from singledispatch import singledispatch + from singledispatch import singledispatch as real_singledispatch from gi.repository import Gdk from gaphas.item import Item, Element +def singledispatch(func): + """ + Wrapper around singledispatch(), with an extra compatibility function + so code will not break when upgrading from 1.0 to 1.1. + """ + wrapped = real_singledispatch(func) + + def when_type(cls): + warnings.warn( + "when_type: is deprecated, use `register` instead", + category=DeprecationWarning, + stacklevel=2, + ) + return wrapped.register(cls) + + wrapped.when_type = when_type + return wrapped + + class ItemFinder(object): """ Find an item on the canvas.
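A hypothetical usage of the shim above, with toy classes: legacy `when_type` call sites keep working, now with a DeprecationWarning, alongside the native `register` spelling.

```python
class Circle: pass
class Square: pass

@singledispatch                      # the wrapper defined above
def tool_for(item):
    raise NotImplementedError

@tool_for.when_type(Circle)          # legacy simplegeneric-style spelling
def circle_tool(item):
    return "circle tool"

@tool_for.register(Square)           # preferred singledispatch spelling
def square_tool(item):
    return "square tool"

assert tool_for(Circle()) == "circle tool"
assert tool_for(Square()) == "square tool"
```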
TST: fixed registration. Added pysat registration of test instruments.
@@ -654,7 +654,8 @@ class TestFmtCols(): class TestAvailableInst(): - + setup = pysat.tests.test_registry.TestRegistration.setup + teardown = pysat.tests.test_registry.TestRegistration.teardown @pytest.mark.parametrize("inst_loc", [None, pysat.instruments]) @pytest.mark.parametrize("inst_flag, plat_flag", [(None, None), (False, False), (True, True)])
Server: Optimize the DICOM search & add filter k
@@ -27,6 +27,7 @@ from girder.api import access from girder.api.describe import Description, autoDescribeRoute from girder.api.rest import Resource from girder.constants import AccessType, TokenScope +from girder.exceptions import RestException from girder.models.item import Item from girder.models.file import File from girder.utility import search @@ -201,26 +202,29 @@ def dicomSubstringSearchHandler(query, types, user=None, level=None, limit=0, of Provide a substring search on both keys and values. """ if types != ['item']: - raise TypeError('The dicom search is only able to search in Item.') + raise RestException('The dicom search is only able to search in Item.') - # The insensitive Case is not perfect - # We will need to search into object value as improvment. jsQuery = """ - return Object.keys(obj.dicom.meta).some( + function() { + var queryKey = '%(queryKey)s'.toLowerCase(); + var queryValue = '%(queryValue)s'.toLowerCase(); + var dicomMeta = obj.dicom.meta; + return Object.keys(dicomMeta).some( function(key) { - return (key.toLowerCase().indexOf('%(key)s'.toLowerCase()) !== -1) || - obj.dicom.meta[key].toString().toLowerCase() - .indexOf('%(value)s'.toLowerCase()) !== -1; + return (key.toLowerCase().indexOf(queryKey) !== -1) || + dicomMeta[key].toString().toLowerCase().indexOf(queryValue) !== -1; }) - """ % {'key': query, 'value': query} - mongoQuery = {'dicom': {'$exists': True}, '$where': jsQuery} + } + """ % {'queryKey': query, 'queryValue': query} # Sort the documents inside MongoDB - cursor = Item().find(mongoQuery) - + cursor = Item().find({'dicom': {'$exists': True}, '$where': jsQuery}) # Filter the result result = { - 'item': [Item().filter(d, user) for d in cursor] + 'item': [ + Item().filter(doc, user) + for doc in Item().filterResultsByPermission(cursor, user, level, limit, offset) + ] } return result @@ -231,7 +235,6 @@ def load(info): events.bind('data.process', 'dicom_viewer', _uploadHandler) # Add the DICOM search mode only once - if search.getSearchModeHandler('dicom') is None: search.addSearchMode('dicom', dicomSubstringSearchHandler) dicomItem = DicomItem()
Fixed bug in rbac. Removed quotes from the namespace for the subject.
@@ -37,6 +37,6 @@ roleRef: name: {{ template "ambassador.fullname" . }} subjects: - name: {{ template "ambassador.serviceAccountName" . }} - namespace: {{ .Release.Namespace | quote }} + namespace: {{ .Release.Namespace }} kind: ServiceAccount {{- end -}}
Add `remove()` to Reaction. Added a coro, `remove()`, which takes a sole parameter, `user`. This new coro will remove the reaction by the provided user from the reaction's message. `message.remove_reaction(reaction, member)` was not removed, so as not to introduce breaking changes.
@@ -93,6 +93,34 @@ class Reaction: def __repr__(self): return '<Reaction emoji={0.emoji!r} me={0.me} count={0.count}>'.format(self) + async def remove(self, user): + """|coro| + + Remove the reaction by the provided :class:`User` from the message. + + If the reaction is not your own (i.e. ``user`` parameter is not you) then + the :attr:`discord.permissions.Permissions.manage_messages` permission is needed. + + The ``user`` parameter must represent a user or member and meet + the :class:`abc.Snowflake` abc. + + Parameters + ----------- + user: :class:`abc.Snowflake` + The user or member from which to remove the reaction. + + Raises + ------- + HTTPException + Removing the reaction failed. + Forbidden + You do not have the proper permissions to remove the reaction. + NotFound + The user you specified, or the reaction's message was not found. + """ + + await self.message.remove_reaction(self.emoji, user) + def users(self, limit=None, after=None): """Returns an :class:`AsyncIterator` representing the users that have reacted to the message.
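A hypothetical call site in a discord.py event handler; `client` is assumed to be an existing `discord.Client` instance.

```python
@client.event
async def on_reaction_add(reaction, user):
    # Remove any thumbs-down reaction as soon as it appears.
    if str(reaction.emoji) == '\N{THUMBS DOWN SIGN}':
        await reaction.remove(user)          # new shorthand
        # Equivalent long form, kept for backwards compatibility:
        # await reaction.message.remove_reaction(reaction.emoji, user)
```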
Update summary/views.py. Add a try/except block to get as many summary fields as possible.
@@ -249,10 +249,7 @@ def summary(request, user_uuid): if len(message.keys()) > 0: results['messages'][message.get('message_type') or "type"] = message.get('message') or "" - if results['status'].lower() != "optimal": - json_response['scenarios'].append(results) - continue - + try: site = get_scenario_data(sites, scenario.run_uuid)[0] load = get_scenario_data(loads, scenario.run_uuid)[0] batt = get_scenario_data(batts, scenario.run_uuid)[0] @@ -364,8 +361,12 @@ def summary(request, user_uuid): else: results['batt_kw'] = 'not evaluated' results['batt_kwh'] = 'not evaluated' - + except: json_response['scenarios'].append(results) + continue + else: + json_response['scenarios'].append(results) + response = JsonResponse(json_response, status=200) return response
api_types: Support all Zulip 2.1+ topic links message formats.

The `topic_links` parameter is new in Zulip 3.0 (feature level 1). Previously it was named `subject_links`. Since projected Zulip 4.0 (feature level 46) `topic_links` changed:

* earlier: ['www.link1.com']
* now: [{'url': 'www.link1.com', 'text': 'My link'}]
@@ -30,7 +30,11 @@ class Message(TypedDict, total=False): timestamp: int client: str subject: str # Only for stream msgs. - topic_links: List[str] + # NOTE: new in Zulip 3.0 / ZFL 1, replacing `subject_links` + # NOTE: API response format of `topic_links` changed in Zulip 4.0 / ZFL 46 + topic_links: List[Any] + # NOTE: `subject_links` in Zulip 2.1; deprecated from Zulip 3.0 / ZFL 1 + subject_links: List[str] is_me_message: bool reactions: List[Dict[str, Any]] submessages: List[Dict[str, Any]]
Added Prime Number to Fortran README
@@ -15,6 +15,7 @@ to an existing article which provides further documentation. - :warning: [Even-Odd in Fortran][even-odd-article-issue] - :warning: [Factorial in Fortran][factorial-article-issue] - :warning: [Hello World in Fortran][hello-world-article-issue] +- :warning: [Prime in Fortran][prime-number-article-issue] - :warning: [Reverse String in Fortran][reverse-string-article-issue] ## Fun Facts @@ -26,11 +27,13 @@ to an existing article which provides further documentation. - [Fortran Wiki][fortan-wiki] -[baklava-article-issue]: https://github.com/TheRenegadeCoder/sample-programs/issues/427 [baklava-repo]: https://github.com/toturkmen/baklava +[fortran-wiki]: https://en.wikipedia.org/wiki/Fortran + +[baklava-article-issue]: https://github.com/TheRenegadeCoder/sample-programs/issues/427 [capitalize-article-issue]: https://github.com/TheRenegadeCoder/sample-programs-website/issues/386 [even-odd-article-issue]: https://github.com/TheRenegadeCoder/sample-programs-website/issues/388 [factorial-article-issue]: https://github.com/TheRenegadeCoder/sample-programs-website/issues/387 -[fortran-wiki]: https://en.wikipedia.org/wiki/Fortran [hello-world-article-issue]: https://github.com/jrg94/sample-programs/issues/73 +[prime-number-article-issue]: https://github.com/TheRenegadeCoder/sample-programs-website/issues/397 [reverse-string-article-issue]: https://github.com/TheRenegadeCoder/sample-programs-website/issues/383
Use a generator instead of the list extend or add method. A generator is a memory-efficient approach.
@@ -68,17 +68,20 @@ class Tree(Generic[_Leaf_T]): return self.data def _pretty(self, level, indent_str): - if len(self.children) == 1 and not isinstance(self.children[0], Tree): - return [indent_str*level, self._pretty_label(), '\t', '%s' % (self.children[0],), '\n'] + yield indent_str*level + yield self._pretty_label() - l = [indent_str*level, self._pretty_label(), '\n'] + if len(self.children) == 1 and not isinstance(self.children[0], Tree): + for i in '\t', '%s' % (self.children[0],), '\n': + yield i + else: + yield '\n' for n in self.children: if isinstance(n, Tree): - l += n._pretty(level+1, indent_str) + yield from n._pretty(level+1, indent_str) else: - l += [indent_str*(level+1), '%s' % (n,), '\n'] - - return l + for i in indent_str*(level+1), '%s' % (n,), '\n': + yield i def pretty(self, indent_str: str=' ') -> str: """Returns an indented string representation of the tree.
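A sketch of the call site that makes the change pay off, assuming the `Tree` class above: `pretty()` can stream the generated pieces straight into `''.join()` instead of building an intermediate list for every subtree.

```python
def pretty(self, indent_str: str = '  ') -> str:
    # Consume the generator lazily; no per-subtree lists are allocated.
    return ''.join(self._pretty(0, indent_str))
```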
Fix docstring information to be clear

Updated the docstrings due to errors I encountered:

* `is_private` must be 1, where one would normally expect true or false
* `filedata` must be urlencoded base64 to work
@@ -319,13 +319,13 @@ def attach_file(filename=None, filedata=None, doctype=None, docname=None, folder '''Attach a file to Document (POST) :param filename: filename e.g. test-file.txt - :param filedata: base64 encode filedata - :param doctype: Reference DocType to attach file - :param docname: Reference DocName to attach file + :param filedata: base64 encode filedata which must be urlencoded + :param doctype: Reference DocType to attach file to + :param docname: Reference DocName to attach file to :param folder: Folder to add File into :param decode_base64: decode filedata from base64 encode, default is False - :param is_private: Attach file as private file - :param docfield: file to attach to''' + :param is_private: Attach file as private file (1 or 0) + :param docfield: file to attach to (optional)''' request_method = frappe.local.request.environ.get("REQUEST_METHOD")
email-log: Handle checkbox saying "Show text only version". After clicking the checkbox saying "Show text only version" the UI was rendered correctly, but after refreshing the page with the checkbox still checked, emails were shown without the text only version while the checkbox value remained checked. Now, after refreshing the page, the checkbox value changes to its default value.
</div> <div style="text-align:right"> <label> - <input type="checkbox" id="toggle"/> + <input type="checkbox" autocomplete="off" id="toggle"/> <strong>Show text only version</strong> </label> <a href="#" data-toggle="modal" data-target="#forward_email_modal">
Frame : Minor tweaks

* Remove unused include
* Don't use GraphLayer::Nodes, because Frame has nothing to do with graph drawing
* Always call the base class doRenderLayer() method
#include "IECore/MeshPrimitive.h" #include "IECore/SimpleTypedData.h" -#include "math.h" - using namespace GafferUI; using namespace IECore; using namespace Imath; @@ -75,14 +73,12 @@ Imath::Box3f Frame::bound() const void Frame::doRenderLayer( Layer layer, const Style *style ) const { - if( layer != GraphLayer::Nodes ) + IndividualContainer::doRenderLayer( layer, style ); + if( layer != Layer::Main ) { return; } Imath::Box3f b = IndividualContainer::bound(); - style->renderFrame( Box2f( V2f( b.min.x, b.min.y ), V2f( b.max.x, b.max.y ) ), m_border ); - - IndividualContainer::doRenderLayer( layer, style ); }
Remove nodename when creating hsbench-pod. Fixes issue 4233.
@@ -51,7 +51,6 @@ class HsBench(object): # Create test pvc+pod log.info(f"Create Golang pod to generate S3 workload... {self.namespace}") pvc_size = "50Gi" - node_name = "compute-0" self.pod_name = "hsbench-pod" self.pvc_obj = helpers.create_pvc( sc_name=constants.DEFAULT_STORAGECLASS_RBD, @@ -63,7 +62,6 @@ class HsBench(object): namespace=self.namespace, pod_name=self.pod_name, pvc_name=self.pvc_obj.name, - node_name=node_name, sa_name=self.sa_name, pod_dict_path=self.pod_dic_path, dc_deployment=True,
Add listen to state.pkg. Allows for using listen and listen_in in salt-ssh.
@@ -1691,6 +1691,7 @@ def pkg(pkg_path, pkg_sum, hash_type, test=None, **kwargs): st_ = salt.state.State(popts, pillar=pillar) snapper_pre = _snapper_pre(popts, kwargs.get('__pub_jid', 'called localy')) ret = st_.call_chunks(lowstate) + ret = st_.call_listen(lowstate, ret) try: shutil.rmtree(root) except (IOError, OSError):
Update floorplan.tcl. Save the number of columns in the floorplan to use in pin assignment.
@@ -70,6 +70,7 @@ foreach_in_collection tile [get_cells -hier -filter "ref_name=~Tile_PE* || ref_n # Get grid height/width from max_row/col set grid_num_rows [expr $max_row - $min_row + 1] set grid_num_cols [expr $max_col - $min_col + 1] +set savedvars(grid_num_cols) $grid_num_cols # Multiply separation params by respective pitches to calculate actual separation numbers set tile_separation_x [expr $tile_separation_x * $horiz_pitch] set tile_separation_y [expr $tile_separation_y * $vert_pitch]
fix: correct inconsistencies in .processors.{find,Processors.find}. Correct inconsistencies between the arguments of .processors.find and .processors.Processors.find (and .backends.Parsers.find).
@@ -227,16 +227,18 @@ class Processors(object):
         """
         return find_by_type(ptype, self.list(sort=False))

-    def find(self, ipath, ptype=None,
+    def find(self, obj, forced_type=None,
              cls=anyconfig.models.processor.Processor):
         """
-        :param ipath: file path
-        :param ptype: Processor's type or None
+        :param obj:
+            a file path, file or file-like object, pathlib.Path object or
+            `~anyconfig.globals.IOInfo` (namedtuple) object
+        :param forced_type: Forced processor type to find
         :param cls: A class object to compare with `ptype`
         :return: an instance of processor class to process `ipath` data later
         :raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
         """
-        return find(ipath, self.list(sort=False), forced_type=ptype, cls=cls)
+        return find(obj, self.list(sort=False), forced_type=forced_type, cls=cls)

 # vim:sw=4:ts=4:et:
ci: test with all current python versions. Python 3.6 is EOL and 3.10 has been out for a while; make sure we test with those.
@@ -59,7 +59,7 @@ jobs: strategy: max-parallel: 4 matrix: - python-version: [3.9, 3.8, 3.7, 3.6] + python-version: ["3.7", "3.8", "3.9", "3.10"] steps: - id: checkout-code
simplify Multiply._add. This patch removes simplifications of the type (a*b)+a -> a*(b+1), which are not considered important. The remaining simplification (a*b)+(a*c) -> a*(b+c) is left in place in a slightly more efficient form.
@@ -1721,13 +1721,9 @@ class Multiply(Array): def _add(self, other): func1, func2 = self.funcs - if other == func1: - return Multiply([func1, Add([func2, ones_like(func2)])]) - if other == func2: - return Multiply([func2, Add([func1, ones_like(func1)])]) - if isinstance(other, Multiply) and not self.funcs.isdisjoint(other.funcs): - f = next(iter(self.funcs & other.funcs)) - return Multiply([f, Add(self.funcs + other.funcs - [f,f])]) + if isinstance(other, Multiply): + for common in self.funcs & other.funcs: + return common * Add(self.funcs + other.funcs - [common, common]) def _determinant(self, axis1, axis2): func1, func2 = self.funcs
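A standalone sketch of the surviving rule, with `collections.Counter` standing in for nutils' internal multiset (the helper name is mine, not the library's):

```python
from collections import Counter

def factor_common(funcs1, funcs2):
    """(a*b)+(a*c) -> a*(b+c): factor out one shared multiplicand."""
    for common in Counter(funcs1) & Counter(funcs2):
        rest1 = list((Counter(funcs1) - Counter([common])).elements())
        rest2 = list((Counter(funcs2) - Counter([common])).elements())
        return common, rest1, rest2
    return None

# (2*x) + (2*y): factoring out 2 leaves x and y to be summed.
assert factor_common(['2', 'x'], ['2', 'y']) == ('2', ['x'], ['y'])
```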
Decrement Guild.member_count even if member is not cached Fix
@@ -686,11 +686,11 @@ class ConnectionState: def parse_guild_member_remove(self, data): guild = self._get_guild(int(data['guild_id'])) if guild is not None: + guild._member_count -= 1 user_id = int(data['user']['id']) member = guild.get_member(user_id) if member is not None: guild._remove_member(member) - guild._member_count -= 1 self.dispatch('member_remove', member) else: log.warning('GUILD_MEMBER_REMOVE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
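A toy illustration of the drift the reordering fixes, with a plain dict standing in for the member cache: the count must change even when the departing member was never cached locally.

```python
cached_members = {}                  # member 42 was never cached
member_count = 3

user_id = 42
member_count -= 1                    # decrement unconditionally (the fix)
member = cached_members.pop(user_id, None)
if member is not None:
    pass                             # would dispatch member_remove with the object

assert member_count == 2             # the old ordering would still report 3 here
```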
Support jupyter server root_dir with lab extension. Jupyter sends the path relative to the root_dir, which can be different from the cwd. This commit fixes the lab extension for panel preview to account for root_dir. Reference:
from urllib.parse import urljoin import tornado +import os from bokeh.command.util import build_single_handler_application from bokeh.embed.bundle import extension_dirs @@ -65,6 +66,17 @@ class ServerApplicationProxy: def __init__(self, app, **kw): self._app = app + @property + def root_dir(self): + """ + Gets the root directory of the jupyter server app + + This is useful as the path sent received by the handler + may be different from the root dir. + Reference: https://github.com/holoviz/panel/issues/3170 + """ + return self._app.settings['server_root_dir'] + def __getattr__(self, key): return getattr(self._app, key) @@ -81,6 +93,7 @@ class PanelHandler(DocHandler): pass async def get(self, path, *args, **kwargs): + path = os.path.join(self.application.root_dir, path) if path in _APPS: app, context = _APPS[path] else: @@ -122,6 +135,7 @@ class PanelWSHandler(WSHandler): pass async def open(self, path, *args, **kwargs): + path = os.path.join(self.application.root_dir, path) _, context = _APPS[path] token = self._token
Remove references to py34 from developer guide. The developer guide should not include references to Python 3.4 or py34, since this Python version is not supported. Related-Bug:
@@ -8,9 +8,9 @@ This is a quick walkthrough to get you started developing code for Ironic. This assumes you are already familiar with submitting code reviews to an OpenStack project. -The gate currently runs the unit tests under Python 2.7, Python 3.4 -and Python 3.5. It is strongly encouraged to run the unit tests locally prior -to submitting a patch. +The gate currently runs the unit tests under Python 2.7 and Python 3.5. It +is strongly encouraged to run the unit tests locally prior to submitting a +patch. .. note:: Do not run unit tests on the same environment as devstack due to @@ -77,11 +77,11 @@ it, follow the instructions for installing prerequisites above and sudo tar xzf Python-3.5.2.tgz cd Python-3.5.2 sudo ./configure - sudo make altinstall - # This will install Python 3.5 without replacing 3.4. To check if 3.5 was installed properly - run this command: + # Install Python 3.5 without replacing the system-wide Python version: + sudo make altinstall + # Check if Python 3.5 was installed properly: python3.5 -V - On Fedora 23:: @@ -120,7 +120,7 @@ Running Unit and Style Tests All unit tests should be run using tox. To run Ironic's entire test suite:: - # to run the py27, py34, py35 unit tests, and the style tests + # to run the py27, py35 unit tests, and the style tests tox To run a specific test or tests, use the "-e" option followed by the tox target @@ -130,13 +130,13 @@ name. For example:: tox -epy27 -epep8 .. note:: - If tests are run under py27 and then run under py34 or py35 the following error may occur:: + If tests are run under py27 and then run under py35 the following error may occur:: db type could not be determined ERROR: InvocationError: '/home/ubuntu/ironic/.tox/py35/bin/ostestr' To overcome this error remove the file `.testrepository/times.dbm` - and then run the py34 or py35 test. + and then run the py35 test. You may pass options to the test programs using positional arguments. To run a specific unit test, this passes the -r option and desired test
Accept splitting at the tail of the dataset. issue:
@@ -101,7 +101,7 @@ def split_dataset(dataset, split_at, order=None): .format(type(split_at))) if split_at < 0: raise ValueError('split_at must be non-negative') - if split_at >= n_examples: + if split_at > n_examples: raise ValueError('split_at exceeds the dataset size') subset1 = SubDataset(dataset, 0, split_at, order) subset2 = SubDataset(dataset, split_at, n_examples, order)
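A plain-list analogue of the relaxed boundary (standing in for `SubDataset`): `split_at == len(dataset)` is now legal and simply yields an empty second subset.

```python
dataset = list(range(5))
split_at = len(dataset)              # previously rejected, now accepted

first, second = dataset[:split_at], dataset[split_at:]
assert first == [0, 1, 2, 3, 4]
assert second == []
```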
[IMPR] Don't trust token from NeedToken response

It can stop working any time soon, and it can also be invalid if there are multiple login attempts because of endless loops.

* generate a fresh login token on every login attempt
* copy _logged_in() from api.Request
* keep track of such issues
@@ -3140,12 +3140,12 @@ class LoginManager(login.LoginManager): if self.site.family.ldapDomain: login_request[self.keyword('ldap')] = self.site.family.ldapDomain + self.site._loginstatus = -2 # IN_PROGRESS + while True: # get token using meta=tokens if supported if not below_mw_1_27: login_request[self.keyword('token')] = self.get_login_token() - self.site._loginstatus = -2 # IN_PROGRESS - while True: # try to login login_result = login_request.submit() @@ -3163,10 +3163,11 @@ class LoginManager(login.LoginManager): fail_reason = response.get(self.keyword('reason'), '') if status == self.keyword('success'): return '' - elif status == 'NeedToken': - # Kept for backwards compatibility - token = response['token'] - login_request['lgtoken'] = token + elif status in ('NeedToken', 'WrongToken') and not below_mw_1_27: + # if incorrect login token was used, + # force relogin and generate fresh one + pywikibot.error('Received incorrect login token. ' + 'Forcing re-login.') continue elif (status == 'Throttled' or status == 'FAIL' and response['messagecode'] == 'login-throttled'
omit group conv NHWC test for HIP. Summary: Pull Request resolved: broke ROCM test. We don't have group conv in NHWC for HIP yet, and this diff omits related tests.
@@ -81,7 +81,9 @@ class TestConvolution(serial.SerializedTestCase): kernel, size, input_channels, output_channels, batch_size, group, order, engine, shared_buffer, use_bias, gc, dc): # TODO: Group conv in NHWC not implemented for GPU yet. - assume(group == 1 or order == "NCHW" or gc.device_type != caffe2_pb2.CUDA) + assume(group == 1 or order == "NCHW" or gc.device_type == caffe2_pb2.CPU) + if group != 1 and order == "NHWC": + dc = [d for d in dc if d.device_type == caffe2_pb2.CPU] input_channels *= group output_channels *= group @@ -208,9 +210,11 @@ class TestConvolution(serial.SerializedTestCase): # TODO: Group conv in NHWC not implemented for GPU yet. assume( group == 1 - or (order == "NCHW" or gc.device_type != caffe2_pb2.CUDA) + or (order == "NCHW" or gc.device_type == caffe2_pb2.CPU) and engine != "MKLDNN" ) + if group != 1 and order == "NHWC": + dc = [d for d in dc if d.device_type == caffe2_pb2.CPU] input_channels *= group output_channels *= group