message
stringlengths
13
484
diff
stringlengths
38
4.63k
Updated part qty calculation Updated part qty calculation to allow multiple boards
@@ -30,7 +30,7 @@ __author__ = 'Hildo Guillardi Junior' __webpage__ = 'https://github.com/hildogjr/' __company__ = 'University of Campinas - Brazil' -__all__ = ['subpart_split','subpart_qty','groups_sort'] +__all__ = ['subpart_split','part_qty','groups_sort'] # Qty and part separators are escaped by preceding with '\' = (?<!\\) QTY_SEPRTR = r'(?<!\\)[:]' # Separator for the subpart quantity and the part number. @@ -224,7 +224,7 @@ def subpart_split(components): return split_components -def subpart_qty(component): +def part_qty(component): ''' Calculate the string of the quantity of the item parsing the referente (design) quantity and the sub quantity (in case that @@ -248,7 +248,8 @@ def subpart_qty(component): if logger.isEnabledFor(DEBUG_OBSESSIVE): print('Qty>>',component.refs,'>>',len(component.refs)) string = '={{}}*{qty}'.format(qty=len(component.refs)) - return string + print('DDD>>',component.qty) + return string.format(component.qty)
Add resource requirements and repeats for star search feature importance Summary: Pull Request resolved: as titled
@@ -41,7 +41,9 @@ class FeatureImportancePerturbation(FeatureImportanceBase): perturbed_data = self.perturb_fn(copy_data, feature_idx) perturbed_pred_value = self.pred_fn(self.model, perturbed_data) feature_importance_vals[feature_id].append( - torch.mean(torch.abs(perturbed_pred_value - pred_value)) + torch.mean( + torch.abs(perturbed_pred_value - pred_value) + ).detach() ) logger.info(f"Processed {batch_idx} batches {r}-th time")
Fix parsing 'ATM1/0.1 point-to-point' interface HG-- branch : feature/microservices
# Vendor: Cisco # OS: IOS # --------------------------------------------------------------------- -# Copyright (C) 2007-2016 The NOC Project +# Copyright (C) 2007-2017 The NOC Project # See LICENSE for details # --------------------------------------------------------------------- @@ -64,6 +64,8 @@ class Profile(BaseProfile): self.convert_interface_name_cisco(l.strip()), int(r.strip()) ) + if " point-to-point" in interface: + interface = interface.replace(" point-to-point", "") if ".ServiceInstance." in interface: interface = interface.replace(".ServiceInstance.", ".SI.") if ".SI." in interface:
[daemon] increase startup timeout to 20 sec a long timeout may be necessary when starting the daemon on login
@@ -323,7 +323,7 @@ def is_running(config_name: str) -> bool: return maestral_lock(config_name).locked() -def wait_for_startup(config_name: str, timeout: float = 10) -> None: +def wait_for_startup(config_name: str, timeout: float = 20) -> None: """ Waits until we can communicate with the maestral daemon for ``config_name``. @@ -498,7 +498,7 @@ def start_maestral_daemon( def start_maestral_daemon_process( - config_name: str = "maestral", timeout: int = 10 + config_name: str = "maestral", timeout: float = 20 ) -> Start: """ Starts the Maestral daemon in a new process by calling :func:`start_maestral_daemon`. @@ -556,7 +556,7 @@ def start_maestral_daemon_process( def stop_maestral_daemon_process( - config_name: str = "maestral", timeout: float = 10 + config_name: str = "maestral", timeout: float = 20 ) -> Stop: """Stops a maestral daemon process by finding its PID and shutting it down.
Do not reload msl data from file This seems to be the cause of the Kodi crash on android for some reason the wv crypto session become closed after RestoreKeys and when crypto_session.Encrypt is called for the manifest req kodi crashes
@@ -22,8 +22,8 @@ from resources.lib.globals import G from resources.lib.services.nfsession.msl.msl_request_builder import MSLRequestBuilder from resources.lib.services.nfsession.msl.msl_utils import (ENDPOINTS, create_req_params, generate_logblobs_params, - MSL_DATA_FILENAME, MSL_AUTH_NETFLIXID, - MSL_AUTH_USER_ID_TOKEN, MSL_AUTH_EMAIL_PASSWORD) + MSL_AUTH_NETFLIXID, MSL_AUTH_USER_ID_TOKEN, + MSL_AUTH_EMAIL_PASSWORD) from resources.lib.utils.esn import get_esn from resources.lib.utils.logging import LOG, measure_exec_time_decorator @@ -105,10 +105,7 @@ class MSLRequests(MSLRequestBuilder): LOG.debug('MSL MasterToken is not available, a new key handshake will be performed') is_handshake_required = True if is_handshake_required: - if self.perform_key_handshake(): - msl_data = json.loads(common.load_file_def(MSL_DATA_FILENAME)) - self.crypto.load_msl_data(msl_data) - self.crypto.load_crypto_session(msl_data) + self.perform_key_handshake() def _get_user_auth_data(self): """
cloud roster: Fix vm fields passed to salt.config.get_cloud_config_value salt.config.get_cloud_config_value expects 'driver' key instead of 'provider' in the 'vm_' argument. 'provider' is deprecated and its support was removed in commit ("Remove support for "provider" in cloud provider configs"). Use 'driver' key instead.
@@ -43,7 +43,7 @@ def targets(tgt, tgt_type='glob', **kwargs): # pylint: disable=W0613 for minion_id, full_info in minions.items(): profile, provider = full_info.get('profile', None), full_info.get('provider', None) vm_ = { - 'provider': provider, + 'driver': provider, 'profile': profile, } public_ips = full_info.get('public_ips', [])
add resources on packaging Section `Resources on Python packaging` did not point to any resource. Added as discussed on
@@ -225,5 +225,6 @@ parsed by ``setuptool`` to ease the pain of transition. Resources on Python packaging ============================= -Packaging in Python is hard. Here we provide a list of links for those that -want to learn more. +Packaging in Python can be hard and is constantly evolving. +`Python Packaging User Guide <https://packaging.python.org>`_ has tutorials and +up-to-date references.
Fix bug in sampler log_prob dim Summary: Pull Request resolved: Fixing a bug introduced in
@@ -107,7 +107,7 @@ class GreedyActionSampler(Sampler): def log_prob(self, scores: torch.Tensor, action: torch.Tensor) -> torch.Tensor: greedy_indices = self._get_greedy_indices(scores) match = greedy_indices == action.argmax(-1) - lp = torch.zeros_like(scores).float() + lp = torch.zeros(scores.shape[0], device=scores.device).float() lp[match] = -float("inf") return lp @@ -168,7 +168,7 @@ class EpsilonGreedyActionSampler(Sampler): max_index = self.sample_action(scores).argmax(-1) opt = max_index == action.argmax(-1) n = len(scores) - lp = torch.ones_like(scores) * self.epsilon / n + lp = torch.ones(n, device=scores.device) * self.epsilon / n lp[opt] = 1 - self.epsilon + self.epsilon / n return lp
Update Using Generics.md The previous solution was incorrect
@@ -103,11 +103,11 @@ Solution return(this.favorite3); } } - public static main(String[] args){ - List<Float> r=new ArrayList<>(); //can also be double or any other that supports decimals + public static void main(String[] args){ + List<Double> r=new ArrayList<>(); //can also be double or any other that supports decimals r.add(6.3); r.add(5.9); - FavoriteClasses<String, Integer, Float> a=new FavoriteClasses<>("Hello",67,r.get(0)); //same with int - System.out.println("My favorites are " + a.getFav1() + ", " a.getFav2() + ", and " + a.getFav3() + "."); + FavoriteClasses<String, Integer, Double> a=new FavoriteClasses<>("Hello",67,r.get(0)); //same with int + System.out.println("My favorites are " + a.getFav1() + ", "+ a.getFav2() + ", and " + a.getFav3() + "."); } }
Update actions.py fixed min_order_pct
@@ -197,6 +197,7 @@ class SimpleOrders(TensorTradeActionScheme): order_listener: 'OrderListener' = None, min_order_pct: float = 0.02) -> None: super().__init__() + self.min_order_pct = min_order_pct criteria = self.default('criteria', criteria) self.criteria = criteria if isinstance(criteria, list) else [criteria]
Update Ohio.md politician
@@ -116,7 +116,7 @@ id: oh-columbus-1 Joyce Beatty, an African American congresswoman from Ohio, was sprayed with mace or pepper spray at a protest in Columbus. -tags: celebrity, mace, pepper-spray, spray +tags: politician, mace, pepper-spray, spray id: oh-columbus-2
Fix a bug should use _env_steps instead of _num_env_steps. The current code never stores a checkpoint.
@@ -392,7 +392,7 @@ class RLTrainer(Trainer): if self._num_iterations: time_to_checkpoint = self._trainer_progress._iter_num + checkpoint_interval else: - time_to_checkpoint = self._trainer_progress._num_env_steps + checkpoint_interval + time_to_checkpoint = self._trainer_progress._env_steps + checkpoint_interval while True: t0 = time.time()
CI: Remove build packages Either the packages already exist on the Ubuntu image or are not needed thanks to the use of manylinux wheels.
@@ -19,10 +19,6 @@ jobs: PIP_CACHE_DIR: ".cache/pip" PIP_SRC: ".cache/src" - steps: - - script: sudo apt-get install build-essential curl docker libffi-dev libfreetype6-dev libxml2 libxml2-dev libxslt1-dev zlib1g zlib1g-dev - displayName: 'Install base dependencies' - - task: UsePythonVersion@0 displayName: 'Set Python version' inputs:
analysis_units.unit: fix documented return type (no-tn-check)
@@ -27,7 +27,7 @@ def unit(self, node): :param AbstractExpression node: Node for which we want the embedding analysis unit. - :rtype: AbstractExpression + :rtype: ResolvedExpression """ node_expr = construct(node)
fix(workflow): Remove docstatus field from get_workflow_state_count remove docstatus from get_workflow_state_count as it is not being used and causes error with postgresql as it requires the selected column to either appear in the group by clause or an aggregat function [skip ci]
@@ -136,7 +136,7 @@ def get_workflow_state_count(doctype, workflow_state_field, states): states = frappe.parse_json(states) result = frappe.get_all( doctype, - fields=[workflow_state_field, "count(*) as count", "docstatus"], + fields=[workflow_state_field, "count(*) as count"], filters={workflow_state_field: ["not in", states]}, group_by=workflow_state_field, )
Be explicit about location of environment file This makes the fabfile portable WRT ebmbot's vendored version.
@@ -12,7 +12,8 @@ import os import dotenv import requests -dotenv.read_dotenv('environment') +basedir = os.path.dirname(os.path.abspath(__file__)) +dotenv.read_dotenv(os.path.join(basedir, 'environment')) env.hosts = ['web2.openprescribing.net']
Fix prefix creation in filelog handler Use absolute path for the `basedir`.
@@ -189,7 +189,7 @@ def _create_file_handler(site_config, config_prefix): def _create_filelog_handler(site_config, config_prefix): - basedir = site_config.get(f'{config_prefix}/basedir') + basedir = os.path.abspath(site_config.get(f'{config_prefix}/basedir')) prefix = site_config.get(f'{config_prefix}/prefix') filename_patt = os.path.join(basedir, prefix) append = site_config.get(f'{config_prefix}/append')
Disable memoization in dispatchers TN:
@@ -29,6 +29,10 @@ is Self : ${Self.type.name} := ${Self.type.name} (${property.self_arg_name}); + ## Dispatchers must not memoize: it is the job of the static properties do + ## to it themselves. + <% memoized = property.memoized and not property.is_dispatcher %> + % if property._has_self_entity: Ent : ${Self.type.entity.name} := ${Self.type.entity.name}'(Node => Self, Info => E_Info); @@ -71,7 +75,7 @@ is % endfor % endif - % if property.memoized: + % if memoized: <% key_length = 1 + len(property.arguments) if property.uses_entity_info: @@ -116,7 +120,7 @@ begin end if; % endif - % if property.memoized: + % if memoized: ## If memoization is enabled for this property, look for an already ## computed result for this property. See the declaration of ## Analysis_Context_Type.In_Populate_Lexical_Env for the rationale about @@ -224,7 +228,7 @@ begin ${scopes.finalize_scope(property.vars.root_scope)} % endif - % if property.memoized: + % if memoized: ## If memoization is enabled for this property, save the result for later ## re-use. % if not property.memoize_in_populate: @@ -251,7 +255,7 @@ begin % if (not property.is_dispatcher and \ property.vars.root_scope.has_refcounted_vars(True)) or \ - property.memoized or \ + memoized or \ has_logging: exception when Property_Error => @@ -263,7 +267,7 @@ begin % endfor % endif - % if property.memoized: + % if memoized: % if not property.memoize_in_populate: if not Node.Unit.Context.In_Populate_Lexical_Env then % endif
entities fix error 500 should have self, as `if klass` isn't used is bcz klass is None, so we need to use self.klass in else no?
@@ -102,7 +102,7 @@ class GenericView(FlaskView): obj = klass() form = klass.get_form()(request.form) else: # update - obj = klass.objects.get(id=id) + obj = self.klass.objects.get(id=id) klass = obj.__class__ form = klass.get_form()(request.form, initial=obj._data)
Change dagster project scaffold help text to indicate new folder is created Redo help text in dagster project scaffold to indication that a new folder is created
@@ -23,8 +23,8 @@ def project_cli(): scaffold_command_help_text = ( "Create a folder structure with a single Dagster repository and other files such as " - "workspace.yaml, in the current directory. This CLI enables you to quickly start building " - "a new Dagster project with everything set up." + "workspace.yaml, in the target directory set by the --name option. This CLI enables " + "you to quickly start building a new Dagster project with everything set up." ) from_example_command_help_text = (
fix tests: adapt to breaking change of ipaddress.ip_address in py3.9.5 fixes We can simply remove that test as we don't actually care whether the leading zeroes are allowed. see see
@@ -231,7 +231,7 @@ class TestUtil(ElectrumTestCase): def test_is_ip_address(self): self.assertTrue(is_ip_address("127.0.0.1")) - self.assertTrue(is_ip_address("127.000.000.1")) + #self.assertTrue(is_ip_address("127.000.000.1")) # disabled as result differs based on python version self.assertTrue(is_ip_address("255.255.255.255")) self.assertFalse(is_ip_address("255.255.256.255")) self.assertFalse(is_ip_address("123.456.789.000"))
[hailctl dataproc] Add OmitStackTraceInFastThrow to debug mode options This should hopefully preserve stack traces for more exceptions during execution. See for more detail
@@ -231,8 +231,8 @@ def main(args, pass_through_args): if args.debug_mode: conf.extend_flag('properties', { - "spark:spark.driver.extraJavaOptions": "-Xss4M -XX:+HeapDumpOnOutOfMemoryError", - "spark:spark.executor.extraJavaOptions": "-Xss4M -XX:+HeapDumpOnOutOfMemoryError", + "spark:spark.driver.extraJavaOptions": "-Xss4M -XX:+HeapDumpOnOutOfMemoryError -XX:-OmitStackTraceInFastThrow", + "spark:spark.executor.extraJavaOptions": "-Xss4M -XX:+HeapDumpOnOutOfMemoryError -XX:-OmitStackTraceInFastThrow", }) # default to highmem machines if using VEP
Change in profile parameter as per code In the code, del_all_vols_on_destroy is used and in documentation del_all_vol_on_destroy has been used. Hence there is a mismatch.
@@ -296,7 +296,7 @@ Set up an initial profile at ``/etc/salt/cloud.profiles``: SecurityGroupId: - sg-750af413 del_root_vol_on_destroy: True - del_all_vol_on_destroy: True + del_all_vols_on_destroy: True volumes: - { size: 10, device: /dev/sdf } - { size: 10, device: /dev/sdg, type: io1, iops: 1000 }
popovers.js: Fix keyboard shortcuts with copying link. Fixes
@@ -544,6 +544,12 @@ exports.register_click_handlers = function () { row.find(".alert-copied").css("display", "block"); row.find(".alert-copied").delay(1000).fadeOut(300); + setTimeout(function () { + // The Cliboard library works by focusing to a hidden textarea. + // We unfocus this so keyboard shortcuts, etc., will work again. + $(":focus").blur(); + }, 0); + e.stopPropagation(); e.preventDefault(); });
docs: add note pip v21.3 is needed for editable install Ref
@@ -7,8 +7,8 @@ Setting up a development environment If you want to hack on Fava or run the latest development version, make sure you have the following installed (ideally with your system package manager): -- `Python 3`_ (with `pip` to install the Fava Python package), -- `Node.js`_ (with `npm` to install the Javascript dependencies), +- `Python 3`_ - with `pip` (at least v21.3), to install the Fava Python package, +- `Node.js`_ - with `npm`, to install the Javascript dependencies, - `tox`_ - to run the Python tests, - `pre-commit`_ - to lint changes with a git pre-commit hook.
Update AbsorptionChiller upfront_capex from the database AbsorptionChiller costs are updated in scenario.py from calculations in techs.py, and these updated values are now pulled into the upfront_capex calculator from a db call (was using self.inputs before)
@@ -685,7 +685,12 @@ def process_results(self, dfm_list, data, meta, saveToDB=True): # storage capacity upfront_capex += (self.inputs["Storage"].get("installed_cost_us_dollars_per_kwh") or 0) * \ (self.nested_outputs["Scenario"]["Site"]["Storage"].get("size_kwh") or 0) - + if self.nested_outputs["Scenario"]["Site"]["AbsorptionChiller"].get("size_ton"): + # Need to update two cost input attributes which are calculated in techs.py and updated in scenario.py + absorp_chl = AbsorptionChillerModel.objects.filter(run_uuid=data['outputs']['Scenario']['run_uuid'])[0] + self.inputs["AbsorptionChiller"].update( + {"installed_cost_us_dollars_per_ton": absorp_chl.installed_cost_us_dollars_per_ton, + "om_cost_us_dollars_per_ton": absorp_chl.om_cost_us_dollars_per_ton}) upfront_capex += (self.inputs["AbsorptionChiller"].get("installed_cost_us_dollars_per_ton") or 0) * \ (self.nested_outputs["Scenario"]["Site"]["AbsorptionChiller"].get("size_ton") or 0)
llvm, composition: Update all dependencies before invoking nested composition Do not update any python structure after invoking nested composition in LLVM node execution mode.
@@ -9194,20 +9194,24 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): elif isinstance(node, Composition): - # Set up compilation if bin_execute: - # Values of Mechanisms are in binary data structure - srcs = (proj.sender.owner for proj in node.input_CIM.afferents if - proj.sender.owner in self._all_nodes and - isinstance(proj.sender.owner, Mechanism)) + # Invoking nested composition passes data via Python + # structures. Make sure all sources get their latest values + srcs = (proj.sender.owner for proj in node.input_CIM.afferents) for srnode in srcs: - assert srnode in self.nodes or srnode is self.input_CIM, \ - "{} is not a valid source node".format(srnode) - data = _comp_ex.extract_frozen_node_output(srnode) - for i, v in enumerate(data): - # This sets frozen values - srnode.output_ports[i].parameters.value._set(v, context, skip_history=True, - skip_log=True) + if srnode is self.input_CIM or srnode in self.nodes: + data_loc = srnode + else: + # Consuming output from another nested composition + assert srnode.composition in self.nodes + assert srnode is srnode.composition.output_CIM + data_loc = srnode.composition + + # Set current Python values to LLVM results + data = _comp_ex.extract_frozen_node_output(data_loc) + for op, v in zip(srnode.output_ports, data): + op.parameters.value._set( + v, context, skip_history=True, skip_log=True) # Pass outer context to nested Composition context.composition = node @@ -9224,20 +9228,16 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): ret = node.execute(context=context, bin_execute=nested_bin_execute) + # Get output info from nested execution + if bin_execute: + # Update result in binary data structure + _comp_ex.insert_node_output(node, ret) + if is_simulating: context.add_flag(ContextFlags.SIMULATION) context.composition = self - # Get output info from compiled execution - if bin_execute: - # Update result in binary data structure - _comp_ex.insert_node_output(node, ret) - for i, v in enumerate(ret): - # Set current output. This will be stored to "new_values" below - node.output_CIM.output_ports[i].parameters.value._set(v, context, skip_history=True, - skip_log=True) - # ANIMATE node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if self._animate is not False and self._animate_unit == COMPONENT: self._animate_execution(node, context)
[internal] flake8 integration test timeout bump Takes slightly longer in macOS CI sometimes. [ci skip-rust] [ci skip-build-wheels]
@@ -8,7 +8,7 @@ python_tests(name="subsystem_test", sources=["subsystem_test.py"]) python_tests( name="rules_integration_test", sources=["rules_integration_test.py"], - timeout=180, + timeout=240, # We want to make sure the default lockfile works for both macOS and Linux. tags=["platform_specific_behavior"], )
trace-level logging for all the chrome output because the important/unimportant messages are always shifting and we're not even trying to keep up with it and mostly it's just noise
@@ -244,31 +244,13 @@ class Chrome: while not self._shutdown.is_set(): buf = readline_nonblock(self.chrome_process.stdout) if buf: - if re.search( - b'Xlib: extension|' - b'CERT_PKIXVerifyCert for [^ ]* failed|' - b'^ALSA lib|ERROR:gl_surface_glx.cc|' - b'ERROR:gpu_child_thread.cc', buf): self.logger.trace( 'chrome pid %s STDOUT %s', self.chrome_process.pid, buf) - else: - self.logger.debug( - 'chrome pid %s STDOUT %s', - self.chrome_process.pid, buf) buf = readline_nonblock(self.chrome_process.stderr) if buf: - if re.search( - b'Xlib: extension|' - b'CERT_PKIXVerifyCert for [^ ]* failed|' - b'^ALSA lib|ERROR:gl_surface_glx.cc|' - b'ERROR:gpu_child_thread.cc', buf): self.logger.trace( - 'chrome pid %s STDOUT %s', - self.chrome_process.pid, buf) - else: - self.logger.debug( 'chrome pid %s STDERR %s', self.chrome_process.pid, buf) except:
[pre_parsers] correct error for s3 objects > 128mb the limit is 128mb, but the error incorrectly mentions 500mb
@@ -88,7 +88,7 @@ class StreamPreParsers(object): size_kb = size / 1024 size_mb = size_kb / 1024 if size_mb > 128: - raise S3ObjectSizeError('S3 object to download is above 500MB') + raise S3ObjectSizeError('S3 object to download is above 128MB') logger.debug('/tmp directory contents:%s ', os.listdir('/tmp')) logger.debug(os.system('df -h /tmp | tail -1'))
Underline only diagnostics that are meet the show_diagnostics_severity_level condition
@@ -252,7 +252,7 @@ class DiagnosticViewRegions(DiagnosticsUpdateWalk): self._relevant_file = False def end(self) -> None: - for severity in range(DiagnosticSeverity.Error, DiagnosticSeverity.Hint + 1): + for severity in range(settings.show_diagnostics_severity_level + 1): region_name = "lsp_" + format_severity(severity) if severity in self._regions: regions = self._regions[severity]
Comment out test that fails on s390. It already failed on windows, so seems flaky, and needs more investigation.
@@ -273,11 +273,11 @@ class TestChecksumFunctions(FitsTestCase): assert 'CHECKSUM' in hdul[1].header assert 'DATASUM' in hdul[1].header - if not sys.platform.startswith('win32'): - # The checksum ends up being different on Windows, possibly due - # to slight floating point differences - assert hdul[1]._header['CHECKSUM'] == 'eATIf3SHe9SHe9SH' - assert hdul[1]._header['DATASUM'] == '1277667818' + # The checksum ends up being different on Windows and s390/bigendian, + # possibly due to slight floating point differences? See gh-10921. + # TODO fix these so they work on all platforms; otherwise pointless. + # assert hdul[1]._header['CHECKSUM'] == 'eATIf3SHe9SHe9SH' + # assert hdul[1]._header['DATASUM'] == '1277667818' with fits.open(self.temp('uncomp.fits'), checksum=True) as hdul2: header_comp = hdul[1]._header
update docsite blog link Test Plan: have a look-see Reviewers: sashank
@@ -98,7 +98,7 @@ const Header: React.FC<HeaderProps> = ({ onMobileToggleNavigationClick }) => { </VersionedLink> <a - href="https://medium.com/dagster-io" + href="https://dagster.io/blog" target="_blank" className="ml-2 lg:ml-8 inline-flex items-center px-1 pt-1 border-b-2 border-transparent text-sm font-medium leading-5 text-gray-500 hover:text-gray-700 hover:border-gray-300 focus:outline-none focus:text-gray-700 focus:border-gray-300 transition duration-150 ease-in-out" >
Raise error when a bearing is not connected If a bearing/support is not connected to the rotor or to another bearing we now raise an error instead of entering an infinite loop.
@@ -432,6 +432,10 @@ class Rotor(object): node_l = df.loc[ (df.n_link == b.n) & (df.tag != b.tag), "nodes_pos_l" ] + if len(node_l) == 0: + raise ValueError( + f"The following bearing is not connected to the rotor. Check n_link. {b}" + ) node_r = node_l if len(node_l): df.loc[df.tag == b.tag, "nodes_pos_l"] = node_l.values[0]
use session context The only real difference here is that this calls `session.close()` in the finally block instead of `session.remove()`. `remove` calls `close` but it also removes the session from the scope preventing it's re-use.
@@ -277,14 +277,9 @@ class SqlData(ReportDataSource): for c in self.columns: qc.append_column(c.view) - session = connection_manager.get_scoped_session(self.engine_id) - try: + session_helper = connection_manager.get_session_helper(self.engine_id) + with session_helper.session_context() as session: return qc.resolve(session.connection(), self.filter_values) - except Exception: - session.rollback() - raise - finally: - session.remove() @property def data(self):
Removing - Tutorial * Removing - Tutorial Removing Exploring data with Python and Amazon S3 Select by Manav Sehgal as yields page not found (404). * Update tutorial link Thanks to Updated the link for "Exploring data with Python and Amazon S3 Select"
@@ -29,7 +29,7 @@ DataAtWork: URL: https://aws.amazon.com/blogs/big-data/build-a-real-time-stream-processing-pipeline-with-apache-flink-on-aws/ AuthorName: Steffen Hausmann - Title: Exploring data with Python and Amazon S3 Select - URL: https://github.com/aws-samples/aws-open-data-analytics-notebooks/tree/master/exploring-data + URL: https://github.com/aws-samples/cloud-experiments/tree/master/experiments/notebooks/exploring-data AuthorName: Manav Sehgal - Title: Optimizing data for analysis with Amazon Athena and AWS Glue URL: https://github.com/aws-samples/aws-open-data-analytics-notebooks/tree/master/optimizing-data
Update testsRays.py Fixed assertEqual() to assertPrints() in progress test
@@ -148,7 +148,7 @@ class TestRays(envtest.RaytracingTestCase): rays = [Ray(0, 0)] rays = Rays(rays) rays.iteration = 1 - self.assertEqual(rays.displayProgress, "") + self.assertPrints(rays.displayProgress, "") def testRayCountHistogramBinCountSpecified(self): r = [Ray(a, a) for a in range(6)]
Remove login retrying Unnecessary clutter, easy to implement if required by the user.
@@ -51,13 +51,12 @@ class Client: """ return self._uid - def __init__(self, email, password, max_tries=5, session_cookies=None): + def __init__(self, email, password, session_cookies=None): """Initialize and log in the client. Args: email: Facebook ``email``, ``id`` or ``phone number`` password: Facebook account password - max_tries (int): Maximum number of times to try logging in session_cookies (dict): Cookies from a previous session (Will default to login if these are invalid) Raises: @@ -75,7 +74,7 @@ class Client: or not self.setSession(session_cookies) or not self.isLoggedIn() ): - self.login(email, password, max_tries) + self.login(email, password) """ INTERNAL REQUEST METHODS @@ -154,7 +153,7 @@ class Client: return False return True - def login(self, email, password, max_tries=5): + def login(self, email, password): """Login the user, using ``email`` and ``password``. If the user is already logged in, this will do a re-login. @@ -162,33 +161,20 @@ class Client: Args: email: Facebook ``email`` or ``id`` or ``phone number`` password: Facebook account password - max_tries (int): Maximum number of times to try logging in Raises: FBchatException: On failed login """ self.onLoggingIn(email=email) - if max_tries < 1: - raise ValueError("Cannot login: max_tries should be at least one") - if not (email and password): raise ValueError("Email and password not set") - for i in range(1, max_tries + 1): - try: self._state = _state.State.login( email, password, on_2fa_callback=self.on2FACode ) self._uid = self._state.user_id - except Exception: - if i >= max_tries: - raise - log.exception("Attempt #{} failed, retrying".format(i)) - time.sleep(1) - else: self.onLoggedIn(email=email) - break def logout(self): """Safely log out the client.
Add edit delta to modlog for multi-edit messages To help with self-bot detection, if a message has been previously edited, generate a human-readable delta between the last edit and the new one Use message timestamp for modlog embeds generated during on_message event Visually separate send_log_message kwargs to make them easier to read
@@ -104,9 +104,19 @@ class ModLog: self._ignored[event].append(item) async def send_log_message( - self, icon_url: Optional[str], colour: Colour, title: Optional[str], text: str, - thumbnail: str = None, channel_id: int = Channels.modlog, ping_everyone: bool = False, - files: List[File] = None, content: str = None, additional_embeds: List[Embed] = None, + self, + icon_url: Optional[str], + colour: Colour, + title: Optional[str], + text: str, + thumbnail: str = None, + channel_id: int = Channels.modlog, + ping_everyone: bool = False, + files: List[File] = None, + content: str = None, + additional_embeds: List[Embed] = None, + timestamp_override: datetime.datetime = None, + footer_override: str = None, ): embed = Embed(description=text) @@ -114,8 +124,15 @@ class ModLog: embed.set_author(name=title, icon_url=icon_url) embed.colour = colour + + if timestamp_override: + embed.timestamp = timestamp_override + else: embed.timestamp = datetime.datetime.utcnow() + if footer_override: + embed.set_footer(text=footer_override) + if thumbnail is not None: embed.set_thumbnail(url=thumbnail) @@ -676,14 +693,28 @@ class ModLog: f"{after.clean_content}" ) + if before.edited_at: + # Message was previously edited, to assist with self-bot detection, use the edited_at + # datetime as the baseline and create a human-readable delta between this edit event + # and the last time the message was edited + timestamp = before.edited_at + delta = humanize_delta(relativedelta(after.edited_at, before.edited_at)) + footer = f"Last edited {delta} ago" + else: + # Message was not previously edited, use the created_at datetime as the baseline, no + # delta calculation needed + timestamp = before.created_at + footer = None + + print(timestamp, footer) await self.send_log_message( - Icons.message_edit, Colour.blurple(), "Message edited (Before)", - before_response, channel_id=Channels.message_log + Icons.message_edit, Colour.blurple(), "Message edited (Before)", before_response, + channel_id=Channels.message_log, timestamp_override=timestamp, footer_override=footer ) await self.send_log_message( - Icons.message_edit, Colour.blurple(), "Message edited (After)", - after_response, channel_id=Channels.message_log + Icons.message_edit, Colour.blurple(), "Message edited (After)", after_response, + channel_id=Channels.message_log, timestamp_override=after.edited_at ) async def on_raw_message_edit(self, event: RawMessageUpdateEvent):
Update android_roamingmantis.txt To avoid potential detection in the middle of some strings/domains/etc.
@@ -165,10 +165,10 @@ myau-it.com # Generic trails -\b[a-z]{2}\-[a-z]{2}\.top\b -\bapple\-icloud\.[a-z]{3}\-japan\.com\b -\bjppost\-[a-z]{2,}\.(co|com|top)\b -\bnittsu\-[a-z]{2,}\.(com|top)\b -\bmailsa\-[a-z]{2,}\.(com|top)\b -\bsagawa\-[a-z]{2,}\.(cn|com|top)\b -\byamato\-[a-z]{2,}\.(com|top)\b +^\b[a-z]{2}\-[a-z]{2}\.top\b$ +^\bapple\-icloud\.[a-z]{3}\-japan\.com\b$ +^\bjppost\-[a-z]{2,}\.(co|com|top)\b$ +^\bnittsu\-[a-z]{2,}\.(com|top)\b$ +^\bmailsa\-[a-z]{2,}\.(com|top)\b$ +^\bsagawa\-[a-z]{2,}\.(cn|com|top)\b$ +^\byamato\-[a-z]{2,}\.(com|top)\b$
fix broken regression test in method test_build_executor. We expect 5 executors defined in default configuration, previously we had 4.
@@ -24,12 +24,13 @@ def test_build_executor(tmp_path): # Load BuildExecutor be = BuildExecutor(example) - # We should have a total of 4 executors (local.bash, local.sh, local.csh, local.python) - assert len(be.executors) == 4 + # We should have a total of 5 executors (local.bash, local.sh, local.csh, local.zsh, local.python) + assert len(be.executors) == 5 assert list(be.executors.keys()) == [ "local.bash", "local.sh", "local.csh", + "local.zsh", "local.python", ]
tests: update old import from wlauto exec_control tests were still importing from wlauto rather than wa. This rectifies that.
@@ -19,7 +19,7 @@ from unittest import TestCase from nose.tools import assert_equal, assert_raises -from wlauto.utils.exec_control import (init_environment, reset_environment, +from wa.utils.exec_control import (init_environment, reset_environment, activate_environment, once, once_per_class, once_per_instance)
Enable dependency link processing Use `PIP_PROCESS_DEPENDENCY_LINKS` to toggle the processing of dependency links
@@ -92,7 +92,8 @@ setup( assert "version" in pipenv_instance.lockfile["default"]["test-private-dependency"] assert "0.1" in pipenv_instance.lockfile["default"]["test-private-dependency"]["version"] - with PipenvInstance(pypi=pypi, chdir=True) as p: + with temp_environ(), PipenvInstance(pypi=pypi, chdir=True) as p: + os.environ['PIP_PROCESS_DEPENDENCY_LINKS'] = '1' test_deplink(p, 'git+https://github.com/atzannes/[email protected]#egg=test-private-dependency-v0.1') # with PipenvInstance(pypi=pypi, chdir=True) as p:
disable unwanted commands Enabling only for test preparation. Updated test_db to currently used avalon_tests
@@ -228,15 +228,15 @@ class DBHandler: return query # Examples -handler = DBHandler(uri="mongodb://localhost:27017") +# handler = DBHandler(uri="mongodb://localhost:27017") # # -backup_dir = "c:\\projects\\test_zips\\test_maya_publish\\input\\dumps" +# backup_dir = "c:\\projects\\test_zips\\test_maya_publish\\input\\dumps" # # # # handler.backup_to_dump("avalon", backup_dir, True, collection="test_project") -handler.setup_from_dump("test_db", backup_dir, True, db_name_out="avalon", collection="test_project") -# handler.setup_from_sql_file("test_db", "c:\\projects\\sql\\item.sql", +# handler.setup_from_dump("avalon_tests", backup_dir, True, db_name_out="avalon", collection="test_project") +# handler.setup_from_sql_file("avalon_tests", "c:\\projects\\sql\\item.sql", # collection="test_project", # drop=False, mode="upsert") -# handler.setup_from_sql("test_db", "c:\\projects\\sql", +# handler.setup_from_sql("avalon_tests", "c:\\projects\\sql", # collection="test_project", # drop=False, mode="upsert")
[c++] Make visit_function_forward_recursive and visit_function_backward public
@@ -96,15 +96,6 @@ class CgVariable { bool prohibit_clear_data_{false}; string name_{""}; - void - visit_function_recursive(CgFunctionPtr func, - unordered_set<CgFunctionPtr> &fclosed, - std::function<void(CgFunctionPtr)> forward_callback); - - void visit_function_backward( - CgFunctionPtr func, std::function<void(CgFunctionPtr)> backward_callback, - vector<CommunicatorBackwardCallbackPtr> communicator_callbacks); - public: typedef shared_ptr<CgVariable> Ptr; @@ -336,6 +327,19 @@ public: */ NBLA_API Ptr create_deep_copy(Context ctx, bool copy_grad = true); + + /** Execute callback at functions in forward order in a graph. + */ + void + visit_function_recursive(CgFunctionPtr func, + unordered_set<CgFunctionPtr> &fclosed, + std::function<void(CgFunctionPtr)> forward_callback); + + /** Execute callback at functions in backward order in a graph. + */ + void visit_function_backward( + CgFunctionPtr func, std::function<void(CgFunctionPtr)> backward_callback, + vector<CommunicatorBackwardCallbackPtr> communicator_callbacks); }; /** shared_ptr typedef of CGVariable
transfer: keep tab on how much data have we downloaded Previously we only kept tabls on how much we uploaded, also create a metric point for all downloads.
@@ -159,9 +159,9 @@ class TransferAgent(Thread): else: self.state[site][oper][filetype]["failures"] += 1 - if oper == "upload": + if oper in {"download", "upload"}: self.metrics.increase( - "pghoard.upload_size", + "pghoard.{}_size".format(oper), inc_value=oper_size, tags={ "result": "ok" if result["success"] else "failed",
zerver/lib/events: Refactor get_user_profile_by_email to get_user. Fixes
@@ -30,8 +30,8 @@ from zerver.lib.actions import validate_user_access_to_subscribers_helper, \ get_status_dict, streams_to_dicts_sorted from zerver.tornado.event_queue import request_event_queue, get_user_events from zerver.models import Client, Message, Realm, UserPresence, UserProfile, \ - get_user_profile_by_email, get_user_profile_by_id, \ - get_active_user_dicts_in_realm, realm_filters_for_realm, \ + get_user_profile_by_id, \ + get_active_user_dicts_in_realm, realm_filters_for_realm, get_user,\ get_owned_bot_dicts, custom_profile_fields_for_realm, get_realm_domains from zproject.backends import password_auth_enabled from version import ZULIP_VERSION @@ -339,7 +339,10 @@ def apply_event(state, event, user_profile, include_subscribers): # Convert the emails to user_profile IDs since that's what register() returns # TODO: Clean up this situation by making the event also have IDs for item in event["subscriptions"]: - item["subscribers"] = [get_user_profile_by_email(email).id for email in item["subscribers"]] + item["subscribers"] = [ + get_user(email, user_profile.realm).id + for email in item["subscribers"] + ] else: # Avoid letting 'subscribers' entries end up in the list for i, sub in enumerate(event['subscriptions']): @@ -404,7 +407,7 @@ def apply_event(state, event, user_profile, include_subscribers): sub['subscribers'].remove(user_id) elif event['type'] == "presence": # TODO: Add user_id to presence update events / state format! - presence_user_profile = get_user_profile_by_email(event['email']) + presence_user_profile = get_user(event['email'], user_profile.realm) state['presences'][event['email']] = UserPresence.get_status_dict_by_user(presence_user_profile)[event['email']] elif event['type'] == "update_message": # The client will get the updated message directly
Drop unmaintained Fedora rawhide tests We do not support tests in rawhide. Also, rawhide does not ship python 3.6 anymore, making this code path irrelevant.
@@ -71,11 +71,6 @@ fi # Install package $RUN $PKG install -y $PIP_PKG -if [[ $PYTHON_VERSION == 3 && $OS_VERSION == rawhide ]]; then - # https://fedoraproject.org/wiki/Changes/Making_sudo_pip_safe - $RUN mkdir -p /usr/local/lib/python3.6/site-packages/ -fi - if [[ $OS == centos && $OS_VERSION == 7 ]]; then # Older versions of setuptools don't understand the environment # markers used by docker-squash's requirements
Updated Frequency String for Pandas Timestamp * updated Pandas deprecation * updating Pandas deprecation * Simplify splitter.py convert two Timdelta objects into one * using right frequency for Timestamp
@@ -268,7 +268,7 @@ class DateSplitter(AbstractBaseSplitter, pydantic.BaseModel): def _test_slice( self, item: TimeSeriesSlice, offset: int = 0 ) -> TimeSeriesSlice: - freq = item.start.freq + freq = item.start.freqstr return item[ : self.split_date + pd.Timedelta(self.prediction_length + offset, unit=freq)
Set max filesize 100MB since we have a quota anyhow cherry-pick
@@ -59,7 +59,7 @@ ubuntu - fsize unlimited @competitors - nproc 40 @competitors - as 268435456 @competitors - cpu 3 -@competitors - fsize 10000 +@competitors - fsize 100000 @competitors - core unlimited @problems - nproc 40 @problems - cpu 10
remove apt-get steps from dagit Summary: Do we need this? Test Plan: Lets find out Reviewers: #ft, natekupp
@@ -387,8 +387,6 @@ def dagit_tests(): tests.append( StepBuilder("dagit tests ({ver})".format(ver=TOX_MAP[version])) .run( - "apt-get update", - "apt-get install -y xdg-utils", "pushd python_modules", "make rebuild_dagit", "popd",
update the parameter for find_jar Prevent the find_jar_iter from terminating before searching using environment parameters
@@ -66,7 +66,7 @@ class StanfordSegmenter(TokenizerI): verbose=verbose) if path_to_slf4j is not None: slf4j = find_jar( - 'slf4j-api.jar', path_to_slf4j, + 'slf4j-api.jar', None, env_vars=('SLF4J', 'STANFORD_SEGMENTER',), searchpath=(), url=_stanford_url, verbose=verbose) @@ -75,7 +75,8 @@ class StanfordSegmenter(TokenizerI): # This is passed to java as the -cp option, the old version of segmenter needs slf4j. # The new version of stanford-segmenter-2016-10-31 doesn't need slf4j - # When using old version, set path_to_slf4j='/YOUR_PATH/slf4j-api.jar' in the initilization of StanfordSegmenter + # When using old version, set path_to_slf4j='slf4j-api.jar' in the initilization of StanfordSegmenter + # The initilization program will search for your slf4j-api.jar according to the environment parameters you set self._stanford_jar = os.pathsep.join( [_ for _ in [stanford_segmenter, slf4j] if not _ is None])
Fix named locks implementation * This patch fixes wrong usage of SQLAlchemy session in the implementation of named locks. See bug description for details. Closes-Bug:
@@ -1630,10 +1630,14 @@ def delete_named_lock(lock_id, session=None): @contextlib.contextmanager def named_lock(name): - lock_id = None + # NOTE(rakhmerov): We can't use the well-known try-finally pattern here + # because if lock creation failed then it means that the SQLAlchemy + # session is no longer valid and we can't use to try to delete the lock. + # All we can do here is to let the exception bubble up so that the + # transaction management code could rollback the transaction. - try: lock_id = create_named_lock(name) + yield - finally: + delete_named_lock(lock_id)
Update Changes.md This should have been included the last time we merged `1.0_maintenance` to `1.1_maintenance`.
@@ -17,6 +17,7 @@ Fixes - Fixed distant light angle is in degrees and not radians. - Fixed assignment of `emission` shader. Previously this was being assigned as a `cycles:light` attribute instead of `cycles:surface` (#5058). - ImageViewer : Fixed drawing of pixels to the left of the display window. +- Random : Fixed GIL management bug which could lead to hangs. 1.1.6.1 (relative to 1.1.6.0) =======
Improve the error message when metpy_crs is missing Based on some of my travails from
@@ -232,7 +232,9 @@ class MetPyDataArrayAccessor: """Return the coordinate reference system (CRS) as a CFProjection object.""" if 'metpy_crs' in self._data_array.coords: return self._data_array.coords['metpy_crs'].item() - raise AttributeError('crs attribute is not available.') + raise AttributeError('crs attribute is not available. You may need to use the' + ' `parse_cf` or `assign_crs` methods. Consult the "xarray' + ' with MetPy Tutorial" for more details.') @property def cartopy_crs(self):
schemas: codexentry: README: Allow 'BodyName' without 'BodyID' In the interests of offering listeners at least partial data about the location of a codex entry we will allow `BodyName` to be set from Status.json, but without `BodyID` from the Journal if it is not available.
@@ -58,12 +58,21 @@ release, Update 7, plus one patch). away. 5. If Status.json does **not** have `BodyName` then clear `status_body_name`. 6. For a `CodexEntry` event: - 1. Check that `status_body_name` is set. - 2. ONLY if it is, check if it matches `journal_body_name`. - 3. ONLY if they match, set both `BodyName` and `BodyID` in the EDDN - `codexentry` schema message to the recorded values. - As you just checked that `status_body_name` was set, and it matches - `journal_body_name` it doesn't matter which of the two you use. + 1. Check that `status_body_name` is set. If it is not, exit. + 1. Set the EDDN `codexentry` schema message `BodyName` to this value. + 2. Check if it matches the `journal_body_name` value, and + ONLY if they match, set `BodyID` in the EDDN `codexentry` + schema message to the value of `journal_body_id`. + + If `status_body_name` is not set then you MUST NOT include `BodyName` or + `BodyID` keys/values in the EDDN message. + + If `status_body_name` is set, but does not match with + `journal_body_name` then you MUST NOT include a `BodyID` key+value in the + EDDN message. + + For emphasis, in both of these cases you MUST NOT include the keys with a + `null`, `''`, or otherwise 'empty' value. Do not include the key(s) at all. One possible issue is binary bodies where you might get an `ApproachBody` for one before descending towards the other, without an additional `ApproachBody` @@ -76,8 +85,8 @@ without a new `ApproachBody` event, but `status_body_name` will change to the other when you are close enough. In this case due to `status_body_name` and `journal_body_name` not matching -the `codexentry` message MUST be sent **without** either `BodyName` or -`BodyID`. +the `codexentry` message MUST be sent **without** `BodyID`, but SHOULD be +sent with the `status_body_name` value on the `BodyName` key. e.g. for `Bestia A 2 a` ```json
refactor: Extract window size into a object. This will be passed to the function that will start the recording.
@@ -41,17 +41,28 @@ class CommonUtils { assert.equal(actual_recipients, expected); }, }; + this.fullname = { cordelia: "Cordelia Lear", othello: "Othello, the Moor of Venice", hamlet: "King Hamlet", }; + + this.window_size = { + width: 1400, + height: 1024, + }; } async ensure_browser() { if (this.browser === null) { + const {window_size} = this; this.browser = await puppeteer.launch({ - args: ["--window-size=1400,1024", "--no-sandbox", "--disable-setuid-sandbox"], + args: [ + `--window-size=${window_size.width},${window_size.height}`, + "--no-sandbox", + "--disable-setuid-sandbox", + ], defaultViewport: {width: 1280, height: 1024}, headless: true, });
fix: typo in test_citations Fix typo in test_citations
@@ -49,7 +49,7 @@ class TestCitations(unittest.TestCase): # Bibtext Style with temporary_filename() as filename: - pybamm.print_citations(filename, "text") + pybamm.print_citations(filename, "bibtex") with open(filename, "r") as f: self.assertTrue(len(f.readlines()) > 0)
Update README table of contents This will be a guide to the ordering of examples.
============== DARTS Examples ============== + +Our recommended ordering of examples: + +1. **Uno**: learn how to use the neural network building blocks in DARTS to + define a fully connected model using DARTS. + +2. **Advanced**: how to define our own neural network primitives to be optimized + by DARTS.
add WINDToCurtail, PV{}toCurtail expressions and results "[Tech]toGrid" and "[Tech]toLoad" expressions are updated as well
@@ -962,12 +962,15 @@ function add_wind_results(m, p, r::Dict) @expression(m, WINDtoBatt[ts in p.TimeStep], sum(sum(m[:dvProductionToStorage][b, t, ts] for t in m[:WindTechs]) for b in p.ElecStorage)) r["WINDtoBatt"] = round.(value.(WINDtoBatt), digits=3) + @expression(m, WINDtoCurtail[ts in p.TimeStep], + sum(m[:dvProductionToGrid][t,u,ts] for t in m[:WindTechs], u in p.CurtailmentTiers)) + r["WINDtoCurtail"] = round.(value.(WINDtoCurtail), digits=3) @expression(m, WINDtoGrid[ts in p.TimeStep], - sum(m[:dvProductionToGrid][t,u,ts] for t in m[:WindTechs], u in p.SalesTiers)) + sum(m[:dvProductionToGrid][t,u,ts] for t in m[:WindTechs], u in p.SalesTiersByTech[t]) - WINDtoCurtail[ts]) r["WINDtoGrid"] = round.(value.(WINDtoGrid), digits=3) @expression(m, WINDtoLoad[ts in p.TimeStep], sum(m[:dvRatedProduction][t, ts] * p.ProductionFactor[t, ts] * p.LevelizationFactor[t] - for t in m[:WindTechs]) - WINDtoGrid[ts] - WINDtoBatt[ts] ) + for t in m[:WindTechs]) - WINDtoGrid[ts] - WINDtoBatt[ts] - WINDtoCurtail[ts] ) r["WINDtoLoad"] = round.(value.(WINDtoLoad), digits=3) m[:Year1WindProd] = @expression(m, p.TimeStepScaling * sum(m[:dvRatedProduction][t,ts] * p.ProductionFactor[t, ts] @@ -1001,13 +1004,17 @@ function add_pv_results(m, p, r::Dict) end r[string(PVclass, "toBatt")] = round.(value.(PVtoBatt), digits=3) + PVtoCurtail = @expression(m, [ts in p.TimeStep], + sum(m[:dvProductionToGrid][t,u,ts] for t in PVtechs_in_class, u in p.CurtailmentTiers)) + r[string(PVclass, "toCurtail")] = round.(value.(PVtoCurtail), digits=3) + PVtoGrid = @expression(m, [ts in p.TimeStep], - sum(m[:dvProductionToGrid][t,u,ts] for t in PVtechs_in_class, u in p.SalesTiersByTech[t])) + sum(m[:dvProductionToGrid][t,u,ts] for t in PVtechs_in_class, u in p.SalesTiersByTech[t]) - PVtoCurtail[ts]) r[string(PVclass, "toGrid")] = round.(value.(PVtoGrid), digits=3) PVtoLoad = @expression(m, [ts in p.TimeStep], sum(m[:dvRatedProduction][t, ts] * p.ProductionFactor[t, ts] * p.LevelizationFactor[t] for t in PVtechs_in_class) - - PVtoGrid[ts] - PVtoBatt[ts] + - PVtoGrid[ts] - PVtoBatt[ts] - PVtoCurtail[ts] ) r[string(PVclass, "toLoad")] = round.(value.(PVtoLoad), digits=3)
push notif: Switch from GCM to FCM endpoint. This is the only server-side change required for the FCM migration! Optionally, at some point in the future we might choose to migrate to the new ("v1") API which FCM also offers. Nothing revolutionary but there are some nice things about it:
@@ -13,7 +13,7 @@ from django.conf import settings from django.db import IntegrityError, transaction from django.utils.timezone import now as timezone_now from django.utils.translation import ugettext as _ -from gcm import GCM +import gcm import requests import ujson @@ -175,8 +175,25 @@ def send_apple_push_notification(user_id: int, devices: List[DeviceToken], # Sending to GCM, for Android # +def make_gcm_client() -> gcm.GCM: # nocoverage + # From GCM upstream's doc for migrating to FCM: + # + # FCM supports HTTP and XMPP protocols that are virtually + # identical to the GCM server protocols, so you don't need to + # update your sending logic for the migration. + # + # https://developers.google.com/cloud-messaging/android/android-migrate-fcm + # + # The one thing we're required to change on the server is the URL of + # the endpoint. So we get to keep using the GCM client library we've + # been using (as long as we're happy with it) -- just monkey-patch in + # that one change, because the library's API doesn't anticipate that + # as a customization point. + gcm.gcm.GCM_URL = 'https://fcm.googleapis.com/fcm/send' + return gcm.GCM(settings.ANDROID_GCM_API_KEY) + if settings.ANDROID_GCM_API_KEY: # nocoverage - gcm_client = GCM(settings.ANDROID_GCM_API_KEY) + gcm_client = make_gcm_client() else: gcm_client = None
fix delete category request body revert edit address request body
@@ -311,7 +311,7 @@ script: body['Category ID'] = id; } else if(name){ - body['Category Names'] = name; + body['Category Name'] = name; } if (urls) { body.URLs = urls; @@ -424,7 +424,7 @@ script: body['Category IDs'] = ids; } else if (names) { - body['Category Name'] = names; + body['Category Names'] = names; } var res = transactionFlowRequest(url, 'DELETE', body); if (res.StatusCode !== 200) {
fix(ThreadedQueue): ensure processed matches task_done There's a bug where it's possible for the main thread to proceed after queue.join() stops blocking butbefore self.processed is incremented, resulting in a temporary discrepancy of 1. We invert the order of these operations to ensure that join will not stop blocking until all tasks are accounted for.
@@ -164,10 +164,9 @@ class ThreadedQueue(object): try: fn() finally: - self._queue.task_done() - with self._processed_lock: self.processed += 1 + self._queue.task_done() def wait(self): """
Simplify conda recipes Simplify recipes using `{{ target_platform }}` for `sysroot` package and `compiler()` function in `ignore_run_exports_from` Authors: - Jordan Jacobelli (https://github.com/Ethyling) Approvers: - AJ Schmidt (https://github.com/ajschmidt8) URL:
@@ -18,15 +18,13 @@ build: script_env: - VERSION_SUFFIX ignore_run_exports_from: - - nvcc_linux-64 # [linux64] - - nvcc_linux-aarch64 # [aarch64] + - {{ compiler('cuda') }} requirements: build: - {{ compiler('cxx') }} - {{ compiler('cuda') }} {{ cuda_version }} - - sysroot_linux-64 {{ sysroot_version }} # [linux64] - - sysroot_linux-aarch64 {{ sysroot_version }} # [aarch64] + - sysroot_{{ target_platform }} {{ sysroot_version }} host: - python - setuptools
Added pyupset package Reference:
@@ -397,6 +397,7 @@ RUN pip install --upgrade mpld3 && \ RUN pip install flashtext && \ pip install marisa-trie && \ pip install pyemd && \ + pip install pyupset && \ pip install -e git+https://github.com/SohierDane/BigQuery_Helper#egg=bq_helper && \ ##### ^^^^ Add new contributions above here # clean up pip cache
ceph-osd: replace sysctl command task by slurp Instead of using the command module for retrieving a sysctl value then we can use the slurp module and read the value directly from /proc.
when: disable_transparent_hugepage | bool - name: get default vm.min_free_kbytes - command: sysctl -b vm.min_free_kbytes - changed_when: false - failed_when: false - check_mode: no + slurp: + src: /proc/sys/vm/min_free_kbytes register: default_vm_min_free_kbytes - name: set_fact vm_min_free_kbytes set_fact: - vm_min_free_kbytes: "{{ 4194303 if ansible_memtotal_mb >= 49152 else default_vm_min_free_kbytes.stdout }}" + vm_min_free_kbytes: "{{ 4194303 if ansible_memtotal_mb >= 49152 else default_vm_min_free_kbytes.content | b64decode | trim }}" - name: apply operating system tuning sysctl:
Introduce "previous_exit_code" global setting. Enable DEBUG logging if previous run of TexText failed due to bug.
@@ -244,6 +244,9 @@ class NestedLoggingGuard(object): return NestedLoggingGuard(self._logger, lvl, message) +EXIT_CODE_OK = 0 +EXIT_CODE_EXPECTED_ERROR = 1 +EXIT_CODE_UNEXPECTED_ERROR = 60 LOG_LOCATION = os.path.dirname(__file__) # todo: check destination is writeable LOG_FILENAME = os.path.join(LOG_LOCATION, "textext.log") # todo: check destination is writeable @@ -324,6 +327,15 @@ try: def __init__(self): + self.settings = Settings() + previous_exit_code = self.settings.get("previous_exit_code", int, EXIT_CODE_OK) + + if previous_exit_code not in [EXIT_CODE_OK, EXIT_CODE_EXPECTED_ERROR]: + logging.disable(logging.NOTSET) + logger.debug("Enforcing DEBUG mode due to previous exit code `%d`" % previous_exit_code) + else: + logging.disable(logging.DEBUG) + logger.debug("TexText initialized") logger.debug("TexText version = %s (md5sum = %s)" % (repr(__version__), hashlib.md5(open(__file__).read()).hexdigest()) @@ -333,8 +345,6 @@ try: inkex.Effect.__init__(self) - self.settings = Settings() - self.OptionParser.add_option( "-t", "--text", action="store", type="string", dest="text", @@ -1476,6 +1486,9 @@ try: if __name__ == "__main__": effect = TexText() effect.affect() + effect.settings.set("previous_exit_code", EXIT_CODE_OK) + effect.settings.save() + except TexTextInternalError as e: # TexTextInternalError should never be raised. @@ -1484,11 +1497,23 @@ except TexTextInternalError as e: logger.error(traceback.format_exc()) logger.info("Please file a bug to https://github.com/textext/textext/issues/new") user_log_channel.show_messages() - exit(60) # TexText internal error + try: + settings = Settings() + settings.set("previous_exit_code", EXIT_CODE_UNEXPECTED_ERROR) + settings.save() + except: + pass + exit(EXIT_CODE_UNEXPECTED_ERROR) # TexText internal error except TexTextFatalError as e: logger.error(e.message) user_log_channel.show_messages() - exit(1) # Bad setup + try: + settings = Settings() + settings.set("previous_exit_code", EXIT_CODE_EXPECTED_ERROR) + settings.save() + except: + pass + exit(EXIT_CODE_EXPECTED_ERROR) # Bad setup except Exception as e: # All errors should be handled by above clause. # If any propagates here it's TexText logic error and should be reported. @@ -1496,4 +1521,10 @@ except Exception as e: logger.error(traceback.format_exc()) logger.info("Please file a bug to https://github.com/textext/textext/issues/new") user_log_channel.show_messages() - exit(60) # TexText internal error + try: + settings = Settings() + settings.set("previous_exit_code", EXIT_CODE_UNEXPECTED_ERROR) + settings.save() + except: + pass + exit(EXIT_CODE_UNEXPECTED_ERROR) # TexText internal error
Update NNI v0.2 release notes Update NNI v0.2 release notes
+# Release 0.2.0 - 9/29/2018 +## Major Features + * Support for [OpenPAI](https://github.com/Microsoft/pai) (aka pai) Training Service + * Support training services on pai mode. NNI trials will be scheduled to run on OpenPAI cluster + * NNI trial's output (including logs and model file) will be copied to OpenPAI HDFS for further debugging and checking + * Support [SMAC](https://www.cs.ubc.ca/~hutter/papers/10-TR-SMAC.pdf) tuner + * [SMAC](https://www.cs.ubc.ca/~hutter/papers/10-TR-SMAC.pdf) is based on Sequential Model-Based Optimization (SMBO). It adapts the most prominent previously used model class (Gaussian stochastic process models) and introduces the model class of random forests to SMBO to handle categorical parameters. The SMAC supported by NNI is a wrapper on [SMAC3](https://github.com/automl/SMAC3) + * Support NNI installation on [conda](https://conda.io/docs/index.html) and python virtual environment + * Others + * Update ga squad example and related documentation + * WebUI UX small enhancement and bug fix + +## Known Issues +[Known Issues in release 0.2.0](https://github.com/Microsoft/nni/labels/nni020knownissues). + # Release 0.1.0 - 9/10/2018 (initial release) Initial release of Neural Network Intelligence (NNI).
buildman: fix kubernetes not returning correct running count Filter the completed or failed jobs from the kubernetes api requests.
@@ -436,6 +436,22 @@ class KubernetesExecutor(BuilderExecutor): @property def running_builders_count(self): + def _completed(job): + if not job.get("status"): + return False + + conditions = job["status"].get("conditions") + if not conditions: + return False + + if ( + conditions[0]["type"] in ("Complete", "Failed") + and conditions[0]["status"] == "True" + ): + return True + + return False + q = {"labelSelector": "build,time,manager,quay-sha"} jobs_list = self._request("GET", self._jobs_path(), params=q) if jobs_list.status_code != 200: @@ -451,7 +467,10 @@ class KubernetesExecutor(BuilderExecutor): jobs_list.status_code, jobs_list.reason, ) - return len(jobs_list.json()["items"]) + + running_jobs = [j for j in jobs_list.json()["items"] if not _completed(j)] + + return len(running_jobs) def _request(self, method, path, **kwargs): request_options = dict(kwargs)
Update Palindrome_Checker.py I have made a small change in lines 6 and 7. Actually we don't need to initialize the variables 'phrase' and 'givenphrase'.
# PALINDROME: A word, phrase, or sequence that reads the same backward as forward samplePhrase = "A man, a plan, a cat, a ham, a yak, a yam, a hat, a canal-Panama!" -givenPhrase = "" -phrase = "" +#givenPhrase = "" +#phrase = "" givenPhrase = input("\nPlease input a phrase:(Press ENTER to use the sample phrase) ")
Use NamedTemporaryFile instead of mkstemp Fix for Windows - mkstemp causes issues if the OS file handle isn't explicitly closed. Use NamedTemporaryFile instead to avoid this issue.
@@ -26,7 +26,7 @@ def test_runner_timeout(): current_proc = psutil.Process() start_procs = current_proc.children() - _, output_json = tempfile.mkstemp(suffix='.json') + output_json = tempfile.NamedTemporaryFile(suffix='.json').name try: proc = subprocess.Popen(
docs: Add setting to conf.py to disable sticky_navigation The left navigation bar defaults to "sticking" to the screen as you scroll. This commit adds the sticky_navigation setting to conf.py to disable the default behavior. This addresses part of
@@ -117,7 +117,9 @@ if not on_rtd: # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +html_theme_options = { + 'sticky_navigation': False, +} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = []
Add latency checkers to Windows optimizations Kudos to "Crescent Spirit#6232" and "-Wassy_-#7799".
@@ -27,3 +27,5 @@ Use [TCP Optimizer](https://www.speedguide.net/downloads.php) to optimize your i 1. Choose your connection speed with the slider at the top. 2. Choose the optimal setting at the bottom. 3. Click apply changes. + +Use [LatencyMon](https://www.resplendence.com/latencymon) and [DPC Latency Checker](https://www.thesycon.de/eng/latency_check.shtml) to analyze latency issues caused by kernel-mode device drivers. If they report issues, try updating your network drivers or installing alternate drivers.
[swarming] convert artificial 503 to 429 This will reduce unactionable alert.
@@ -298,10 +298,11 @@ class TaskDimensionsHandler(webapp2.RequestHandler): @decorators.require_taskqueue('rebuild-task-cache') def post(self): if not task_queues.rebuild_task_cache(self.request.body): - # The task needs to be retried. Reply that the service is unavailable - # (503) instead of an internal server error (500) to help differentiating - # in the logs, even if it is not technically correct. - self.response.set_status(503) + # The task likely failed due to DB transaction contention, + # so we can reply that the service has had too many requests (429). + # Using a 400-level response also prevents failures here from causing + # unactionable alerts due to a high rate of 500s. + self.response.set_status(429) class TaskSendPubSubMessage(webapp2.RequestHandler):
docs: remove code from third party section reason: code should not be present here, it's hard to mantain, a short synopsis is enough. Page is also smaller, more visibility to other packages.
@@ -186,7 +186,7 @@ import pytest from starlette.config import environ from starlette.testclient import TestClient from sqlalchemy import create_engine -from sqlalchemy_utils import database_exists, create_database +from sqlalchemy_utils import database_exists, create_database, drop_database # This sets `os.environ`, but provides some additional protection. # If we placed it below the application import, it would raise an error
Include global sign out Fix logout request issues Await for current user
@@ -51,6 +51,11 @@ from .oauth2 import OAuthLoginHandler, OAuthenticator AWSCOGNITO_DOMAIN = os.getenv('AWSCOGNITO_DOMAIN') +try: + import boto3 +except: + raise ImportError('boto3 is not installed') + class AWSCognitoMixin(OAuth2Mixin): _OAUTH_AUTHORIZE_URL = "https://%s/oauth2/authorize" % AWSCOGNITO_DOMAIN _OAUTH_ACCESS_TOKEN_URL = "https://%s/oauth2/token" % AWSCOGNITO_DOMAIN @@ -66,28 +71,17 @@ class AWSCognitoLogoutHandler(LogoutHandler): provider in addition to clearing the session with Jupyterhub, otherwise only the Jupyterhub session is cleared. """ - async def get(self): - user = self.get_current_user() + + async def handle_logout(self): + user = await self.get_current_user() if user: await self.clear_tokens(user) - await self.default_handle_logout() - await self.handle_logout() - await self.render_logout_page() - - async def clear_tokens(self, user): - state = await user.get_auth_state() - if state: - state['access_token'] = '' - state['awscognito_user'] = '' - user.save_auth_state(state) - - async def handle_logout(self): http_client = AsyncHTTPClient() params = dict( client_id=self.authenticator.client_id, - redirect_uri=self.authenticator.logout_redirect_url + logout_uri=self.get_login_url() ) url = url_concat("https://%s/logout" % AWSCOGNITO_DOMAIN, params) @@ -104,6 +98,16 @@ class AWSCognitoLogoutHandler(LogoutHandler): await http_client.fetch(req) + async def clear_tokens(self, user): + state = await user.get_auth_state() + if state: + client = boto3.client('cognito-idp') + client.global_sign_out( + AccessToken=state['access_token'] + ) + state['access_token'] = '' + state['awscognito_user'] = '' + user.save_auth_state(state) class AWSCognitoAuthenticator(OAuthenticator): @@ -112,11 +116,6 @@ class AWSCognitoAuthenticator(OAuthenticator): userdata_url = "https://%s/oauth2/userInfo" % AWSCOGNITO_DOMAIN token_url = "https://%s/oauth2/token" % AWSCOGNITO_DOMAIN - logout_redirect_url = \ - Unicode(help="""URL for logging out.""").tag(config=True) - - def _logout_redirect_url_default(self): - return os.getenv('LOGOUT_REDIRECT_URL', '') username_key = Unicode( os.environ.get('AWSCOGNITO_USERNAME_KEY', 'username'),
TravisCI debug: adds -y flag to "add-apt-repository ppa:ubuntu-toolchain-r/test" (from debugging using docker, this command seems to prompt the user for an OK, and that's why it might be hanging...)
@@ -24,7 +24,7 @@ apt-get install libsuitesparse-dev > /dev/null 2>&1 cp /usr/lib/liblapack.so /usr/lib/libsuitesparseconfig.so > /dev/null 2>&1 echo "SuiteSparse complete" -sudo add-apt-repository -m ppa:ubuntu-toolchain-r/test # > /dev/null 2>&1 +sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test # > /dev/null 2>&1 sudo apt-get update # > /dev/null 2>&1 echo "Reinstalling gcc/g++ to get newer versions"
Fix example .pypirc for project tokens * Fix example .pypirc for project tokens Per my comment in it seems a "project" repository must be defined in `index-servers` in order to be usable by twine. * Add missing repository option
{% endtrans %} </p> <pre class="code-block"> +[distutils] + index-servers = + pypi + PROJECT_NAME + [pypi] username = __token__ password = # {% trans %}either a user-scoped token or a project-scoped token you want to set as the default{% endtrans %} [PROJECT_NAME] + repository = https://upload.pypi.org/legacy/ username = __token__ password = # {% trans %}a project token{% endtrans %} </pre> <p>
ebuild.ebd_ipc: Eapply: respect userpriv settings when running patch Fixes permissions issues with created files when running under userpriv mode.
@@ -15,6 +15,7 @@ from snakeoil.contexts import chdir from snakeoil.demandload import demandload from snakeoil.iterables import partition from snakeoil.osutils import pjoin +from snakeoil.process import spawn from pkgcore.exceptions import PkgcoreException, PkgcoreUserException @@ -986,31 +987,36 @@ class Eapply(IpcCommand): def run(self, args, user=False): if user: patch_type = 'user patches' - output = self.observer.warn + output_func = self.observer.warn else: patch_type = 'patches' - output = self.observer.info + output_func = self.observer.info + + spawn_kwargs = {'collect_fds': (1, 2)} + if self.op.userpriv: + spawn_kwargs['uid'] = os_data.portage_uid + spawn_kwargs['gid'] = os_data.portage_gid - try: for path, patches in args: prefix = '' if path is not None: - output(f'Applying {patch_type} from {path!r}:') + output_func(f'Applying {patch_type} from {path!r}:') prefix = ' ' for patch in patches: if path is None: - output(f'{prefix}Applying {os.path.basename(patch)}...') + output_func(f'{prefix}Applying {os.path.basename(patch)}...') else: - output(f'{prefix}{os.path.basename(patch)}...') + output_func(f'{prefix}{os.path.basename(patch)}...') self.observer.flush() + try: with open(patch) as f: - subprocess.run( + ret, output = spawn.spawn_get_output( self.patch_cmd + self.opts.patch_opts, - check=True, stdin=f, stderr=subprocess.PIPE) - except subprocess.CalledProcessError as e: + fd_pipes={0: f.fileno()}, **spawn_kwargs) + if ret: filename = os.path.basename(patch) - msg = f'applying {filename!r} failed: {e.stderr.decode()}' - raise IpcCommandError(msg, code=e.returncode) + msg = f'applying {filename!r} failed: {output[0]}' + raise IpcCommandError(msg, code=ret) except OSError as e: raise IpcCommandError( f'failed reading patch file: {patch!r}: {e.strerror}')
Adding references in response to review Addressing 's [review](https://github.com/google/BIG-bench/pull/69#issuecomment-794661342) and: (i) fixing typos, (ii) adding some references regarding the broader direction.
@@ -38,7 +38,7 @@ Large language models are trained on vast amounts of data crawled from the inter ## Related work -The task, as it is posed in the form of checkmate-in-one-move, is novel. However, the broader task of playing chess with pre-trained language models has been explored before; see [this](). The author(s) observed that without any fine-tuning, GPT-2, was able to predict moves in the appropriate format. [Additional analysis by others]() suggests that the model is reasonably good for opening moves but can fail to predict legal moves in mid- and endgames. In our experience, we found that the model makes several kinds of errors including moving pieces to illegal positions, and trying to use pieces that have been killed. The broader direction of using GPT-2 for playing text-based games been explored as well. +The task, as it is posed in the form of checkmate-in-one-move, is novel. However, the broader task of playing chess with pre-trained language models has been explored before; see [this](https://slatestarcodex.com/2020/01/06/a-very-unlikely-chess-game/). The author(s) observed that without any fine-tuning, GPT-2, was able to predict moves in the appropriate format. [Additional analysis](https://twitter.com/theshawwn/status/1212277698598453249?s=20) suggests that the model is reasonably good for opening moves but can fail to predict legal moves in mid- and endgames. In our experience, we found that the model makes several kinds of errors including moving pieces to illegal positions, and trying to use pieces that have been killed. The broader direction of using GPT-2 for playing text-based games has been explored as well; see e.g., applications in [quest generation](https://arxiv.org/abs/1909.06283), [integrating commonsense](https://arxiv.org/abs/2012.02757) and [text-world generation](https://arxiv.org/abs/2001.10161), and references within. (Please reach out if I missed a reference, I'd be glad to rectify any oversights).
This finally stops the None_{ts}.log being created This isnt tested with music cd's
@@ -15,17 +15,21 @@ def setuplogging(job): if not os.path.exists(cfg['LOGPATH']): os.makedirs(cfg['LOGPATH']) + ## This isnt catching all of them if job.label == "" or job.label is None: if job.disctype == "music": - logfile = "music_cd.log" + logfile = tmplogfile = "music_cd.log" else: - logfile = "empty.log" + logfile = tmplogfile = "empty.log" else: logfile = job.label + ".log" - ## TODO: fix the database is getting the wrong log file + ## this stops log files created with Nono_42342342.log + ## for some reason we need to convert None to a string to compare + if str(logfile) == "None": + return None ## Added from pull 366 But added if statement so we dont touch the empty.log - if logfile != "empty.log" or logfile != "NAS.log": + if logfile != "empty.log" and logfile != "NAS.log": ## lets create a temp var to hold our log name tmplogfile = str(job.label) + "_" + str(round(time.time() * 100)) + ".log" @@ -39,6 +43,7 @@ def setuplogging(job): TmpLogFull = cfg['LOGPATH'] + "/" + logfile logfull = cfg['LOGPATH'] + "/" + tmplogfile + ".log" if os.path.isfile(TmpLogFull) else cfg['LOGPATH'] + "/" + logfile else: + tmplogfile = "empty.log" ## For empty.log and NAS.log we need to set logfull logfull = cfg['LOGPATH'] + logfile if cfg['LOGPATH'][-1:] == "/" else cfg['LOGPATH'] + "/" + logfile
[IMPR] Return self when data_repository() is called on DataSite This motivated by the fact that a data repository doesn't need another one as it can provide its data to itself. It also saves some time on initialization because this information is retrieved from the API.
@@ -7160,6 +7160,18 @@ class DataSite(APISite): # not implemented yet raise NotImplementedError + def data_repository(self): + """ + Override parent method. + + This avoids pointless API queries since the data repository + is this site by definition. + + @return: this Site object + @rtype: DataSite + """ + return self + def loadcontent(self, identification, *props): """ Fetch the current content of a Wikibase item.
[ENH] better `ForecastingHorizon` construction error message Improves the error message of `ForecastingHorizon` when constructor is receiving incompatible input. Related to which would produce a confusing message (input is numpy array but 3D and float).
@@ -121,13 +121,15 @@ def _check_values(values: Union[VALID_FORECASTING_HORIZON_TYPES]) -> pd.Index: else: valid_types = ( "int", - "np.array", + "1D np.ndarray of type int", + "1D np.ndarray of type timedelta or dateoffset", "list", *[f"pd.{index_type.__name__}" for index_type in VALID_INDEX_TYPES], ) raise TypeError( f"Invalid `fh`. The type of the passed `fh` values is not supported. " - f"Please use one of {valid_types}, but found: {type(values)}" + f"Please use one of {valid_types}, but found type {type(values)}, " + f"values = {values}" ) # check values does not contain duplicates
Mute `pkgpanda/test_util:test_write_string` on Windows This test was introduced before automatic Win-tox testing and is failing; muting this test should enable `DC/OS` > `Windows` > `Build` > `tox` passing again
@@ -373,6 +373,8 @@ def test_split_by_token(): ] +# TODO: DCOS_OSS-3508 - muted Windows tests requiring investigation [email protected](pkgpanda.util.is_windows, reason="Windows and Linux permissions parsed differently") def test_write_string(tmpdir): """ `pkgpanda.util.write_string` writes or overwrites a file with permissions
viaplay: try to find correct subtitle based on domain fixes:
@@ -16,6 +16,9 @@ from svtplay_dl.service import Service from svtplay_dl.subtitle import subtitle +country = {".se": "sv", ".dk": "da", ".no": "no"} + + class Viaplay(Service, OpenGraphThumbMixin): supported_domains = [ "tv3play.ee", @@ -76,7 +79,23 @@ class Viaplay(Service, OpenGraphThumbMixin): for n in list(streams.keys()): yield streams[n] if "subtitles" in janson["embedded"] and len(janson["embedded"]["subtitles"]) > 0: + lang = re.search(r"(\.\w\w)$", urlparse(self.url).netloc).group(1) + if lang in country: + language = country[lang] + else: + language = None + + if not self.config.get("get_all_subtitles"): + if not language: yield subtitle(copy.copy(self.config), "wrst", janson["embedded"]["subtitles"][0]["link"]["href"], output=self.output) + else: + for i in janson["embedded"]["subtitles"]: + if i["data"]["language"] == language: + yield subtitle(copy.copy(self.config), "wrst", i["link"]["href"], output=self.output) + + else: + for i in janson["embedded"]["subtitles"]: + yield subtitle(copy.copy(self.config), "wrst", i["link"]["href"], i["data"]["language"], output=copy.copy(self.output)) def find_all_episodes(self, config): episodes = []
Adds _from_memoized_dict to TPState so serialization works again. After changing signature of __init__ to include 'basis' arg, we forgot to add this function, which overrides the DenseState implementation and correctly supplies basis=None to skip 1st-element checking.
@@ -14,6 +14,7 @@ The TPState class and supporting functionality. import numpy as _np from pygsti.baseobjs import Basis as _Basis +from pygsti.baseobjs import statespace as _statespace from pygsti.modelmembers.states.densestate import DenseState as _DenseState from pygsti.modelmembers.states.state import State as _State from pygsti.baseobjs.protectedarray import ProtectedArray as _ProtectedArray @@ -53,12 +54,13 @@ class TPState(_DenseState): # alpha = 1/sqrt(d) = 1/(len(vec)**0.25). def __init__(self, vec, basis="pp", evotype="default", state_space=None): vector = _State._to_vector(vec) + if basis is not None: if not isinstance(basis, _Basis): basis = _Basis.cast(basis, len(vector)) # don't perform this cast if we're given a basis firstEl = basis.elsize**-0.25 # not dim, as the dimension of the vector space may be less if not _np.isclose(vector[0], firstEl): - raise ValueError("Cannot create TPState: " - "first element must equal %g!" % firstEl) + raise ValueError("Cannot create TPState: first element must equal %g!" % firstEl) + # if basis is None, don't check first element (hackfor de-serialization, so we don't need to store basis) _DenseState.__init__(self, vector, evotype, state_space) assert(isinstance(self.columnvec, _ProtectedArray)) @@ -188,3 +190,9 @@ class TPState(_DenseState): bool """ return False + + @classmethod + def _from_memoized_dict(cls, mm_dict, serial_memo): + vec = cls._decodemx(mm_dict['dense_superket_vector']) + state_space = _statespace.StateSpace.from_nice_serialization(mm_dict['state_space']) + return cls(vec, None, mm_dict['evotype'], state_space) # use basis=None to skip 1st element check
Reactivate image before deletion Normal users should not be able to delete disabled images (see [1]) as this can lead to inconsistencies between database and storage backend. Tempest relies on being able to do this in some tests. [1]: Related-Bug:
@@ -336,6 +336,9 @@ class VolumesNegativeTest(base.BaseVolumeTest): # Deactivate the image self.images_client.deactivate_image(image['id']) + self.addCleanup(test_utils.call_and_ignore_notfound_exc, + self.images_client.reactivate_image, image['id']) + body = self.images_client.show_image(image['id']) self.assertEqual("deactivated", body['status']) # Try creating a volume from deactivated image
Add consistency check for superseded NEPs A superseded NEP should have a Replaced-By header. The replacing NEP should have a Replaces header. They should point to one another.
@@ -21,7 +21,7 @@ def nep_metadata(): sources = sorted(glob.glob(r'nep-*.rst')) sources = [s for s in sources if not s in ignore] - meta_re = r':([a-zA-Z]*): (.*)' + meta_re = r':([a-zA-Z\-]*): (.*)' neps = {} print('Loading metadata for:') @@ -40,8 +40,44 @@ def nep_metadata(): tags['Title'] = lines[1].strip() tags['Filename'] = source + + if tags['Status'] in ('Accepted', 'Rejected', 'Withdrawn'): + if not 'Resolution' in tags: + raise RuntimeError( + f'NEP {nr} is Accepted/Rejected/Withdrawn but ' + 'has no Resolution tag' + ) + neps[nr] = tags + # Now that we have all of the NEP metadata, do some global consistency + # checks + + for nr, tags in neps.items(): + if tags['Status'] == 'Superseded': + if not 'Replaced-By' in tags: + raise RuntimeError( + f'NEP {nr} has been Superseded, but has no Replaced-By tag' + ) + + replaced_by = int(tags['Replaced-By']) + replacement_nep = neps[replaced_by] + + if not int(replacement_nep['Replaces']) == nr: + raise RuntimeError( + f'NEP {nr} is superseded by {replaced_by}, but that NEP has a ' + f"Replaces tag of `{replacement_nep['Replaces']}`." + ) + + if 'Replaces' in tags: + replaced_nep = int(tags['Replaces']) + replaced_nep_tags = neps[replaced_nep] + if not replaced_nep_tags['Status'] == 'Superseded': + raise RuntimeError( + f'NEP {nr} replaces {replaced_nep}, but that NEP has not ' + f'been set to Superseded' + ) + return {'neps': neps}
Wrap bare node return types in public properties as entities TN:
:= ${arg.public_default_value.render_public_ada_constant()} % endif % endfor - ) return ${(property.type.api_name)} + ) return ${(property.public_type.api_name)} </%def> <%def name="decl(property)"> ) ) + elif property.type.is_ast_node: + wrapped_result = ('({} (Property_Result), No_Public_Entity_Info)' + .format(root_node_type_name)) + elif property.type.is_array_type: if property.type.element_type.is_entity_type: entity_type = property.type.element_type.name ${'({})'.format(', '.join(actuals)) if actuals else ''}; % if wrap_code: - return Result : ${property.type.api_name} ${( + return Result : ${property.public_type.api_name} ${( ':= {}'.format(wrapped_result) if wrapped_result else '' )} do
Make CF Appendix A checks optional Makes CF Appendix A checks optional. They will now have to be enabled explicitly via passing 'cf:enable_appendix_a_checks' to CF checker related options.
@@ -61,14 +61,14 @@ class CFBaseCheck(BaseCheck): CF Convention Checker Base """ - def __init__(self): + def __init__(self, options=None): # The compliance checker can be run on multiple datasets in a single # instantiation, so caching values has be done by the unique identifier # for each dataset loaded. # Each default dict is a key, value mapping from the dataset object to # a list of variables - super(CFBaseCheck, self).__init__() + super(CFBaseCheck, self).__init__(options) self._coord_vars = defaultdict(list) self._ancillary_vars = defaultdict(list) self._clim_vars = defaultdict(list) @@ -891,9 +891,13 @@ class CFBaseCheck(BaseCheck): :rtype: list :return: A list of results corresponding to the results returned """ + # if 'enable_appendix_a_checks' isn't specified in the checks, + # don't do anything on this check + results = [] + if 'enable_appendix_a_checks' not in self.options: + return results possible_global_atts = (set(ds.ncattrs()). intersection(self.appendix_a.keys())) - results = [] attr_location_ident = {'G': 'global attributes', 'C': 'coordinate data', 'D': 'non-coordinate data'} @@ -1180,8 +1184,8 @@ class CF1_6Check(CFNCCheck): } appendix_a = appendix_a_base - def __init__(self): # initialize with parent methods and data - super(CF1_6Check, self).__init__() + def __init__(self, options=None): # initialize with parent methods and data + super(CF1_6Check, self).__init__(options) self.cell_methods = cell_methods16 self.grid_mapping_dict = grid_mapping_dict16 @@ -3952,8 +3956,8 @@ class CF1_7Check(CF1_6Check): 'scale_factor': {'Type': 'N', 'attr_loc': {'D', 'C'}, 'cf_section': '8.1'} }) - def __init__(self): - super(CF1_7Check, self).__init__() + def __init__(self, options=None): + super(CF1_7Check, self).__init__(options) self.cell_methods = cell_methods17 self.grid_mapping_dict = grid_mapping_dict17
fixed a typo "required" is missing in the end of the sentence (reference `tempban_unknown_error`).
"mute_role_already_removed": "The mute for {user} (``{user_id}``) expired (infraction ``{inf_id}``) but someone else already removed the mute role in advance!", "unmute_missing_perms": "The mute for {user} (``{user_id}``) expired (infraction ``{inf_id}``) but I no longer have the ``MANAGE_ROLES`` permission and was unable to remove the mute role.", "unmuted": "{user} (``{user_id}``) was automatically unmuted as their tempmute expired (infraction ``{inf_id}``).", - "unmute_unknown_error": "The mute for {user} (``{user_id}``) expired (infraction ``{inf_id}``) but something went wrong when trying to unmute. Manual unmuting is probably.", + "unmute_unknown_error": "The mute for {user} (``{user_id}``) expired (infraction ``{inf_id}``) but something went wrong when trying to unmute. Manual unmuting is probably required.", "tempban_already_lifted": "The tempban for {user} (``{user_id}``) expired (infraction ``{inf_id}``) but someone already lifted the ban earlier!", "tempban_expired_missing_perms": "The tempban for {user} (``{user_id}``) expired (infraction ``{inf_id}``) but I no longer have the ``ban_members`` permission required to lift the ban.", "tempban_unknown_error": "The tempban for {user} (``{user_id}``) expired (infraction ``{inf_id}``) but something went wrong when trying to unban. Manual lifting of the ban is probably required.",
common: install ceph-volume package After pacific release, ceph-volume has its own package. ceph-ansible has to explicitly install it on osd nodes.
@@ -6,6 +6,7 @@ debian_ceph_pkgs: - "ceph-common" - "{{ ((ceph_repository == 'rhcs') and (mon_group_name in group_names)) | ternary('ceph-mon', 'ceph-common') }}" - "{{ ((ceph_repository == 'rhcs') and (osd_group_name in group_names)) | ternary('ceph-osd', 'ceph-common') }}" + - "{{ (osd_group_name in group_names) | ternary('ceph-volume', 'ceph-common') }}" - "{{ (ceph_test | bool) | ternary('ceph-test', 'ceph-common') }}" - "{{ (rgw_group_name in group_names) | ternary('radosgw', 'ceph-common') }}" - "{{ ((ceph_repository == 'rhcs') and (client_group_name in group_names)) | ternary('ceph-fuse', 'ceph-common') }}" @@ -15,6 +16,7 @@ redhat_ceph_pkgs: - "ceph-common" - "{{ (mon_group_name in group_names) | ternary('ceph-mon', 'ceph-common') }}" - "{{ (osd_group_name in group_names) | ternary('ceph-osd', 'ceph-common') }}" + - "{{ (osd_group_name in group_names) | ternary('ceph-volume', 'ceph-common') }}" - "{{ (client_group_name in group_names) | ternary('ceph-fuse', 'ceph-common') }}" - "{{ (client_group_name in group_names) | ternary('ceph-base', 'ceph-common') }}" - "{{ (rgw_group_name in group_names) | ternary('ceph-radosgw', 'ceph-common') }}" @@ -24,6 +26,7 @@ suse_ceph_pkgs: - "ceph-common" - "{{ (mon_group_name in group_names) | ternary('ceph-mon', 'ceph-common') }}" - "{{ (osd_group_name in group_names) | ternary('ceph-osd', 'ceph-common') }}" + - "{{ (osd_group_name in group_names) | ternary('ceph-volume', 'ceph-common') }}" - "{{ (client_group_name in group_names) | ternary('ceph-fuse', 'ceph-common') }}" - "{{ (client_group_name in group_names) | ternary('ceph-base', 'ceph-common') }}" - "{{ (rgw_group_name in group_names) | ternary('ceph-radosgw', 'ceph-common') }}"
Add job ids to the resource dictionary Otherwise, `Condor.cancel` and `Condor.status` fail with a `KeyError`.
@@ -262,6 +262,7 @@ pip3 install ipyparallel """ processes = [str(x) for x in range(0,int(line[0]))] job_id += [cluster + process for process in processes] + self._add_resource(job_id) return job_id ########################################################################################################### @@ -297,10 +298,12 @@ pip3 install ipyparallel """ def current_capacity(self): return self - def _test_add_resource (self, job_id): - self.resources.extend([{'job_id' : job_id, + def _add_resource(self, job_id): + for jid in job_id: + self.resources[jid] = { 'status': 'PENDING', - 'size' : 1 }]) + 'size': 1 + } return True if __name__ == "__main__" :
List opencv as optional package since currently it is only needed for LK
@@ -26,7 +26,6 @@ The pysteps package needs the following dependencies * matplotlib_ * netCDF4_ * numpy_ -* opencv-python_ * scipy_ .. _attrdict : https://pypi.org/project/attrdict/ @@ -34,7 +33,6 @@ The pysteps package needs the following dependencies .. _jsonschema : https://pypi.org/project/jsonschema/ .. _numpy: http://www.numpy.org/ .. _scipy: https://www.scipy.org/ -.. _opencv-python: https://opencv.org/ .. _matplotlib: http://matplotlib.org/ .. _netCDF4: https://pypi.org/project/netCDF4/ @@ -51,6 +49,7 @@ Other optional packages include: * cartopy_ or basemap_ (for georeferenced visualization) * h5py_ (for importing HDF5 data) +* opencv-python_ (for the Lucas-Kanade optical flow) * pillow_ (for importing gif data) * pyproj_ (for cartographic transformations) * scikit-image_ (for the VET optical flow method) @@ -58,6 +57,7 @@ Other optional packages include: .. _basemap: https://matplotlib.org/basemap/ .. _cartopy: https://scitools.org.uk/cartopy/docs/v0.16/ .. _h5py: https://www.h5py.org/ +.. _opencv-python: https://opencv.org/ .. _pillow: https://python-pillow.org/ .. _pyproj: https://jswhit.github.io/pyproj/ .. _scikit-image: https://scikit-image.org/
Fixed datetime formatting bug Fixed bug where netCDF4 cftime in linux systems would result in error for not being compatible with %Y-%m-%d %H:%M.
@@ -1559,20 +1559,20 @@ class Environment: # Check if time is inside range supplied by file if timeIndex == 0 and inputTimeNum < fileTimeNum: raise ValueError( - "Chosen launch time is not available in the provided file, which starts at {:%Y-%m-%d %H:%M}.".format( + "Chosen launch time is not available in the provided file, which starts at {:}.".format( fileTimeDate ) ) elif timeIndex == len(timeArray) - 1 and inputTimeNum > fileTimeNum: raise ValueError( - "Chosen launch time is not available in the provided file, which ends at {:%Y-%m-%d %H:%M}.".format( + "Chosen launch time is not available in the provided file, which ends at {:}.".format( fileTimeDate ) ) # Check if time is exactly equal to one in the file if inputTimeNum != fileTimeNum: warnings.warn( - "Exact chosen launch time is not available in the provided file, using {:%Y-%m-%d %H:%M} UTC instead.".format( + "Exact chosen launch time is not available in the provided file, using {:} UTC instead.".format( fileTimeDate ) ) @@ -1942,20 +1942,20 @@ class Environment: # Check if time is inside range supplied by file if timeIndex == 0 and inputTimeNum < fileTimeNum: raise ValueError( - "Chosen launch time is not available in the provided file, which starts at {:%Y-%m-%d %H:%M}.".format( + "Chosen launch time is not available in the provided file, which starts at {:}.".format( fileTimeDate ) ) elif timeIndex == len(timeArray) - 1 and inputTimeNum > fileTimeNum: raise ValueError( - "Chosen launch time is not available in the provided file, which ends at {:%Y-%m-%d %H:%M}.".format( + "Chosen launch time is not available in the provided file, which ends at {:}.".format( fileTimeDate ) ) # Check if time is exactly equal to one in the file if inputTimeNum != fileTimeNum: warnings.warn( - "Exact chosen launch time is not available in the provided file, using {:%Y-%m-%d %H:%M} UTC instead.".format( + "Exact chosen launch time is not available in the provided file, using {:} UTC instead.".format( fileTimeDate ) )
Dev * removes ssao=t * append singletons reads to sam add unmapped reads to sam so that pileup statistics in log file are correct.
@@ -515,7 +515,7 @@ rule align_reads_to_final_contigs: ref={input.fasta} \ {params.input} \ trimreaddescriptions=t \ - outm={output.sam} \ + out={output.sam} \ {params.unmapped} \ threads={threads} \ pairlen={params.max_distance_between_pairs} \ @@ -528,6 +528,8 @@ rule align_reads_to_final_contigs: local=t \ ambiguous={params.ambiguous} \ secondary=t \ + append=t \ + machineout=t \ maxsites={params.maxsites} \ -Xmx{resources.java_mem}G \ 2> {log}
Add SupplierPart detail API RUD view
@@ -167,6 +167,16 @@ class SupplierPartList(generics.ListAPIView): ] +class SupplierPartDetail(generics.RetrieveUpdateDestroyAPIView): + + queryset = SupplierPart.objects.all() + serializer_class = SupplierPartSerializer + permission_classes = (permissions.IsAuthenticatedOrReadOnly,) + + read_only_fields = [ + ] + + class SupplierPriceBreakList(generics.ListCreateAPIView): queryset = SupplierPriceBreak.objects.all() @@ -189,13 +199,21 @@ cat_api_urls = [ url(r'^$', CategoryList.as_view(), name='api-part-category-list'), ] +supplier_part_api_urls = [ + + url(r'^(?P<pk>\d+)/?', SupplierPartDetail.as_view(), name='api-supplier-part-detail'), + + # Catch anything else + url(r'^.*$', SupplierPartList.as_view(), name='api-part-supplier-list'), +] + part_api_urls = [ url(r'^tree/?', PartCategoryTree.as_view(), name='api-part-tree'), url(r'^category/', include(cat_api_urls)), + url(r'^supplier/?', include(supplier_part_api_urls)), url(r'^price-break/?', SupplierPriceBreakList.as_view(), name='api-part-supplier-price'), - url(r'^supplier/?', SupplierPartList.as_view(), name='api-part-supplier-list'), url(r'^bom/?', BomList.as_view(), name='api-bom-list'), url(r'^(?P<pk>\d+)/', PartDetail.as_view(), name='api-part-detail'),
Fix colors in CSS completion `_COLOR_KEYWORDS` is a dict. At least, it is now.
@@ -46,7 +46,7 @@ class CssNamedColorsCompletionProvider(GObject.GObject, GtkSource.CompletionProv store = Gio.ListStore.new(CssNamedColorProposal) self._filter_data.word = context.get_word() - for color_name, color_rgb in tinycss2.color3._COLOR_KEYWORDS: + for color_name in tinycss2.color3._COLOR_KEYWORDS: proposal = CssNamedColorProposal(color_name) store.append(proposal)
Run PyTorch macOS CPU-only build/test on all PRs Summary: Pull Request resolved: Test Plan: Imported from OSS
@@ -41,6 +41,7 @@ default_set = set([ # Caffe2 OSX 'caffe2-py2-system-macos10.13', # PyTorch OSX + 'pytorch-macos-10.13-py3', 'pytorch-macos-10.13-cuda9.2-cudnn7-py3', # PyTorch Android 'pytorch-linux-xenial-py3-clang5-android-ndk-r19c-x86_32-build',
Bind Is_Ghost in the Python API TN:
@@ -641,6 +641,11 @@ class ${root_astnode_name}(object): ${py_doc('langkit.node_kind', 8)} return self._kind_name + @property + def is_ghost(self): + ${py_doc('langkit.node_is_ghost', 8)} + return bool(_node_is_ghost(self._c_value)) + @property def sloc_range(self): ${py_doc('langkit.node_sloc_range', 8)} @@ -1062,6 +1067,10 @@ _kind_name = _import_func( '${capi.get_name("kind_name")}', [_enum_node_kind], _text ) +_node_is_ghost = _import_func( + '${capi.get_name("node_is_ghost")}', + [_node], ctypes.c_int +) _node_short_image = _import_func( '${capi.get_name("node_short_image")}', [_node], _text