Dataset columns: message (string, 13–484 chars), diff (string, 38–4.63k chars)
Better on_error handling for from_callable Pass the instance of the caught exception to the `on_error` handler, instead of the exception's type.
@@ -58,8 +58,8 @@ def from_callable(cls, supplier, scheduler=None): try: observer.on_next(supplier()) observer.on_completed() - except Exception: - observer.on_error(Exception) + except Exception as e: + observer.on_error(e) return scheduler.schedule(action) return AnonymousObservable(subscribe)
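For illustration, a hedged usage sketch of what subscribers observe after this change (the supplier and handlers are hypothetical; the RxPY 1.x-style API from the diff is assumed): the on_error callback now receives the caught exception instance, so its type and message survive.

from rx import Observable  # assumed RxPY 1.x-style import

def flaky_supplier():
    raise ValueError("backend unavailable")  # hypothetical failure

Observable.from_callable(flaky_supplier).subscribe(
    on_next=lambda value: print("got", value),
    # Before the fix the handler was given the Exception *class*;
    # now it is the caught instance, with the original type and message.
    on_error=lambda err: print("failed:", type(err).__name__, err),
)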
refactor(ldap): use posixgroup Adjusted to posixgroup, as OpenLDAP groups use objectclass 'posixgroup' for both a POSIX group and a Samba group. issue
@@ -161,8 +161,8 @@ class LDAPSettings(Document): elif self.ldap_directory_server.lower() == 'openldap': - ldap_object_class = 'GroupOfNames' - ldap_group_members_attribute = 'member' + ldap_object_class = 'posixgroup' + ldap_group_members_attribute = 'memberuid' elif self.ldap_directory_server.lower() == 'custom':
Derive clone for consensus engine helpers This will make them easier to work with when writing engines.
@@ -32,7 +32,7 @@ pub enum Update { BlockCommit(BlockId), } -#[derive(Default, Debug, Eq, Hash, PartialEq)] +#[derive(Clone, Default, Debug, Eq, Hash, PartialEq)] pub struct BlockId(Vec<u8>); impl Deref for BlockId { type Target = Vec<u8>; @@ -53,7 +53,7 @@ impl From<Vec<u8>> for BlockId { } /// All information about a block that is relevant to consensus -#[derive(Default, Debug)] +#[derive(Clone, Default, Debug)] pub struct Block { pub block_id: BlockId, pub previous_id: BlockId, @@ -62,7 +62,7 @@ pub struct Block { pub payload: Vec<u8>, } -#[derive(Default, Debug)] +#[derive(Clone, Default, Debug)] pub struct PeerId(Vec<u8>); impl Deref for PeerId { type Target = Vec<u8>;
fix: use current_app instead of 'app' for callbacks. Not sure this really mattered - but makes things uniform. For callbacks - use current_app rather than app.
@@ -854,7 +854,7 @@ class Security(object): # N.B. as of jinja 2.9 '_' is always registered # http://jinja.pocoo.org/docs/2.10/extensions/#i18n-extension if "_" not in app.jinja_env.globals: - app.jinja_env.globals["_"] = state.i18n_domain.gettext + current_app.jinja_env.globals["_"] = state.i18n_domain.gettext @app.before_first_request def _csrf_init(): @@ -905,9 +905,9 @@ class Security(object): if csrf: csrf.exempt("flask_security.views.logout") if csrf_cookie and csrf_cookie["key"]: - app.after_request(csrf_cookie_handler) + current_app.after_request(csrf_cookie_handler) # Add configured header to WTF_CSRF_HEADERS - app.config["WTF_CSRF_HEADERS"].append(cv("CSRF_HEADER")) + current_app.config["WTF_CSRF_HEADERS"].append(cv("CSRF_HEADER")) app.extensions["security"] = state
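As background, a minimal hedged sketch of what `current_app` is (the setting name is hypothetical): it is a proxy for the application bound to the active application context, so within that context it refers to the same object as `app`.

from flask import Flask, current_app

app = Flask(__name__)

with app.app_context():
    # current_app proxies the active application, so config and jinja_env
    # reached through it are the very same objects as on `app`.
    assert current_app._get_current_object() is app
    current_app.config["EXAMPLE_SETTING"] = True  # hypothetical key

assert app.config["EXAMPLE_SETTING"] is True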
Update __main__.py Better examples
@@ -283,6 +283,16 @@ print("Position of PP1 and PP2: ", obj.principalPlanePositions(z=0)) print("Focal spots positions: ", obj.focusPositions(z=0)) print("Distance between entrance and exit planes: ", obj.L) +path = ImagingPath() +path.fanAngle = 0.0 +path.fanNumber = 1 +path.rayNumber = 15 +path.objectHeight = 10.0 +path.label = "Demo #14 Path with generic objective" +path.append(Space(180)) +path.append(obj) +path.append(Space(10)) +path.display(comments=path.label+""" path = ImagingPath() path.fanAngle = 0.0 path.fanNumber = 1 @@ -292,7 +302,7 @@ path.label = "Path with generic objective" path.append(Space(180)) path.append(obj) path.append(Space(10)) -path.display() +path.display()""") # Demo #15: Olympus objective LUMPlanFL40X path = ImagingPath() @@ -300,10 +310,10 @@ path.fanAngle = 0.0 path.fanNumber = 1 path.rayNumber = 15 path.objectHeight = 10.0 -path.label = "Path with LUMPlanFL40X" +path.label = "Demo #15 Path with LUMPlanFL40X" path.append(Space(180)) path.append(olympus.LUMPlanFL40X()) -path.display(comments=path.label+"""\n +path.display(comments=path.label+""" path = ImagingPath() path.fanAngle = 0.0 path.fanNumber = 1 @@ -314,11 +324,10 @@ path.append(Space(180)) path.append(olympus.LUMPlanFL40X()) path.append(Space(10)) path.display()""") -path.append(Space(10)) # Demo #16: Vendor lenses path = ImagingPath() -path.label = "Vendor Lenses" +path.label = "Demo #16: Vendor Lenses" path.append(Space(d=5)) path.append(thorlabs.AC254_050_A()) path.append(Space(d=50))
[Stress tester XFails] Update XFails Add two new timeouts that started occurring after migrating argument completion to solver-based completion. Also add another case of that issue which I missed in my last PR
], "issueUrl" : "https://bugs.swift.org/browse/SR-14694" }, + { + "path" : "*\/MovieSwift\/MovieSwift\/MovieSwift\/views\/components\/moviesList\/base\/MoviesList.swift", + "modification" : "unmodified", + "issueDetail" : { + "kind" : "codeComplete", + "offset" : 4791 + }, + "applicableConfigs" : [ + "main" + ], + "issueUrl" : "https://bugs.swift.org/browse/SR-14694" + }, + { + "path" : "*\/MovieSwift\/MovieSwift\/MovieSwift\/views\/components\/moviesList\/base\/MoviesList.swift", + "modification" : "unmodified", + "issueDetail" : { + "kind" : "codeComplete", + "offset" : 4959 + }, + "applicableConfigs" : [ + "main" + ], + "issueUrl" : "https://bugs.swift.org/browse/SR-14694" + }, + { + "path" : "*\/MovieSwift\/MovieSwift\/MovieSwift\/views\/components\/moviesList\/base\/MoviesList.swift", + "modification" : "unmodified", + "issueDetail" : { + "kind" : "codeComplete", + "offset" : 5023 + }, + "applicableConfigs" : [ + "main" + ], + "issueUrl" : "https://bugs.swift.org/browse/SR-14694" + }, { "path" : "*\/ACHNBrowserUI\/ACHNBrowserUI\/ACHNBrowserUI\/packages\/Backend\/Sources\/Backend\/models\/Villager.swift", "modification" : "concurrent-658", ], "issueUrl" : "https://bugs.swift.org/browse/SR-16014" }, + { + "path" : "*\/MovieSwift\/MovieSwift\/Packages\/UI\/Sources\/UI\/fields\/SearchTextObservable.swift", + "modification" : "unmodified", + "issueDetail" : { + "kind" : "codeComplete", + "offset" : 976 + }, + "applicableConfigs" : [ + "main" + ], + "issueUrl" : "https://bugs.swift.org/browse/SR-16014" + }, { "path" : "*\/ACHNBrowserUI\/ACHNBrowserUI\/ACHNBrowserUI\/views\/todayDashboard\/TodayView.swift", "modification" : "unmodified",
Fix race condition in `ThreadedHistory`. The Lock in `ThreadedHistory` was not always properly released, and because of that, in situations where the user was pasting enormous amounts of text, the application could freeze at the point where lines were added to the history.
@@ -158,8 +158,13 @@ class ThreadedHistory(History): continue # Read new items (in lock). - await loop.run_in_executor(None, self._lock.acquire) + # (Important: acquiring the lock should happen *in* the try + # block. Otherwise it's not guaranteed it will ever be + # released. This can happen when this coroutine is cancelled at + # an "await" point. This did actually happen when continuously + # pasting huge amounts of text in ptpython.) try: + await loop.run_in_executor(None, self._lock.acquire) new_items = self._loaded_strings[items_yielded:] done = self._loaded event.clear()
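A simplified sketch of the pattern the patch adopts (names hypothetical): the blocking acquire runs in an executor and sits inside the try block, so a CancelledError raised at that await point still reaches the finally clause and the lock is released instead of staying held.

import threading

_lock = threading.Lock()

async def _read_new_items(loop, loaded_strings, items_yielded):
    try:
        # Acquire *inside* the try block: if this coroutine is cancelled at
        # the await point, the finally clause still runs and releases the
        # lock rather than leaving it held forever.
        await loop.run_in_executor(None, _lock.acquire)
        return loaded_strings[items_yielded:]
    finally:
        _lock.release()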
libmanage.py: turn --cargs into --gargs and make it convenient for opts TN:
@@ -9,6 +9,7 @@ import os from os import path import pdb import pipes +import shlex import shutil import subprocess import sys @@ -375,8 +376,8 @@ class ManageScript(object): help='Disable warnings to build the generated library' ) subparser.add_argument( - '--cargs', nargs='*', default=[], - help='Options to pass as "-cargs" to GPRbuild' + '--gargs', + help='Options appened to GPRbuild invocations' ) subparser.add_argument( '--disable-mains', type=self.parse_mains_list, default=[], nargs=1, @@ -698,10 +699,11 @@ class ManageScript(object): elif args.verbosity == Verbosity('debug'): base_argv.append('-vl') - cargs = [] - # Depending on where this is invoked, the "cargs" option may not be set - if hasattr(args, 'cargs'): - cargs.extend(args.cargs) + # Depending on where this is invoked, the "--gargs" option may not be + # set. Don't call shlex.split with an empty input, otherwise it will + # try to read something from stdin... + gargs = getattr(args, 'gargs', '') + gargs = shlex.split(gargs) if gargs else [] def run(library_type): argv = list(base_argv) @@ -711,8 +713,7 @@ class ManageScript(object): argv.extend('{}.adb'.format(main) for main in mains) if Diagnostics.style == DiagnosticStyle.gnu_full: argv.append('-gnatef') - argv.append('-cargs') - argv.extend(cargs) + argv.extend(gargs) self.check_call(args, 'Build', argv) build_shared, build_static = self.what_to_build(args, is_library)
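Two small, hedged illustrations of why shlex plus the `if gargs else []` guard are used here: shlex.split honours shell-style quoting for multi-word options, and the guard keeps an unset or empty --gargs from being split at all.

import shlex

# Shell-style quoting is preserved, so a user can pass multi-word options
# through --gargs as one string.
print(shlex.split('-O2 -g "-XDkey=some value"'))
# -> ['-O2', '-g', '-XDkey=some value']

# Guard pattern from the patch: only split when there is real input, so an
# unset/empty --gargs contributes no extra arguments to GPRbuild.
gargs = ""  # e.g. getattr(args, 'gargs', '')
gargs = shlex.split(gargs) if gargs else []
print(gargs)  # -> []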
[tasks] remove redundant condition in Loop.next_iteration self._task is only None if the Loop has never been started before, which means None should always be returned, regardless of how many seconds were passed into the constructor. This didn't break anything before because self._next_iteration will be None as well if self._task is None.
@@ -154,7 +154,7 @@ class Loop: .. versionadded:: 1.3 """ - if self._task is None and self._sleep: + if self._task is None: return None elif self._task and self._task.done() or self._stop_next_iteration: return None
populate_db: Generate resolved topics for testing. To try to match normal workflow, some streams have many resolved topics and others have few.
@@ -6,6 +6,7 @@ from typing import Any, Dict, List import orjson from scripts.lib.zulip_tools import get_or_create_dev_uuid_var_path +from zerver.lib.topic import RESOLVED_TOPIC_PREFIX def load_config() -> Dict[str, Any]: @@ -36,7 +37,23 @@ def generate_topics(num_topics: int) -> List[str]: topic = " ".join(filter(None, generated_topic)) topics.append(topic) - return topics + # Mark a small subset of topics as resolved in some streams, and + # many topics in a few streams. Note that these don't have the + # "Marked as resolved" messages, so don't match the normal user + # experience perfectly. + if random.random() < 0.15: + resolved_topic_probability = 0.5 + else: + resolved_topic_probability = 0.05 + + final_topics = [] + for topic in topics: + if random.random() < resolved_topic_probability: + final_topics.append(RESOLVED_TOPIC_PREFIX + topic) + else: + final_topics.append(topic) + + return final_topics def load_generators(config: Dict[str, Any]) -> Dict[str, Any]:
fix setting and getting locale on SUSE systems Even on systems with systemd, SUSE still uses /etc/sysconfig/language
@@ -127,13 +127,14 @@ def get_locale(): salt '*' locale.get_locale ''' cmd = '' - if salt.utils.systemd.booted(__context__): + if 'Suse' in __grains__['os_family']: + # this block applies to all SUSE systems - also with systemd + cmd = 'grep "^RC_LANG" /etc/sysconfig/language' + elif salt.utils.systemd.booted(__context__): params = _parse_dbus_locale() if HAS_DBUS else _parse_localectl() return params.get('LANG', '') elif 'RedHat' in __grains__['os_family']: cmd = 'grep "^LANG=" /etc/sysconfig/i18n' - elif 'SUSE' in __grains__['os_family']: - cmd = 'grep "^RC_LANG" /etc/sysconfig/language' elif 'Debian' in __grains__['os_family']: # this block only applies to Debian without systemd cmd = 'grep "^LANG=" /etc/default/locale' @@ -161,7 +162,17 @@ def set_locale(locale): salt '*' locale.set_locale 'en_US.UTF-8' ''' - if salt.utils.systemd.booted(__context__): + if 'Suse' in __grains__['os_family']: + # this block applies to all SUSE systems - also with systemd + if not __salt__['file.file_exists']('/etc/sysconfig/language'): + __salt__['file.touch']('/etc/sysconfig/language') + __salt__['file.replace']( + '/etc/sysconfig/language', + '^RC_LANG=.*', + 'RC_LANG="{0}"'.format(locale), + append_if_not_found=True + ) + elif salt.utils.systemd.booted(__context__): return _localectl_set(locale) elif 'RedHat' in __grains__['os_family']: if not __salt__['file.file_exists']('/etc/sysconfig/i18n'): @@ -172,15 +183,6 @@ def set_locale(locale): 'LANG="{0}"'.format(locale), append_if_not_found=True ) - elif 'SUSE' in __grains__['os_family']: - if not __salt__['file.file_exists']('/etc/sysconfig/language'): - __salt__['file.touch']('/etc/sysconfig/language') - __salt__['file.replace']( - '/etc/sysconfig/language', - '^RC_LANG=.*', - 'RC_LANG="{0}"'.format(locale), - append_if_not_found=True - ) elif 'Debian' in __grains__['os_family']: # this block only applies to Debian without systemd update_locale = salt.utils.which('update-locale')
[modules/battery_all] Fix remaining time calculation Thanks to for pointing out a bug in the calculation of the remaining time for multiple batteries. see
@@ -6,6 +6,7 @@ Parameters: * battery.device : Comma-separated list of battery devices to read information from (defaults to auto for auto-detection) * battery.warning : Warning threshold in % of remaining charge (defaults to 20) * battery.critical : Critical threshold in % of remaining charge (defaults to 10) + * batter.showremaining : If set to true (default) shows the remaining time until the batteries are completely discharged """ import os @@ -84,7 +85,7 @@ class Module(bumblebee.engine.Module): widget.set("capacity", 100) return "ac" - capacity = int( self.energy_now / self.energy_full * 100) + capacity = int( float(self.energy_now) / float(self.energy_full) * 100.0) capacity = capacity if capacity < 100 else 100 widget.set("capacity", capacity) output = "{}%".format(capacity)
Update version 0.7.6 -> 0.7.7 New SampleSet object
# # ================================================================================================ -__version__ = '0.7.6' +__version__ = '0.7.7' __author__ = 'D-Wave Systems Inc.' __authoremail__ = '[email protected]' __description__ = 'A shared API for binary quadratic model samplers.'
Don't get node info if there is no node. This prevents the exception from being re-raised if the node is not ready.
@@ -118,7 +118,7 @@ class TestResult: set_filtered_fields(self, result_message, fields=fields) # get information of default node, and send to notifier. - if self.environment: + if self.environment and self.environment.nodes: environment_information = ( self.environment.default_node.get_node_information() )
Only check if docs build Summary: to facilitate OSS contributions Test Plan: Unit tests Reviewers: schrockn, alangenfeld, natekupp
-# py27 compat, see https://stackoverflow.com/a/844443/324449 -import io import os -import re import subprocess import sys from dagster.utils import script_relative_path -BUILT_DOCS_RELATIVE_PATH = '_build/' - -IGNORE_FILES = [ - '.DS_Store', - '.pytest_cache', - 'objects.inv', - '[A-Z0-9a-z_-]*\\.png', - '[A-Z0-9a-z_-]*\\.gif', - '[A-Z0-9a-z_-]*\\.doctree', - '[A-Z0-9a-z_-]*\\.pickle', - 'searchindex.js', -] - - -def _path_starts_with(path, starts_with): - if not isinstance(path, list): - path = path.split(os.sep) - - i = len(starts_with) - - return path[:i] == starts_with - - -# Right now, these tests fail as soon as a snapshot fails -- and there is no way to see *all* of -# the snapshot failures associated with a diff. We should probably break doc build into a fixture, -# and then figure out a way to either dynamically generate a test case for each snapshot -# (probably hard since tests are collected before fixtures are executed -- but maybe we can lever -# the checked-in snapshots for this) or collect the test failures and display all of them. @pytest.mark.docs @pytest.mark.skipif(sys.version_info < (3, 6), reason="We don't support building docs in python 2") -def test_build_all_docs(snapshot): +def test_build_all_docs(): pwd = os.getcwd() try: os.chdir(script_relative_path('.')) subprocess.check_output(['make', 'clean']) subprocess.check_output(['make', 'html']) - os.chdir(script_relative_path(BUILT_DOCS_RELATIVE_PATH)) - walked = sorted( - [ - ( - dirpath.split(os.sep), - sorted(dirnames), - sorted( - [ - filename - for filename in filenames - if not any((re.match(pattern, filename) for pattern in IGNORE_FILES)) - ] - ), - ) - for dirpath, dirnames, filenames in os.walk('.') - # Omit the built source files and autodocs from snapshot testing to avoid test - # failure fatigue - if ( - not _path_starts_with(dirpath, ['.', 'html', '_modules', 'dagster']) - and not _path_starts_with(dirpath, ['.', 'html', '_sources', 'sections', 'api']) - ) - ], - key=lambda x: x[0], - ) - snapshot.assert_match(walked) - # The snapshot tests only need to run on py3, because the docs aren't built on py2 - if sys.version_info[0] < 3: - return - - for dirpath, _dirnames, filenames in walked: - # Omit the built source files and autodocs from snapshot testing to avoid test - # failure fatigue - if _path_starts_with( - dirpath, ['.', 'html', '_modules', 'dagster'] - ) or _path_starts_with(dirpath, ['.', 'html', '_sources', 'sections', 'api']): - continue - for filename in filenames: - if any((re.match(pattern, filename) for pattern in IGNORE_FILES)): - continue - # py27 compat - with io.open( - os.path.join(*([d for d in dirpath] + [filename])), mode='r', encoding='utf-8' - ) as fd: - try: - snapshot.assert_match(fd.read()) - except UnicodeDecodeError: - raise Exception((dirpath, filename)) - finally: os.chdir(pwd)
Test fix - BuildKit Fails on CI Fix
@@ -54,13 +54,15 @@ ENV WORKDIR /out RUN mkdir -p $WORKDIR WORKDIR $WORKDIR +# The argument needs to be re-declared otherwise it returns an empty string. +ARG fuzzer # Copy over all the build artifacts (without * to preserve directory structure). # This also copies seed and dictionary files if they are available. COPY --from=builder /out/ ./ # Copy the fuzzer.py file. -COPY --from=builder /src/fuzzer.py . +COPY fuzzers/$fuzzer/fuzzer.py . # Copy the fuzzers directory. -COPY --from=builder /src/fuzzers fuzzers +COPY fuzzers/ ./fuzzers # Create empty __init__.py to allow python deps to work. RUN touch __init__.py
Update radix_sort.py This fixes the error caused by the list index being a float
@@ -10,8 +10,8 @@ def radixsort(lst): # split lst between lists for i in lst: - tmp = i / placement - buckets[tmp % RADIX].append( i ) + tmp = int((i / placement) % RADIX) + buckets[tmp].append(i) if maxLength and tmp > 0: maxLength = False
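For context, a small standalone illustration of the bug (values hypothetical): Python 3's `/` always returns a float, so the bucket index must be truncated back to an int, or computed with `//`.

RADIX = 10
placement = 1
i = 802

tmp = i / placement            # 802.0 -- a float under Python 3 division
# buckets[tmp % RADIX]         # would raise: list indices must be integers

tmp = int((i / placement) % RADIX)   # the fix: 2
print(tmp)
print((i // placement) % RADIX)      # equivalent, using floor division: 2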
Attach subscription during provisioning. Fixes
pool: "{{ rhsm_pool }}" when: "'Current' not in subscribed.stdout and rhsm_user is defined and rhsm_user" + - name: Check if subscription is attached + command: subscription-manager list --consumed --pool-only --matches="{{ rhsm_pool }}" + register: subscription_attached + changed_when: no + + - block: + - name: Get pool id + shell: subscription-manager list --available --pool-only --matches="{{ rhsm_pool }}" | head -n 1 + register: pool_id + changed_when: no + + - name: Fail if no pool ID is returned + fail: + msg: No subscription matching "{{ rhsm_pool }}" found + when: pool_id.stdout == "" + + - name: Attach subscription + command: subscription-manager attach --pool="{{ pool_id.stdout }}" + + when: subscription_attached.stdout == "" + when: ansible_distribution == "RedHat"
Added a few data sources and updated previous ones. Made a few changes to the data sources: 1.) Added links to a few new data sources. 2.) Updated previous data sources with links to their respective sites.
@@ -5,10 +5,13 @@ to select data source basing on location, or on the user's preferences. ## Possible data sources -* OpenWeatherMap -* AccuWeather -* Windy.com -* yr.no +* [Open weather map](https://openweathermap.org/) +* [Accu weather](https://www.accuweather.com/) +* [Windy](https://www.windy.com/?26.953,75.711,5) +* [Yr](https://www.yr.no/nb) * [BBC WeatherFeeds](https://support.bbc.co.uk/platform/feeds/WeatherFeeds.htm) -* http://www.bom.gov.au - +* [Bom](http://www.bom.gov.au) +* [IMD](https://mausam.imd.gov.in/) +* [darksky](https://darksky.net/forecast/40.7127,-74.0059/us12/en) +* [weather bug](https://www.weatherbug.com/) +* [weather underground](https://www.wunderground.com/)
client: disable the cas profiler We don't see download slowness now.
@@ -604,18 +604,6 @@ def _fetch_and_map_with_cas(cas_client, digest, instance, output_dir, cache_dir, 'info', ] - # cpu profile may not work fast on armv7l. - # https://crbug.com/1197523#c10 - do_profile = platform.machine() != 'armv7l' - - if do_profile: - cmd.extend([ - '-profile-output-dir', - profile_dir, - '-profile-cpu', - '-profile-trace', - ]) - if kvs_dir: cmd.extend(['-kvs-dir', kvs_dir]) @@ -629,18 +617,6 @@ def _fetch_and_map_with_cas(cas_client, digest, instance, output_dir, cache_dir, file_path.rmtree(kvs_dir) _run_go_cmd_and_wait(cmd, tmp_dir) - if time.time() - start >= 30 and do_profile: - # If downloading takes long time, upload profile for later performance - # analysis. - try: - subprocess42.check_call([ - cas_client, 'archive', '-cas-instance', instance, '-paths', - profile_dir + ':.' - ]) - except Exception: - logging.error('Failed to upload profile data', exc_info=True) - on_error.report(None) - with open(result_json_path) as json_file: result_json = json.load(json_file)
Add CHANGELOG entries for 0.6.14 See also the 0.6-maintenance branch.
+0.6.14 2019-08-30 +----------------- + +* Bugfix follow Werkzeug LocalProxy name API. +* Bugfix ensure multiple files are correctly loaded. +* Bugfix ensure make_response status code is an int. +* Bugfix be clear about header encoding. +* Bugfix ensure loading form/files data is timeout protected. +* Bugfix add missing Unauthorized, Forbidden, and NotAcceptable + exception classes. + 0.9.1 2019-05-12 ----------------
implement tri-state logic for create_flatten_image The customer wants more granularity: they want to create a flattened 'image', but not a separate 'image' per layer.
@@ -32,7 +32,7 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): # TODO check if could be set globally, probably doesn't make sense when # flattened template cannot subset_template_name = "" - create_flatten_image = False + create_flatten_image = "no" # probably not possible to configure this globally flatten_subset_template = "" @@ -98,13 +98,16 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): "Subset {} already created, skipping.".format(subset)) continue - instance = self._create_instance(context, layer, resolved_family, + if self.create_flatten_image != "only": + instance = self._create_instance(context, layer, + resolved_family, asset_name, subset, task_name) + created_instances.append(instance) + existing_subset_names.append(subset) publishable_layers.append(layer) - created_instances.append(instance) - if self.create_flatten_image and publishable_layers: + if self.create_flatten_image != "no" and publishable_layers: self.log.debug("create_flatten_image") if not self.flatten_subset_template: self.log.warning("No template for flatten image") @@ -116,7 +119,7 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): first_layer = publishable_layers[0] # dummy layer first_layer.name = subset - family = created_instances[0].data["family"] # inherit family + family = resolved_family # inherit family instance = self._create_instance(context, first_layer, family, asset_name, subset, task_name)
docs/CARS: add 2022 Camry * Add 2022 Camry ICE DongleID/route f72a3ad7dc38d5cf|2021-11-14--09-21-28 * fix whitespace
| Toyota | Avalon 2016-21 | TSS-P | Stock<sup>3</sup>| 20mph<sup>1</sup> | 0mph | | Toyota | Avalon Hybrid 2019-21 | TSS-P | Stock<sup>3</sup>| 20mph<sup>1</sup> | 0mph | | Toyota | Camry 2018-20 | All | Stock | 0mph<sup>4</sup> | 0mph | -| Toyota | Camry 2021 | All | openpilot | 0mph<sup>4</sup> | 0mph | +| Toyota | Camry 2021-22 | All | openpilot | 0mph<sup>4</sup> | 0mph | | Toyota | Camry Hybrid 2018-20 | All | Stock | 0mph<sup>4</sup> | 0mph | | Toyota | Camry Hybrid 2021-22 | All | openpilot | 0mph | 0mph | | Toyota | C-HR 2017-20 | All | Stock | 0mph | 0mph |
Camera animation issue. Fixed by getting the correct camera object from the depsgraph.
@@ -223,11 +223,12 @@ class RenderEngine(Engine): return # EXPORT CAMERA - camera_key = object.key(scene.camera) + camera_key = object.key(scene.camera) # current camera key rpr_camera = self.rpr_context.create_camera(camera_key) self.rpr_context.scene.set_camera(rpr_camera) - camera_obj = scene.camera + # camera object should be taken from depsgrapgh objects + camera_obj = depsgraph.objects[camera_key] self.camera_data = camera.CameraData.init_from_camera(camera_obj.data, camera_obj.matrix_world, screen_width / screen_height, border)
fix: minor changes Clear the headline to avoid duplicate headlines. Don't show the headline for new forms
@@ -8,14 +8,14 @@ frappe.ui.form.on('Web Template', { } frm.toggle_display('standard', frappe.boot.developer_mode); - frm.toggle_display('template', !frm.doc.standard); }, standard: function(frm) { - if (!frm.doc.standard) { + if (!frm.doc.standard && !frm.is_new()) { // If standard changes from true to false, hide template until // the next save. Changes will get overwritten from the backend // on save and should not be possible in the UI. frm.toggle_display('template', false); + frm.dashboard.clear_headline(); frm.dashboard.set_headline(__('Please save to edit the template.')); } }
postgresql compatibility for get_l3_agent routines This commit fixes a bug caused by the sqlalchemy group_by statement in the get_l3_agent routines when using postgresql. All select statements need to be replicated in the group_by statement. Closes-Bug:
@@ -96,7 +96,7 @@ class Agent(base.NeutronDbObject): rb_model.RouterL3AgentBinding.router_id ).label('count')).outerjoin( rb_model.RouterL3AgentBinding).group_by( - agent_model.Agent.id, + agent_model.Agent, rb_model.RouterL3AgentBinding .l3_agent_id).order_by('count') res = query.filter(agent_model.Agent.id.in_(agent_ids)).first() @@ -110,7 +110,7 @@ class Agent(base.NeutronDbObject): rb_model.RouterL3AgentBinding.router_id) .label('count')). outerjoin(rb_model.RouterL3AgentBinding). - group_by(agent_model.Agent.id). + group_by(agent_model.Agent). filter(agent_model.Agent.id.in_(agent_ids)). order_by('count')) agents = [cls._load_object(context, record[0]) for record in query]
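As a reference point, a hedged SQLAlchemy sketch of the rule behind the change (the models are passed in as placeholders for the real Agent and binding classes): PostgreSQL requires every selected non-aggregated column to appear in GROUP BY, and grouping by the mapped entity expands to all of its columns, which satisfies that while remaining valid on MySQL.

from sqlalchemy import func

def least_loaded_agents_query(session, Agent, RouterL3AgentBinding):
    # `Agent` and `RouterL3AgentBinding` are hypothetical stand-ins here;
    # only the group_by argument matters for this fix.
    return (
        session.query(
            Agent,
            func.count(RouterL3AgentBinding.router_id).label("count"),
        )
        .outerjoin(RouterL3AgentBinding)
        # Grouping by Agent.id alone leaves the other selected Agent columns
        # outside GROUP BY, which PostgreSQL rejects; grouping by the entity
        # expands to all of its columns.
        .group_by(Agent)
        .order_by("count")
    )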
Also auto-append .bat on windows (fixes This is apparently how dart wraps scripts on Windows
@@ -20,10 +20,11 @@ def add_extension_if_missing(server_binary_args: 'List[str]') -> 'List[str]': # what extensions should we append so CreateProcess can find it? # node has .cmd - # are .bat files common? + # dart has .bat # python has .exe wrappers - not needed - if path_to_executable and path_to_executable.lower().endswith('.cmd'): - executable_arg = executable_arg + ".cmd" + for extension in ['.cmd', '.bat']: + if path_to_executable and path_to_executable.lower().endswith(extension): + executable_arg = executable_arg + extension updated_args = [executable_arg] updated_args.extend(server_binary_args[1:]) return updated_args
import separate files from Ramda to prevent the entire ramda library from being bundled
/* eslint-disable no-undef,react/no-did-update-set-state,no-magic-numbers */ -import R from 'ramda'; +import { + comparator, + equals, + forEach, + has, + isEmpty, + lt, + path, + pathOr, + sort, +} from 'ramda'; import React from 'react'; import PropTypes from 'prop-types'; import {connect} from 'react-redux'; @@ -40,7 +50,7 @@ class Reloader extends React.Component { * then we could simply compare `props` with `prevProps` in * `componentDidUpdate`. */ - if (!R.isEmpty(props.reloadRequest) && props.reloadRequest.status !== 'loading') { + if (!isEmpty(props.reloadRequest) && props.reloadRequest.status !== 'loading') { return {reloadRequest: props.reloadRequest}; } } @@ -60,25 +70,25 @@ class Reloader extends React.Component { * The first reloadRequest defines the initial/baseline hash - * it doesn't require a reload */ - if (!R.has('reloadRequest', prevState)) { + if (!has('reloadRequest', prevState)) { return; } if (reloadRequest.status === 200 && - R.path(['content', 'reloadHash'], reloadRequest) !== - R.path(['reloadRequest', 'content', 'reloadHash'], prevState) + path(['content', 'reloadHash'], reloadRequest) !== + path(['reloadRequest', 'content', 'reloadHash'], prevState) ) { // Check for CSS (!content.hard) or new package assets if ( reloadRequest.content.hard || - !R.equals( + !equals( reloadRequest.content.packages.length, - R.pathOr([], ['reloadRequest', 'content', 'packages'], prevState).length + pathOr([], ['reloadRequest', 'content', 'packages'], prevState).length ) || - !R.equals( - R.sort(R.comparator(R.lt), reloadRequest.content.packages), - R.sort(R.comparator(R.lt), R.pathOr([], ['reloadRequest', 'content', 'packages'], prevState)) + !equals( + sort(comparator(lt), reloadRequest.content.packages), + sort(comparator(lt), pathOr([], ['reloadRequest', 'content', 'packages'], prevState)) ) ) { // Look if it was a css file. @@ -101,7 +111,7 @@ class Reloader extends React.Component { node = it.iterateNext(); } - R.forEach( + forEach( n => n.setAttribute('disabled', 'disabled'), nodesToDisable );
Lkt: add missing doc for the public GrammarDecl.lexer property TN:
@@ -974,11 +974,13 @@ class GrammarDecl(BaseGrammarDecl): syn_name = Field(type=T.DefId) rules = Field(type=T.FullDecl.list) - lexer = Property( - Entity.full_decl.get_annotation('with_lexer') - .params.params.at(0).value.as_entity.check_referenced_decl, - public=True - ) + @langkit_property(public=True) + def lexer(): + """ + Return the lexer that is associated to this grammar. + """ + return (Entity.full_decl.get_annotation('with_lexer') + .params.params.at(0).value.as_entity.check_referenced_decl) env_spec = EnvSpec( add_to_env_kv(Entity.name, Self),
Fix the view to origin This should prevent surprising scroll behavior, since (0, 0) is always in the view bounding box. The canvas can grow by moving items further away from the origin, but origin remains the anchor point. Also, origin is put in the upper left when opening a diagram.
@@ -207,7 +207,10 @@ class GtkView(Gtk.DrawingArea, Gtk.Scrollable): @property def bounding_box(self) -> Rectangle: """The bounding box of the complete view, relative to the view port.""" - return Rectangle(*self._qtree.soft_bounds) + bounds = Rectangle(*self._qtree.soft_bounds) + vx0, vy0 = self._matrix.transform_point(0, 0) + bounds += (vx0, vy0, 0, 0) + return bounds @property def hadjustment(self) -> Gtk.Adjustment: @@ -374,7 +377,7 @@ class GtkView(Gtk.DrawingArea, Gtk.Scrollable): def update_scrolling(self) -> None: allocation = self.get_allocation() self._scrolling.update_adjustments( - allocation.width, allocation.height, self._qtree.soft_bounds + allocation.width, allocation.height, self.bounding_box ) @g_async(single=True, priority=GLib.PRIORITY_HIGH_IDLE) @@ -484,7 +487,7 @@ class GtkView(Gtk.DrawingArea, Gtk.Scrollable): def on_resize(self, width: int, height: int) -> None: self._qtree.resize((0, 0, width, height)) - self._scrolling.update_adjustments(width, height, self._qtree.soft_bounds) + self.update_scrolling() if self.get_realized(): self._back_buffer_needs_resizing = True self.update_back_buffer()
reraise the same error instead of reraising the same class of error. Makes sure BBQ detects the same error.
@@ -189,7 +189,7 @@ def _networkimport(channel_id, update_progress=None, check_for_cancel=None): except OSError: pass ChannelMetadataCache.objects.filter(id=channel_id).delete() - raise UserCancelledError + raise connections.close_all() # close all DB connections (FIX for #1818) def _localimport(drive_id, update_progress=None, check_for_cancel=None): @@ -222,7 +222,7 @@ def _localimport(drive_id, update_progress=None, check_for_cancel=None): pass ChannelMetadataCache.objects.filter(id=channel_id).delete() connections.close_all() # close all DB connections (FIX for #1818)s - raise UserCancelledError + raise connections.close_all() # close all DB connections (FIX for #1818) @@ -249,7 +249,7 @@ def _localexport(drive_id, update_progress=None, check_for_cancel=None): except OSError: pass connections.close_all() # close all DB connections (FIX for #1818) - raise UserCancelledError + raise connections.close_all() # close all DB connections (FIX for #1818)
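A minimal standalone illustration of the difference (error text hypothetical): a bare `raise` re-raises the active exception object with its type, message and traceback, whereas `raise UserCancelledError` would replace it with a new, empty exception and hide what actually went wrong.

class UserCancelledError(Exception):
    pass

def cleanup_then_reraise():
    try:
        raise RuntimeError("disk full while importing channel")  # hypothetical failure
    except Exception:
        pass  # cleanup would happen here: delete partial metadata, close connections
        # raise UserCancelledError   # would discard the RuntimeError above
        raise                        # re-raises the original RuntimeError unchanged

try:
    cleanup_then_reraise()
except Exception as err:
    print(type(err).__name__, "-", err)  # RuntimeError - disk full while importing channel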
Added two new Plugins. bottle-jwt: JSON Web Token authentication plugin for bottle.py. bottle-smart-filters: Bottle Querystring smart guessing.
@@ -53,5 +53,13 @@ Have a look at :ref:`plugins` for general questions about plugins (installation, `Bottle-Werkzeug <http://pypi.python.org/pypi/bottle-werkzeug/>`_ Integrates the `werkzeug` library (alternative request and response objects, advanced debugging middleware and more). +`bottle-smart-filters <https://github.com/agile4you/bottle-smart-filters/>`_ + Bottle Querystring smart guessing. + +`bottle-jwt <https://github.com/agile4you/bottle-jwt/>`_ + JSON Web Token authentication plugin for bottle.py + + + Plugins listed here are not part of Bottle or the Bottle project, but developed and maintained by third parties.
ebuild.repository: UnconfiguredTree: add deprecated attr To build a packages restriction from profiles/package.deprecated for use by pkgcheck.
@@ -627,6 +627,12 @@ class UnconfiguredTree(prototype.tree): """Base package masks from profiles/package.mask.""" return frozenset(chain.from_iterable(repo._profile.masks[1] for repo in self.trees)) + @klass.jit_attr + def deprecated(self): + """Base deprecated packages restriction from profiles/package.deprecated.""" + return packages.OrRestriction(*chain.from_iterable( + repo._profile.pkg_deprecated[1] for repo in self.trees)) + def _regen_operation_helper(self, **kwds): return _RegenOpHelper( self, force=bool(kwds.get('force', False)),
Moves an import statement to be function-local. In order to keep the desired global-import dependencies in the tree structure described in the pyGSTi manual, an import of algorithms from within objects (which is not in the tree) has been relocated so it is local to the one function that needs it.
@@ -9,9 +9,6 @@ from __future__ import division, print_function, absolute_import, unicode_litera import numpy as _np import matplotlib.pyplot as plt -from ..algorithms import germselection as germsel - - class GermSetEval: def __init__(self, germset=None, gatesets=None, resultDict=None, errorDict=None): @@ -95,6 +92,8 @@ class GermSetEval: return {'fig': fig, 'ax': ax} def plot_spectra(self, axs=None): + from ..algorithms import germselection as germsel + missing = [key for key in self.__dict__ if key in ['germset', 'gatesets'] and self.__dict__[key] is None]
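A hedged sketch of the general technique (the imported module is a stdlib stand-in for `..algorithms.germselection`): moving the import inside the one function that needs it defers the dependency to call time, so merely importing this module no longer pulls the algorithms package into the global import graph.

def plot_spectra(values):
    # Imported inside the function rather than at module top level, so the
    # out-of-tree dependency is only loaded when this function runs.
    # Repeated calls are cheap: the module is cached in sys.modules.
    import statistics  # stands in for: from ..algorithms import germselection
    return statistics.fmean(values)

print(plot_spectra([1.0, 2.0, 3.0]))  # 2.0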
Update bibliography.bib Added references for LARS
@@ -40,6 +40,30 @@ eprint = {https://arc.aiaa.org/doi/pdf/10.2514/6.2019-3333} title = {Turbulence and the dynamics of coherent structures. I. Coherent structures} } +@article{LARS, + author = {Bradley Efron and Trevor Hastie and Iain Johnstone and Robert Tibshirani}, + title = {Least angle regression}, + journal = {The Annals of Statistics}, + year = {2004}, + volume = {32}, + number = {2}, + ISSN = {00905364}, + doi = {10.2307/3448465}, + pages = {407--451}, + publisher = {Institute of Mathematical Statistics}, +} + +@article{BLATMANLARS, + author = {G{\'{e}}raud Blatman and Bruno Sudret}, + title = {Adaptive sparse polynomial chaos expansion based on least angle regression}, + journal = {Journal of Computational Physics}, + year = {2011}, + volume = {230}, + number = {6}, + pages = {2345--2367}, + doi = {10.1016/j.jcp.2010.12.021}, +} + @article{HOSVD_1, author = {Giovanis, D.G. and Shields, M.D.}, title = {Variance-based simplex stochastic collocation with model order reduction for high-dimensional systems},
add option reverse for _get_selected, update docstring and use reverse = False in copy to clipboard
@@ -682,7 +682,7 @@ class FilterCoeffs(QWidget): cr = "\n" # newline character text = "" - sel = self._get_selected(self.tblCoeff)['sel'] + sel = self._get_selected(self.tblCoeff, reverse=False)['sel'] if not np.any(sel): # nothing selected -> copy everything raw from ba for r in range(self.num_rows): # text += qstr(self.tblCoeff.horizontalHeaderItem(r).text()) @@ -711,20 +711,23 @@ class FilterCoeffs(QWidget): #self.textLabel.setText(self.clipboard.text()) # read from clipboard #------------------------------------------------------------------------------ - def _get_selected(self, table): + def _get_selected(self, table, reverse=True): """ - get selected cells and return: - - indices of selected cells - - list of selected cells per column, sorted in reverse - - current cell selection + Get selected cells in `table`and return a dictionary with the following keys: + + 'idx': indices of selected cells as an unsorted list of tuples + + 'sel': list of selected cells per column, by default sorted in reverse + + 'cur': current cell selection as a tuple """ idx = [] for _ in table.selectedItems(): idx.append([_.column(), _.row(), ]) sel = [0, 0] - sel[0] = sorted([i[1] for i in idx if i[0] == 0], reverse = True) - sel[1] = sorted([i[1] for i in idx if i[0] == 1], reverse = True) + sel[0] = sorted([i[1] for i in idx if i[0] == 0], reverse = reverse) + sel[1] = sorted([i[1] for i in idx if i[0] == 1], reverse = reverse) # use set comprehension to eliminate multiple identical entries # cols = sorted(list({i[0] for i in idx}))
Reject abstract AST nodes with no concrete subclass (no-tn-check)
@@ -757,6 +757,24 @@ class CompileCtx(object): # Langkit_Support.Lexical_Env generic package requires it. T.env_md.require_hash_function() + def check_concrete_subclasses(self, astnode): + """ + Emit an error if `astnode` is abstract and has no concrete subclass. + + :param ASTNodeType astnode: AST node to check. + """ + # It's fine to have no list type, so as a special case we allow the + # generic list type to have no concrete subclass. + if astnode.is_generic_list_type or not astnode.abstract: + return + + check_source_language( + astnode.concrete_subclasses, + '{} is abstract and has no concrete subclass'.format( + astnode.dsl_name + ) + ) + def check_env_metadata(self, cls): """ Perform legality checks on `cls`, the env metadata struct. @@ -1271,6 +1289,8 @@ class CompileCtx(object): ASTNodePass('check homonym AST node fields', lambda _, astnode: astnode.check_homonym_fields(), auto_context=False), + ASTNodePass('reject abstract AST nodes with no concrete' + ' subclasses', CompileCtx.check_concrete_subclasses), GlobalPass('compute AST node kind constants', CompileCtx.compute_node_kind_constants), errors_checkpoint_pass,
check for empty reason after determining duration fixes
@@ -337,13 +337,14 @@ class Moderation(BaseCog): @commands.bot_has_permissions(ban_members=True) async def tempban(self, ctx: commands.Context, user: DiscordUser, duration: Duration, *, reason: Reason = ""): """tempban_help""" - if reason == "": - reason = Translator.translate("no_reason", ctx.guild.id) if duration.unit is None: parts = reason.split(" ") duration.unit = parts[0] reason = " ".join(parts[1:]) + if reason == "": + reason = Translator.translate("no_reason", ctx.guild.id) + member = ctx.guild.get_member(user.id) if member is not None: allowed, message = self._can_act("ban", ctx, member) @@ -629,12 +630,12 @@ class Moderation(BaseCog): @commands.bot_has_permissions(manage_roles=True, add_reactions=True) async def mute(self, ctx: commands.Context, target: discord.Member, duration: Duration, *, reason: Reason = ""): """mute_help""" - if reason == "": - reason = Translator.translate("no_reason", ctx.guild.id) if duration.unit is None: parts = reason.split(" ") duration.unit = parts[0] reason = " ".join(parts[1:]) + if reason == "": + reason = Translator.translate("no_reason", ctx.guild.id) roleid = Configuration.get_var(ctx.guild.id, "ROLES", "MUTE_ROLE") if roleid is 0: await ctx.send(
fw/version: Bump revision versions Bump the revision version for WA and the required version for devlib.
@@ -21,9 +21,9 @@ from subprocess import Popen, PIPE VersionTuple = namedtuple('Version', ['major', 'minor', 'revision', 'dev']) -version = VersionTuple(3, 1, 1, 'dev1') +version = VersionTuple(3, 1, 2, '') -required_devlib_version = VersionTuple(1, 1, 0, 'dev1') +required_devlib_version = VersionTuple(1, 1, 1, '') def format_version(v):
Updates CI for C++11 (again..) TravisCI is.... picky...
#!/bin/bash # This script needs to be run as admin sudo apt-get update -sudo apt-get install g++ ##An example of how to search for a file in apt packages ## (useful for debugging TravisCI build errors) @@ -21,10 +20,25 @@ sudo apt-get install g++ apt-get install libsuitesparse-dev cp /usr/lib/liblapack.so /usr/lib/libsuitesparseconfig.so -sudo apt remove cmake -sudo apt-get install g++ +sudo add-apt-repository ppa:ubuntu-toolchain-r/test +sudo apt-get update + +sudo update-alternatives --remove-all gcc +sudo update-alternatives --remove-all g++ +sudo apt-get install gcc-4.8 +sudo apt-get install g++-4.8 +sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.8 20 +sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 20 +sudo update-alternatives --config gcc +sudo update-alternatives --config g++ +sudo apt-get update +sudo apt-get upgrade -y +sudo apt-get dist-upgrade + export CXX=g++ +sudo apt remove cmake + # Install the following version of CMAKE version=3.11 build=1
Change index for shaft and disks when importing This makes it standard to change the index from 1-based to 0-based, which is the Python standard. Before this commit, this was done only for the shaft.
@@ -117,7 +117,11 @@ def read_table_file(file, element, sheet_name=0, n=0, sheet_type="Model"): if row[i].lower() == header_key_word: header_index = index header_found = True - if "inches" in row[i].lower() or "lbm" in row[i].lower() or 'lb' in row[i].lower(): + if ( + "inches" in row[i].lower() + or "lbm" in row[i].lower() + or "lb" in row[i].lower() + ): convert_to_metric = True if "rpm" in row[i].lower(): convert_to_rad_per_sec = True @@ -246,6 +250,14 @@ def read_table_file(file, element, sheet_name=0, n=0, sheet_type="Model"): for i in range(0, df.shape[0]): new_material[i] = "shaft_mat_" + str(int(new_material[i])) parameters["material"] = new_material + + # change xltrc index to python index (0 based) + if element in ("shaft", "disk"): + new_n = parameters["n"] + for i in range(0, df.shape[0]): + new_n[i] -= 1 + parameters["n"] = new_n + if convert_to_metric: for i in range(0, df.shape[0]): if element == "bearing":
Fixes an error with canonical url. Summary: Deleted this section by mistake in last PR. Pull Request resolved:
@@ -142,6 +142,14 @@ html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] # further. For a list of options available for each theme, see the # documentation. +html_theme_options = { + 'pytorch_project': 'docs', + 'canonical_url': 'https://pytorch.org/docs/stable/', + 'collapse_navigation': False, + 'display_version': True, + 'logo_only': True, +} + html_logo = '_static/img/pytorch-logo-dark-unstable.png' if RELEASE: html_logo = '_static/img/pytorch-logo-dark.svg'
Fix Requests Session verify parameter Due to bad variable naming, the wrong value was being passed to the verify session parameter. Let's rename the variables to reflect the insecure SSL parameter semantics.
@@ -58,7 +58,7 @@ class DownloadRemoteSourcePlugin(PreBuildPlugin): self.log.info('Checking for additional configurations at %s', url) session = get_retrying_requests_session() - session.verify = insecure + session.verify = not insecure response = session.get(url) response_json = response.json() @@ -75,8 +75,8 @@ class DownloadRemoteSourcePlugin(PreBuildPlugin): # Download the source code archive cachito_config = get_cachito(self.workflow) - verify_cert = cachito_config.get('insecure', False) - archive = download_url(self.url, self.workflow.source.workdir, insecure=verify_cert) + insecure_ssl_conn = cachito_config.get('insecure', False) + archive = download_url(self.url, self.workflow.source.workdir, insecure=insecure_ssl_conn) # Unpack the source code archive into a dedicated dir in container build workdir dest_dir = os.path.join(self.workflow.builder.df_dir, self.REMOTE_SOURCE) @@ -90,7 +90,8 @@ class DownloadRemoteSourcePlugin(PreBuildPlugin): tf.extractall(dest_dir) # Get the remote source configurations - config_files = self.get_remote_source_config(self.remote_source_conf_url, verify_cert) or [] + config_files = self.get_remote_source_config( + self.remote_source_conf_url, insecure_ssl_conn) or [] # Inject cachito provided configuration files for config in config_files:
refactor: rate limiter decorator added We have a rate limiter for reset password alone and it is not re-usable for other endpoints. Added a generic rate limiter decorator that can be used for any endpoint.
from __future__ import unicode_literals from datetime import datetime +from functools import wraps +from typing import Union + +from werkzeug.wrappers import Response + import frappe from frappe import _ from frappe.utils import cint -from werkzeug.wrappers import Response def apply(): @@ -79,3 +83,40 @@ class RateLimiter: def respond(self): if self.rejected: return Response(_("Too Many Requests"), status=429) + +def rate_limit(key: str, limit: int = 5, seconds: int= 24*60*60, methods: Union[str, list]='ALL'): + """Decorator to rate limit an endpoint. + + This will limit Number of requests per endpoint to `limit` within `seconds`. + Uses redis cache to track request counts. + + :param key: Key is used to identify the requests uniqueness + :param limit: Maximum number of requests to allow with in window time + :param seconds: window time to allow requests + :param methods: Limit the validation for these methods. + `ALL` is a wildcard that applies rate limit on all methods. + :type methods: string or list or tuple + + :returns: a decorator function that limit the number of requests per endpoint + """ + def ratelimit_decorator(fun): + @wraps(fun) + def wrapper(*args, **kwargs): + # Do not apply rate limits if method is not opted to check + if methods != 'ALL' and frappe.request.method.upper() not in methods: + return frappe.call(fun, **frappe.form_dict) + + identity = frappe.form_dict[key] + cache_key = f"rl:{frappe.form_dict.cmd}:{identity}" + + value = frappe.cache().get_value(cache_key, expires=True) or 0 + if not value: + frappe.cache().set_value(cache_key, 0, expires_in_sec=seconds) + + value = frappe.cache().incrby(cache_key, 1) + if value > limit: + frappe.throw(_("You hit the rate limit because of too many requests. Please try after sometime.")) + + return frappe.call(fun, **frappe.form_dict) + return wrapper + return ratelimit_decorator
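A hedged usage sketch of the new decorator (the endpoint name, key and import path are hypothetical): applied to a whitelisted endpoint, it allows at most `limit` requests per `key` value inside the window.

import frappe
from frappe.rate_limiter import rate_limit  # assumed import location

@frappe.whitelist(allow_guest=True)
@rate_limit(key="email", limit=5, seconds=60 * 60, methods=["POST"])
def request_password_reset(email):
    # At most 5 POSTs per hour are accepted for a given `email` value;
    # further requests are rejected by the decorator with a rate-limit error.
    ...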
Misc cleanup: reinstate the `isTopic` computed property, remove the duplicate KCircularLoader, instantiate content and channel from Vuex
:genContentLink="genContentLink" @close="markAsComplete" /> - <KCircularLoader v-else /> </div> </template> ...mapGetters(['isUserLoggedIn', 'currentUserId']), ...mapState(['pageName']), ...mapState('topicsTree', { + content: state => state.content, contentId: state => state.content.content_id, contentNodeId: state => state.content.id, + channel: state => state.channel, channelId: state => state.content.channel_id, contentKind: state => state.content.kind, }), extraFields: state => state.core.logging.summary.extra_fields, fullName: state => state.core.session.full_name, }), - + isTopic() { + return this.content.kind === ContentNodeKinds.TOPIC; + }, progress() { if (this.isUserLoggedIn) { // if there no attempts for this exercise, there is no progress } return this.sessionProgress; }, - nextContentNodeRoute() { // HACK Use a the Resource Viewer Link instead if (this.pageName === ClassesPageNames.LESSON_RESOURCE_VIEWER) {
Switch to stable Juju 2.7 by default for addon Also allow overriding the version used with an environment variable.
@@ -4,20 +4,18 @@ set -eu source $SNAP/actions/common/utils.sh - function get_juju_client () { # check if juju cli is already in the system. Download if it doesn't exist. if [ ! -f "${SNAP_DATA}/bin/juju" ]; then + JUJU_VERSION="${JUJU_VERSION:-2.7.0}" + JUJU_SERIES=$(echo $JUJU_VERSION | sed 's|\.[0-9]\+$||') + run_with_sudo mkdir -p "$SNAP_DATA/bin" run_with_sudo mkdir -p "$SNAP_DATA/tmp" - run_with_sudo snap download juju --channel edge --target-directory="$SNAP_DATA/tmp" - run_with_sudo "$SNAP/usr/bin/unsquashfs" -d "$SNAP_DATA/tmp/juju" $SNAP_DATA/tmp/juju_*.snap bin/juju - run_with_sudo cp "$SNAP_DATA/tmp/juju/bin/juju" "$SNAP_DATA/bin" - # TODO: Re-enable this method when 2.7 hits stable -# sudo "${SNAP}/usr/bin/curl" -L https://launchpad.net/juju/2.6/2.6.4/+download/juju-2.6.4-centos7.tar.gz -o "$SNAP_DATA/tmp/juju.tar.gz" -# #sudo "${SNAP}/bin/tar" -zxvf "$SNAP_DATA/tmp/juju.tar.gz" -C "$SNAP_DATA/tmp" -# sudo tar -zxvf "$SNAP_DATA/tmp/juju.tar.gz" -C "$SNAP_DATA/tmp" -# sudo cp "$SNAP_DATA/tmp/juju-bin/juju" "$SNAP_DATA/bin" + run_with_sudo "${SNAP}/usr/bin/curl" -L https://launchpad.net/juju/$JUJU_SERIES/$JUJU_VERSION/+download/juju-$JUJU_VERSION-centos7.tar.gz -o "$SNAP_DATA/tmp/juju.tar.gz" + run_with_sudo "${SNAP}/bin/tar" -zxvf "$SNAP_DATA/tmp/juju.tar.gz" -C "$SNAP_DATA/tmp" + run_with_sudo tar -zxvf "$SNAP_DATA/tmp/juju.tar.gz" -C "$SNAP_DATA/tmp" + run_with_sudo cp "$SNAP_DATA/tmp/juju-bin/juju" "$SNAP_DATA/bin" run_with_sudo chmod uo+x "$SNAP_DATA/bin/juju" run_with_sudo mkdir -p "$SNAP_DATA/juju/share/juju" "$SNAP_DATA/juju-home" run_with_sudo chmod -R ug+rwX "$SNAP_DATA/juju/share/juju" "$SNAP_DATA/juju-home"
README.md: Fix broken link to contribution guide Commit moved the guide from docs/contributing.md -> CONTRIBUTING.md without updating the reference in docs/README.md. Commit attempted to fix the link but did not uppercase the filename as needed.
* [Changelog](changelog.md) * User documentation - + [Contributing to Conan Center Index](../contributing.md) + + [Contributing to Conan Center Index](../CONTRIBUTING.md) + [Adding Packages to ConanCenter](how_to_add_packages.md) + [Review Process](review_process.md) + [Packaging policy](packaging_policy.md)
Update ROADMAP.md Broken link in the scikitlearn example
@@ -78,7 +78,7 @@ accelerate community innovation and collaboration. * Cloud AI Platform integration with BulkInferrer * Multi Framework Support in TFX Components * Experimental -[Scikit Learn example in TFX](https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/experimental/iris_pipeline_sklearn_local.py) +[Scikit Learn example in TFX](https://github.com/tensorflow/tfx/blob/master/tfx/examples/penguin/experimental/penguin_utils_sklearn.py) * On Device * Support for TFJS in Evaluator component * Orchestration:
ROADMAP: Update roadmap * ROADMAP: Update roadmap The roadmap is out of date. So we need to update it. * Address comment * Address comment
@@ -10,21 +10,30 @@ This document defines the roadmap for TiDB development. ## TiDB: - [ ] Optimizer - - [ ] Refactor Ranger - - [ ] Optimize the statistics info + - [x] Refactor Ranger - [ ] Optimize the cost model + - [ ] Join Reorder +- [ ] Statistics + - [x] Update statistics dynamically according to the query feedback + - [x] Analyze table automatically + - [ ] Improve the accuracy of Row Count estimation - [ ] Executor + - [ ] Push down the Projection operator to the Coprocessor + - [ ] Improve the performance of the HashJoin operator - [ ] Parallel Operators - - [ ] Compact Row Format to reduce memory usage + - [x] Projection + - [ ] Aggregation + - [ ] Sort + - [x] Compact Row Format to reduce memory usage - [ ] File Sort -- [ ] Support View -- [ ] Support Window Function +- [ ] View +- [ ] Window Function - [ ] Common Table Expression - [ ] Table Partition -- [ ] Hash time index to resolve the issue with hot regions -- [ ] Reverse Index - [ ] Cluster Index - [ ] Improve DDL + - [x] Speed up Add Index operation + - [ ] Parallel DDL - [ ] Support `utf8_general_ci` collation ## TiKV: @@ -58,7 +67,7 @@ This document defines the roadmap for TiDB development. ## TiSpark: - [ ] Limit / Order push-down -- [ ] Access through the DAG interface and deprecate the Select interface +- [x] Access through the DAG interface and deprecate the Select interface - [ ] Index Join and parallel merge join - [ ] Data Federation
Tweaks to fix GPG signing. Ref:
@@ -46,11 +46,11 @@ jobs: path: dist - name: Configure GPG Key run: | - echo -n "${{ secrets.GPG_SIGNING_KEY }}" | base64 --decode | gpg --import + echo -n "${{ secrets.GPG_SIGNING_KEY }}" | base64 --decode | gpg --import --no-tty --batch --yes - name: Sign wheel - run: gpg --batch --pinentry loopback --passphrase ${{ secrets.GPG_PASSPHRASE }} --detach-sign --armor dist/${{ inputs.whl-file-name }} + run: gpg --batch --pinentry loopback --passphrase "${{ secrets.GPG_PASSPHRASE }}" --detach-sign --armor dist/${{ inputs.whl-file-name }} - name: Sign tarball - run: gpg --batch --pinentry loopback --passphrase ${{ secrets.GPG_PASSPHRASE }} --detach-sign --armor dist/${{ inputs.tar-file-name }} + run: gpg --batch --pinentry loopback --passphrase "${{ secrets.GPG_PASSPHRASE }}" --detach-sign --armor dist/${{ inputs.tar-file-name }} - name: Publish to TestPyPi if: ${{ inputs.test }} env:
Update the release calendar Updated the release planning dates in the release calendar. <img width="245" alt="Screen Shot 2021-03-03 at 11 43 28 AM" src="https://user-images.githubusercontent.com/44108233/109744474-b3f0eb00-7c15-11eb-9a8d-14f1b2088e0d.png">
@@ -152,7 +152,11 @@ The chart below is the expected release dates of minor releases. +------------+---------+ | Date | Version | +============+=========+ -| 02/10 2020 | 1.7.0 | +| 03/08 2021 | 1.7.0 | ++------------+---------+ +| 04/30 2021 | 1.8.0 | ++------------+---------+ +| 06/11 2021 | 1.9.0 | +------------+---------+ Release Instructions
fix: fix target matching for secondary accounts Secondary accounts have a different unique_id so would fail to match during convert. Add match on device_serial_number. closes
@@ -117,11 +117,17 @@ class AlexaNotificationService(BaseNotificationService): # hide_serial(alexa.unique_id), # alexa.entity_id, # ) - if item in (alexa, alexa.name, alexa.unique_id, alexa.entity_id): + if item in ( + alexa, + alexa.name, + alexa.unique_id, + alexa.entity_id, + alexa.device_serial_number, + ): if type_ == "entities": converted = alexa elif type_ == "serialnumbers": - converted = alexa.unique_id + converted = alexa.device_serial_number elif type_ == "names": converted = alexa.name elif type_ == "entity_ids":
Add rflush It is required. It seems to work without it when a single rwrite is issued, but it is required whenever rwrite is used.
@@ -2473,6 +2473,7 @@ static int mrf_handler(request_rec *r) } ap_set_content_length(r,this_record->size); ap_rwrite(this_data,this_record->size,r); + ap_rflush(r); // Got a hit, do we log anything? if (!hit_count--) {
Make sure we clean up the runner when we quit This should also take care of the greenlets
@@ -492,6 +492,8 @@ def main(): events.quitting.fire() events.parallel_quitting.fire() + if runners.locust_runner is not None: + runners.locust_runner.quit() print_stats(runners.locust_runner.request_stats) print_percentile_stats(runners.locust_runner.request_stats) if options.csvfilebase:
Update README.md Drop the "s" from "packages" (the directory is "package/").
@@ -15,7 +15,7 @@ To train a model in FEDn you provide the client code (in 'client') as a tarball. ```bash tar -cf mnist.tar client gzip mnist.tar -cp mnist.tar.gz packages/ +cp mnist.tar.gz package/ ``` Navigate to 'https://localhost:8090/start' and follow the link to 'context' to upload the compute package.
hiero: update parse_container and ls to new functionality accepting track containers
@@ -124,11 +124,20 @@ def ls(): """ # get all track items from current timeline - all_track_items = lib.get_track_items() - - for track_item in all_track_items: - container = parse_container(track_item) - if container: + all_items = lib.get_track_items() + + # append all video tracks + for track in lib.get_current_sequence(): + if type(track) != hiero.core.VideoTrack: + continue + all_items.append(track) + + for item in all_items: + container = parse_container(item) + if isinstance(container, list): + for _c in container: + yield _c + elif container: yield container @@ -144,12 +153,7 @@ def parse_container(item, validate=True): dict: The container schema data for input containerized track item. """ - # convert tag metadata to normal keys names - if type(item) == hiero.core.VideoTrack: - data = lib.set_track_openpype_data(item) - else: - data = lib.set_track_item_pype_data(item) - + def data_to_container(item, data): if ( not data or data.get("id") != "pyblish.avalon.container" @@ -178,6 +182,19 @@ def parse_container(item, validate=True): return container + # convert tag metadata to normal keys names + if type(item) == hiero.core.VideoTrack: + return_list = [] + _data = lib.get_track_openpype_data(item) + # convert the data to list and validate them + for _, obj_data in _data.items(): + cotnainer = data_to_container(item, obj_data) + return_list.append(cotnainer) + return return_list + else: + _data = lib.get_track_item_pype_data(item) + return data_to_container(item, _data) + def update_container(track_item, data=None): """Update container data to input track_item's pype tag.
Hardcode docker username Rather than store it as a secret, which it is not, hardcode the value
@@ -34,7 +34,7 @@ jobs: - name: Docker login uses: docker/login-action@v1 with: - username: ${{ secrets.DOCKER_USERNAME }} + username: dimagi password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Run tests env:
Add gpu request support to k8s as a first pass, this hard-codes nvidia, which should be parameterized
@@ -896,18 +896,20 @@ class KubernetesDeploymentConfig(LongRunningServiceConfig): return kubernetes_env def get_resource_requirements(self) -> V1ResourceRequirements: - return V1ResourceRequirements( limits = { "cpu": self.get_cpus() + self.get_cpu_burst_add(), "memory": f"{self.get_mem()}Mi", "ephemeral-storage": f"{self.get_disk()}Mi", - }, + } requests = { "cpu": self.get_cpus(), "memory": f"{self.get_mem()}Mi", "ephemeral-storage": f"{self.get_disk()}Mi", - }, - ) + } + if self.get_gpus(): + limits["nvidia.com/gpu"] = self.get_gpus() + requests["nvidia.com/gpu"] = self.get_gpus() + return V1ResourceRequirements(limits=limits, requests=requests) def get_sidecar_resource_requirements( self, sidecar_name: str
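A minimal, self-contained sketch of the same idea using the kubernetes Python client directly (the function name and values are illustrative, not PaaSTA's API); nvidia.com/gpu is the standard extended-resource name exposed by NVIDIA's device plugin:

from kubernetes.client import V1ResourceRequirements

def build_resource_requirements(cpus: float, mem_mi: int, disk_mi: int, gpus: int = 0) -> V1ResourceRequirements:
    # Illustrative only: a simplified version of the change above (no CPU burst handling).
    limits = {"cpu": cpus, "memory": f"{mem_mi}Mi", "ephemeral-storage": f"{disk_mi}Mi"}
    requests = dict(limits)
    if gpus:
        # Extended resources such as GPUs cannot be overcommitted,
        # so request and limit are set to the same value.
        limits["nvidia.com/gpu"] = gpus
        requests["nvidia.com/gpu"] = gpus
    return V1ResourceRequirements(limits=limits, requests=requests)

print(build_resource_requirements(cpus=0.5, mem_mi=1024, disk_mi=512, gpus=1))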
doc/customdevices: use correct default argument Make it match the implementation.
@@ -109,11 +109,11 @@ class UARTDevice(): baudrate (int): Baudrate of the UART device. timeout (:ref:`time`): How long to wait during :meth:`.read` before giving up. If you choose ``None``, - it will wait forever. (*Default*: ``None``) + it will wait forever (*Default*: ``None``). """ pass - def read(self, length): + def read(self, length=1): """Read a given number of bytes from the buffer. Your program will wait until the requested number of bytes are @@ -121,7 +121,7 @@ class UARTDevice(): exception is raised. Arguments: - length (``int``): How many bytes to read. + length (``int``): How many bytes to read (*Default*: 1). Returns: ``bytes``: Bytes returned from the device.
Animation Editor : fix "multiple value" exception when dragging tangent ref
@@ -397,8 +397,10 @@ private: double solveForTime( const double tl, const double th, const double time ) const { - if( time <= 0.0 ) return 0.0; - if( time >= 1.0 ) return 1.0; + // NOTE : keeping tl and th in the range [0,1] ensures f is monotonic increasing over interval [0,1]. + + assert( 0.0 <= tl && tl <= 1.0 ); + assert( 0.0 <= th && th <= 1.0 ); // compute coeffs @@ -409,26 +411,21 @@ private: const double bt2 = bt + bt; const double at3 = at + at + at; - // check that f is monotonic and therefore has one (possibly repeated) real root. + // check that f is monotonic increasing over interval [0,1] and therefore has one (possibly repeated) real root. // - // NOTE : f is monotonic over the interval [0,1] when the solutions of f' either, - // both lie outside the interval (0,1) or lie in the interval (0,1) and are equal - // in which case the discriminant of f' is zero. - // NOTE : keeping tl and th in the range [0,1] ensures f is monotonic over interval [0,1]. + // NOTE : As f(0) = 0 and f(1) = 1, f is monotonic increasing iff f'(0) >= 0 and f'(1) >= 0 + // + // f'(0) = c(t) + // f'(1) = 3a(t) + 2b(t) + c(t) + // + // when th == 1 floating point imprecision gives f'(1) as slighty less than 0. - const double discriminant = bt2 * bt2 - 4.0 * at3 * ct; + assert( ( ct >= 0.0 ) && ( at3 + bt2 + ct >= ( ( th == 1.0 ) ? -1e-15 : 0.0 ) ) ); - if( discriminant > 1e-13 ) - { - const double q = - 0.5 * ( bt2 + std::copysign( std::sqrt( discriminant ), bt2 ) ); - const double s1 = q / at3; - const double s2 = ct / q; + // simple cases - if( ( 0.0 < s1 && s1 < 1.0 ) || ( 0.0 < s2 && s2 < 1.0 ) ) - { - throw IECore::Exception( "Animation : Bezier interpolation mode : curve segment has multiple values for given time." ); - } - } + if( time <= 0.0 ) return 0.0; + if( time >= 1.0 ) return 1.0; // root bracketed in interval [0,1]
Fix in the triangles.py module for the moment of inertia computation: a flipped sign; see the given reference.
@@ -266,11 +266,11 @@ def mass_properties(triangles, (volume * (center_mass[[0, 2]]**2).sum()) inertia[2, 2] = integrated[4] + integrated[5] - \ (volume * (center_mass[[0, 1]]**2).sum()) - inertia[0, 1] = ( + inertia[0, 1] = - ( integrated[7] - (volume * np.product(center_mass[[0, 1]]))) - inertia[1, 2] = ( + inertia[1, 2] = - ( integrated[8] - (volume * np.product(center_mass[[1, 2]]))) - inertia[0, 2] = ( + inertia[0, 2] = - ( integrated[9] - (volume * np.product(center_mass[[0, 2]]))) inertia[2, 0] = inertia[0, 2] inertia[2, 1] = inertia[1, 2]
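For reference, the sign convention being restored here is the standard one for the inertia tensor (a textbook result, not specific to this library): the off-diagonal entries are the negated products of inertia, shifted to the centre of mass via the parallel-axis theorem. For the (x, y) entry, with unit density so that the mass reduces to the volume (as in the code above):

I_{xy}^{\mathrm{COM}} \;=\; -\left( \int_V x\, y \,\mathrm{d}V \;-\; V\,\bar{x}\,\bar{y} \right)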
Make fieldnames of csv.DictReader Optional Also run stdlib/2and3/csv.pyi through black and isort
-from collections import OrderedDict import sys -from typing import Any, Dict, Iterable, Iterator, List, Mapping, Optional, Sequence, Text, Type, Union - -from _csv import (_reader, - _writer, - reader as reader, - writer as writer, - register_dialect as register_dialect, - unregister_dialect as unregister_dialect, - get_dialect as get_dialect, - list_dialects as list_dialects, - field_size_limit as field_size_limit, +from _csv import ( QUOTE_ALL as QUOTE_ALL, QUOTE_MINIMAL as QUOTE_MINIMAL, QUOTE_NONE as QUOTE_NONE, QUOTE_NONNUMERIC as QUOTE_NONNUMERIC, Error as Error, + _reader, + _writer, + field_size_limit as field_size_limit, + get_dialect as get_dialect, + list_dialects as list_dialects, + reader as reader, + register_dialect as register_dialect, + unregister_dialect as unregister_dialect, + writer as writer, ) +from collections import OrderedDict +from typing import Any, Dict, Iterable, Iterator, List, Mapping, Optional, Sequence, Text, Type, Union _Dialect = Union[str, Dialect, Type[Dialect]] _DictRow = Mapping[str, Any] @@ -56,7 +56,6 @@ if sys.version_info >= (3, 6): else: _DRMapping = Dict[str, str] - class DictReader(Iterator[_DRMapping]): restkey: Optional[str] restval: Optional[str] @@ -64,24 +63,37 @@ class DictReader(Iterator[_DRMapping]): dialect: _Dialect line_num: int fieldnames: Sequence[str] - def __init__(self, f: Iterable[Text], fieldnames: Sequence[str] = ..., - restkey: Optional[str] = ..., restval: Optional[str] = ..., dialect: _Dialect = ..., - *args: Any, **kwds: Any) -> None: ... + def __init__( + self, + f: Iterable[Text], + fieldnames: Optional[Sequence[str]] = ..., + restkey: Optional[str] = ..., + restval: Optional[str] = ..., + dialect: _Dialect = ..., + *args: Any, + **kwds: Any, + ) -> None: ... def __iter__(self) -> DictReader: ... if sys.version_info >= (3,): def __next__(self) -> _DRMapping: ... else: def next(self) -> _DRMapping: ... - class DictWriter(object): fieldnames: Sequence[str] restval: Optional[Any] extrasaction: str writer: _writer - def __init__(self, f: Any, fieldnames: Iterable[str], - restval: Optional[Any] = ..., extrasaction: str = ..., dialect: _Dialect = ..., - *args: Any, **kwds: Any) -> None: ... + def __init__( + self, + f: Any, + fieldnames: Iterable[str], + restval: Optional[Any] = ..., + extrasaction: str = ..., + dialect: _Dialect = ..., + *args: Any, + **kwds: Any, + ) -> None: ... def writeheader(self) -> None: ... def writerow(self, rowdict: _DictRow) -> None: ... def writerows(self, rowdicts: Iterable[_DictRow]) -> None: ...
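The stub change matches the runtime behaviour of the standard library: fieldnames may be omitted, in which case DictReader takes the field names from the first row of the file. A short, runnable illustration:

import csv
import io

data = io.StringIO("name,age\nada,36\ngrace,45\n")
reader = csv.DictReader(data)        # no fieldnames argument: header row is used
print(reader.fieldnames)             # ['name', 'age'] (read lazily on first access)
for row in reader:
    print(row["name"], row["age"])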
Update histogram.py Make the default number of bins 100.
@@ -55,7 +55,7 @@ def _hist_gray(gray_img, bins, lower_bound, upper_bound, mask=None): # return hist_data -def histogram(img, mask=None, bins=None, lower_bound=None, upper_bound=None, title=None): +def histogram(img, mask=None, bins=100, lower_bound=None, upper_bound=None, title=None): """Plot a histogram using ggplot :param img: (numpy.ndarray) = image to analyze :param mask: (numpy.ndarray) = (optional) binary mask made from selected contours, by default mask = None
trivial: more suitable log in set_admin_password We support changing the password on both Windows and Linux, so "Admin password" is preferable to "Root password" in the log.
@@ -3411,7 +3411,7 @@ class ComputeManager(manager.Manager): try: self.driver.set_admin_password(instance, new_pass) - LOG.info("Root password set", instance=instance) + LOG.info("Admin password set", instance=instance) instance.task_state = None instance.save( expected_task_state=task_states.UPDATING_PASSWORD)
Removed split The split was causing a whitespace character to be returned.
@@ -60,7 +60,7 @@ class GridEngineBatchSystem(AbstractGridEngineBatchSystem): def submitJob(self, subLine): process = subprocess.Popen(subLine, stdout=subprocess.PIPE) - result = int(process.stdout.readline().strip().split('.')[0]) + result = int(process.stdout.readline().strip()) return result def getJobExitCode(self, sgeJobID):
Update elf_mirai.txt Missed trails for binaries.
@@ -1479,3 +1479,26 @@ senpai.site # Reference: https://twitter.com/0xrb/status/1107592182100189184 /Pemex1.sh +/loligang.arc +/loligang.arm +/loligang.arm4 +/loligang.armv4l +/loligang.arm5 +/loligang.arm5n +/loligang.arm6 +/loligang.arm7 +/loligang.dbg +/loligang.i586 +/loligang.i686 +/loligang.m68k +/loligang.mips +/loligang.mips64 +/loligang.mpsl +/loligang.ppc +/loligang.sh4 +/loligang.spc +/loligang.sparc +/loligang.x32 +/loligang.x64 +/loligang.x86 +/loligang.x86_64
Skip pillar refresh test This test is flaky and fails intermittently, even with the `flaky` decorator. Skipping for now until we can debug further.
@@ -13,6 +13,7 @@ import textwrap from tests.support.case import ModuleCase from tests.support.helpers import flaky from tests.support.paths import TMP_PILLAR_TREE +from tests.support.unit import skipIf # Import Salt Libs import salt.utils.files @@ -169,6 +170,7 @@ class SaltUtilSyncModuleTest(ModuleCase): self.assertEqual(ret, expected_return) +@skipIf(True, 'Pillar refresh test is flaky. Skipping for now.') class SaltUtilSyncPillarTest(ModuleCase): ''' Testcase for the saltutil sync pillar module
Fix entity_to_id mapping The entity_to_id mapping must be initialised from all triples, not just the training triples, as e.g. the test part contains entities which do not occur in the train part (neither as subject nor as object).
@@ -32,8 +32,10 @@ def main(training_file, test_file, output_direc): # Step 1: Create instances log.info("Create instances") training_triples = load_triples(path=training_file) + test_triples = load_triples(path=test_file) + all_triples = np.concatenate([training_triples, test_triples], axis=0) - entity_to_id, relation_to_id = create_entity_and_relation_mappings(triples=training_triples) + entity_to_id, relation_to_id = create_entity_and_relation_mappings(triples=all_triples) mapped_training_triples = map_triples_elements_to_ids(triples=training_triples, entity_to_id=entity_to_id, rel_to_id=relation_to_id) @@ -75,7 +77,6 @@ def main(training_file, test_file, output_direc): ) # Step 4: Prepare test triples - test_triples = load_triples(path=test_file) mapped_test_triples = map_triples_elements_to_ids(triples=test_triples, entity_to_id=entity_to_id, rel_to_id=relation_to_id)
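The reasoning above in a tiny, self-contained form (toy data; helper names are not the project's): building the vocabulary from the training triples alone would fail for test-only entities, while building it from all triples succeeds.

import numpy as np

train = np.array([["a", "likes", "b"], ["b", "likes", "c"]])
test = np.array([["d", "likes", "a"]])      # 'd' never appears in train

all_triples = np.concatenate([train, test], axis=0)
entities = sorted(set(all_triples[:, 0]) | set(all_triples[:, 2]))
entity_to_id = {e: i for i, e in enumerate(entities)}

# With a train-only vocabulary, entity_to_id['d'] would raise a KeyError.
mapped_test = [(entity_to_id[s], entity_to_id[o]) for s, _, o in test]
print(mapped_test)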
fix slack backend get_user_details contract Correct the username when the USERNAME_WITH_TEAM setting is enabled.
@@ -40,7 +40,7 @@ class SlackOAuth2(BaseOAuth2): if self.setting('USERNAME_WITH_TEAM', True) and team and \ 'name' in team: - name = '{0}@{1}'.format(name, response['team']['name']) + username = '{0}@{1}'.format(username, response['team']['name']) return { 'username': username,
Add CRD admin permission to deployer SA This follows the instructions here
@@ -94,5 +94,11 @@ x-google-marketplace: deployerServiceAccount: roles: - type: ClusterRole # This is a cluster-wide ClusterRole + rulesType: CUSTOM # We specify our own custom RBAC roles + rules: + - apiGroups: ['apiextensions.k8s.io'] + resources: ['customresourcedefinitions'] + verbs: ['*'] + - type: Role # This is a namespaced Role rulesType: PREDEFINED rulesFromRoleName: edit # Use predefined role named "edit"
Include authorization.conf in snapshot * Add log to make clear when something is not copied This is helpful to know if for some reason the expected file hasn't been found and it won't be included in the snapshot. * Add authorization.conf to the snapshot
@@ -92,6 +92,14 @@ def copy_files_between_manager_and_snapshot(archive_root, # This is a 4.x+ install, files go where they went. data_to_copy = [(path, path) for path in data_to_copy] + # Include roles configuration file in snapshot + data_to_copy.append( + ( + '/opt/manager/authorization.conf', + 'authorization.conf', + ), + ) + local_cert_dir = os.path.dirname(get_local_rest_certificate()) if to_archive: data_to_copy.append((local_cert_dir, @@ -183,6 +191,7 @@ def restore_composer_files(archive_root): def copy_snapshot_path(source, destination): # source doesn't need to exist, then ignore if not os.path.exists(source): + ctx.logger.warning('Source not found: {0}. Skipping...'.format(source)) return ctx.logger.debug( 'Copying from dump: {0} to: {1}..'.format(source, destination))
Update documentation of GenericInvestmentStorageBlock The documentation of GenericInvestmentStorageBlock now uses the same wording as the documentation of GenericStorageBlock. To avoid duplication, it redirects there where applicable.
@@ -221,7 +221,7 @@ class GenericStorageBlock(SimpleBlock): **The following constraints are created:** - Set last time step to the initial capacity if `balanced == True` + Set last time step to the initial capacity if :attr:`balanced == True` .. math:: E(n, t_{last}) = &E(n, -1)\\ &\forall n \in \textrm{STORAGES\_BALANCED} @@ -240,6 +240,8 @@ class GenericStorageBlock(SimpleBlock): invest\_relation\_input\_output(n) \\ \forall n \in \textrm{INVEST\_REL\_IN\_OUT} + + =========================== ======================= ========= symbol explanation attribute =========================== ======================= ========= @@ -428,22 +430,15 @@ class GenericInvestmentStorageBlock(SimpleBlock): **The following constraints are build:** Storage balance + Same as for :class:`.GenericStorageBlock`, except .. math:: - capacity(n, t) = &capacity(n, t\_previous(t)) \cdot - (1 - \delta(n, t)) \\ - &- (flow(n, target(n), t)) / (outflow\_conversion\_factor(n) \cdot - \tau) \\ - &+ flow(source(n), n, t) \cdot inflow\_conversion\_factor(n) \cdot \ - \tau \textrm{,} \\ - &\forall n \in \textrm{INVESTSTORAGES} \textrm{,} \\ - &\forall t \in \textrm{TIMESTEPS}. + n \in \textrm{INVESTSTORAGES} + Initial capacity of :class:`.network.Storage` .. math:: - capacity(n, t_{last}) = invest(n) \cdot - initial\_storage\_level(n), \\ - \forall n \in \textrm{INITIAL\_CAPACITY,} \\ - \forall t \in \textrm{TIMESTEPS}. + E(n, -1) = invest(n) \cdot c(n, -1), \\ + \forall n \in \textrm{INITIAL\_STORAGE\_LEVEL}. Connect the invest variables of the storage and the input flow. .. math:: InvestmentFlow.invest(source(n), n) + existing = @@ -462,13 +457,12 @@ class GenericInvestmentStorageBlock(SimpleBlock): \forall n \in \textrm{INVEST\_REL\_IN\_OUT} Maximal capacity :attr:`om.InvestmentStorage.max_capacity[n, t]` - .. math:: capacity(n, t) \leq invest(n) \cdot capacity\_min(n, t), \\ + .. math:: E(n, t) \leq invest(n) \cdot c_{min}(n, t), \\ \forall n \in \textrm{MAX\_INVESTSTORAGES,} \\ \forall t \in \textrm{TIMESTEPS}. Minimal capacity :attr:`om.InvestmentStorage.min_capacity[n, t]` - .. math:: capacity(n, t) \geq invest(n) \cdot capacity\_min(n, t), - \\ + .. math:: E(n, t) \geq invest(n) \cdot c_{min}(n, t), \\ \forall n \in \textrm{MIN\_INVESTSTORAGES,} \\ \forall t \in \textrm{TIMESTEPS}. @@ -483,6 +477,10 @@ class GenericInvestmentStorageBlock(SimpleBlock): :attr:`om.InvestStorages.investment_costs` and their value after optimization by :meth:`om.InvestStorages.investment_costs()` . + + The symbols are the same as in:class:`.GenericStorageBlock`. + + """ CONSTRAINT_GROUP = True
Cat program revised (v2) (bugfix) Additional change: * Fixed improper behavior if no arguments were passed
@@ -49,11 +49,11 @@ def no_files(): def main(): """Entry point of the cat program.""" - try: # Read the arguments passed to the program - with_files(sys.argv[1:]) - except IndexError: + if not sys.argv[1:]: no_files() + else: + with_files(sys.argv[1:]) if __name__ == "__main__": main()
Improve her_ddpg_fetchreach parameters Improve parameters of example her_ddpg_fetchreach; the original parameters are not working
"""This is an example to train a task with DDPG + HER algorithm. Here it creates a gym environment FetchReach. - -Results (may vary by seed): - AverageSuccessRate: 0.9 - RiseTime: epoch 8 """ import gym import tensorflow as tf @@ -67,11 +63,11 @@ def her_ddpg_fetchreach(ctxt=None, seed=1): qf_lr=1e-3, qf=qf, replay_buffer=replay_buffer, - target_update_tau=0.05, - steps_per_epoch=20, - max_path_length=100, + target_update_tau=0.01, + steps_per_epoch=50, + max_path_length=250, n_train_steps=40, - discount=0.9, + discount=0.95, exploration_policy=exploration_policy, policy_optimizer=tf.compat.v1.train.AdamOptimizer, qf_optimizer=tf.compat.v1.train.AdamOptimizer, @@ -80,7 +76,7 @@ def her_ddpg_fetchreach(ctxt=None, seed=1): runner.setup(algo=ddpg, env=env) - runner.train(n_epochs=50, batch_size=100) + runner.train(n_epochs=50, batch_size=256) her_ddpg_fetchreach()
[doc] Enable documentation generation for the makecat.py script Put all global settings into main and use global variables for access; rename the global variable "main" to "main_ns"; use pywikibot.handle_args(args) before handling local options.
@@ -58,10 +58,11 @@ class MakeCatBot(SingleSiteBot, NoRedirectPageBot): """Bot tries to find new articles for a given category.""" - @classmethod - def needcheck(cls, pl): + @staticmethod + def needcheck(pl): """Verify whether the current page may be processed.""" - if main: + global main_ns, checked, skipdates + if main_ns: if pl.namespace() != 0: return False if pl in checked: @@ -79,6 +80,10 @@ class MakeCatBot(SingleSiteBot, NoRedirectPageBot): def include(cls, pl, checklinks=True, realinclude=True, linkterm=None, summary=''): """Include the current page to the working category.""" + global mysite + global workingcat, parentcats, removeparent + global checkforward, checkbackward + global checked, tocheck cl = checklinks if linkterm: actualworkingcat = pywikibot.Category(mysite, workingcat.title(), @@ -125,6 +130,9 @@ class MakeCatBot(SingleSiteBot, NoRedirectPageBot): @classmethod def asktoadd(cls, pl, summary): """Work on current page and ask to add article to category.""" + global mysite + global checked, tocheck + global excludefile if pl.site != mysite: return if pl.isRedirectPage(): @@ -201,17 +209,35 @@ class MakeCatBot(SingleSiteBot, NoRedirectPageBot): pywikibot.output('Not understood.') -try: - checked = {} +def main(*args): + """ + Process command line arguments and invoke bot. + + If args is an empty list, sys.argv is used. + + @param args: command line arguments + @type args: list of unicode + """ + global main_ns, skipdates + global mysite + global workingcat, parentcats, removeparent + global checkforward, checkbackward + global checked, tocheck + global excludefile + + main_ns = True skipdates = False + removeparent = True checkforward = True checkbackward = True + checked = {} + tocheck = DequeGenerator() + checkbroken = True - removeparent = True - main = True workingcatname = '' - tocheck = DequeGenerator() - for arg in pywikibot.handle_args(): + + local_args = pywikibot.handle_args(args) + for arg in local_args: if arg.startswith('-nodate'): skipdates = True elif arg.startswith('-forward'): @@ -222,7 +248,7 @@ try: elif arg.startswith('-keepparent'): removeparent = False elif arg.startswith('-all'): - main = False + main_ns = False elif not workingcatname: workingcatname = arg @@ -296,6 +322,10 @@ try: if checkbroken or page.exists(): MakeCatBot.asktoadd(page, summary) + +if __name__ == '__main__': + try: + main() finally: try: excludefile.close()
Update to use the latest stable Ocean dwave-system -> 0.8.x dwave-hybrid -> 0.4.x
@@ -30,9 +30,9 @@ else: install_requires = [ 'dwave-networkx>=0.8.0,<0.9.0', - 'dwave-system>=0.7.0,<0.8.0', + 'dwave-system>=0.8.0,<0.9.0', 'dwave-qbsolv>=0.2.7,<0.3.0', - 'dwave-hybrid>=0.3.0,<0.4.0', + 'dwave-hybrid>=0.4.0,<0.5.0', 'dwave-neal>=0.5.0,<0.6.0', 'dwave-tabu>=0.2.0,<0.3.0', 'dimod>=0.8.0,<0.9.0',
Add BaseException to the restart catch. Since catching Exception is not enough, I added BaseException: some exceptions do not inherit from Exception but directly from BaseException.
@@ -1021,7 +1021,7 @@ def main(): runApp() except KeyboardInterrupt: raise - except Exception as e: + except(BaseException, Exception) as e: if app.autoRestart(): # Wait 30 second and try to relaunch application time.sleep(30)
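A note on the exception hierarchy referenced above: in Python, Exception is itself a subclass of BaseException; what `except Exception` misses are the exceptions that derive directly from BaseException, such as SystemExit and KeyboardInterrupt. A small, runnable illustration:

print(issubclass(Exception, BaseException))   # True
print(issubclass(SystemExit, Exception))      # False: derives from BaseException directly

try:
    raise SystemExit(1)
except Exception:
    print("never reached")
except BaseException:
    print("caught only because BaseException is handled")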
Cache the pre-built image The worker container can then use it. This saves a *lot* of time (and otherwise wasted resources).
@@ -43,6 +43,8 @@ services: build: context: . target: dev + # Cache the built image to be used by the inventree-dev-worker process + image: inventree-dev-image ports: # Expose web server on port 8000 - 8000:8000 @@ -60,9 +62,7 @@ services: # Background worker process handles long-running or periodic tasks inventree-dev-worker: container_name: inventree-dev-worker - build: - context: . - target: dev + image: inventree-dev-image command: invoke worker depends_on: - inventree-dev-server
Fix deprecated scalar type in ATen/native/Distributions.cpp Summary: Pull Request resolved:
@@ -230,7 +230,7 @@ Tensor _s_gamma_cpu(const Tensor& alpha, Generator *gen) { Tensor _s_dirichlet_cpu(const Tensor& alpha, Generator *gen) { Tensor ret = at::zeros(alpha.sizes(), alpha.options()); - AT_DISPATCH_FLOATING_TYPES(ret.type(), "dirichlet", [&] { + AT_DISPATCH_FLOATING_TYPES(ret.scalar_type(), "dirichlet", [&] { Tensor gamma = at::zeros(alpha.sizes(), alpha.options().dtype(ScalarType::Double)); THGenerator* generator = get_generator(gen); std::lock_guard<std::mutex> lock(generator->mutex);
Fixes test_equal Summary: Pull Request resolved: Test Plan: Imported from OSS
@@ -560,19 +560,32 @@ class TestQuantizedOps(TestCase): qX2 = torch.quantize_linear(X2, scale=scale2, zero_point=zero_point2, dtype=torch_type2) - def equal_ref(X, params, X_scheme, X2, params2, X2_scheme): - if X_scheme != X2_scheme: + def equal_ref(qX, qX2): + if qX.qscheme() != qX2.qscheme(): return False - if params != params2: + if qX.shape != qX2.shape: return False - if X.shape != X2.shape: + if qX.qscheme() == torch.per_tensor_affine: + if qX.q_scale() != qX2.q_scale(): return False - if (X != X2).any(): + if qX.q_zero_point() != qX2.q_zero_point(): + return False + elif qX.qscheme() == torch.per_channel_affine: + if (qX.q_per_channel_scales() != + qX2.q_per_channel_scales()).any(): + return False + if (qX.q_per_channel_zero_points() != + qX2.q_per_channel_zero_points()).any(): + return False + else: + raise NotImplementedError("Don't know what to do with", + qX.qscheme()) + if (qX.int_repr().to(float) != qX2.int_repr().to(float)).any(): return False return True - self.assertEqual(qX.equal(qX), equal_ref(X, X_params, X_scheme, X, X_params, X_scheme)) - self.assertEqual(qX.equal(qX2), equal_ref(X, X_params, X_scheme, X2, X2_params, X2_scheme)) + self.assertEqual(qX.equal(qX), equal_ref(qX, qX)) + self.assertEqual(qX.equal(qX2), equal_ref(qX, qX2)) @unittest.skipIf(
lint Call list() immediately because in Python 3 values() no longer returns a list, which can cause issues.
@@ -362,9 +362,9 @@ class LocationTypesView(BaseDomainView): payload_loc_type_name_by_pk[loc_type['pk']] = loc_type['name'] if loc_type.get('code'): payload_loc_type_code_by_pk[loc_type['pk']] = loc_type['code'] - names = payload_loc_type_name_by_pk.values() + names = list(payload_loc_type_name_by_pk.values()) names_are_unique = len(names) == len(set(names)) - codes = payload_loc_type_code_by_pk.values() + codes = list(payload_loc_type_code_by_pk.values()) codes_are_unique = len(codes) == len(set(codes)) if not names_are_unique or not codes_are_unique: raise LocationConsistencyError("'name' and 'code' are supposed to be unique")
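A standalone illustration of the Python 3 behaviour behind this change (a generic dict, not the HQ payload): values() returns a view rather than a list; views support len() and repeated iteration, but code that ends up with a genuine iterator (e.g. from itervalues-style helpers) is single-use, so materialising with list() keeps the semantics explicit.

d = {"a": 1, "b": 2, "c": 1}

view = d.values()                  # a view in Python 3, not a list
print(len(view), len(set(view)))   # 3 2 -- views support len() and re-iteration

it = iter(d.values())              # a genuine iterator is single-use
print(len(set(it)))                # 2
print(len(set(it)))                # 0 -- already exhausted

values = list(d.values())          # materialising makes repeated use unambiguous
print(len(values) == len(set(values)))  # False: the value 1 appears twice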
Update integration-PhishAI.yml * Update integration-PhishAI.yml Url should be URL, as in the rest of the integrations, to maintain consistency * Update integration-PhishAI.yml * Update integration-PhishAI.yml * Update integration-PhishAI.yml
@@ -107,7 +107,7 @@ script: ContentsFormat: formats.json, HumanReadable: md, EntryContext: { - 'Url(obj.Data.Url && obj.Data.Url===val.Data.Url)' : { + 'URL(obj.Data.URL && obj.Data.URL===val.Data.URL)' : { 'Data': ec.PhishAI.Url }, 'IP(obj.Hostname && obj.Hostname===val.Hostname)' : { @@ -178,3 +178,4 @@ script: type: string description: Check if url is phishing and get details about the brand that is being phished +releaseNotes: "-"
Fix for building OpenCAS On some systems, e.g. Ubuntu, the order in which the `math` library is linked matters; when it is linked too early, building OpenCAS fails.
@@ -101,7 +101,7 @@ sync: # $(TARGET): $(TARGET).a @echo " LD " $@ - @$(CC) $(CFLAGS) $(LDFLAGS) -o $(TARGET) $< + @$(CC) $(CFLAGS) -o $(TARGET) $< $(LDFLAGS) $(TARGET).a: $(patsubst %,$(OBJDIR)%,$(OBJS)) @echo " AR " $@
[microNPU] removing extra bytes for workspace Given that microNPU codegen uses target hooks, it goes through the core compiler, which updates workspace sizes. We don't need the additional sizes anymore; this commit removes them.
@@ -222,7 +222,7 @@ def build_source(module, inputs, outputs, accel="ethos-u55-256", output_toleranc inputs=inputs, outputs=outputs, output_tolerance=output_tolerance, - extra_memory_in_bytes=16 * 1024 * 1024, + extra_memory_in_bytes=0, ), interface_api="c", use_unpacked_api=True,
cabana: optimize chart update
@@ -325,7 +325,6 @@ void ChartView::updateLineMarker(double current_sec) { chart()->plotArea().width() * (current_sec - axis_x->min()) / (axis_x->max() - axis_x->min()); if (int(line_marker->line().x1()) != x) { line_marker->setLine(x, 0, x, height()); - chart()->update(); } } @@ -410,6 +409,7 @@ void ChartView::mouseReleaseEvent(QMouseEvent *event) { } else { QGraphicsView::mouseReleaseEvent(event); } + setViewportUpdateMode(QGraphicsView::MinimalViewportUpdate); } void ChartView::mouseMoveEvent(QMouseEvent *ev) { @@ -436,6 +436,8 @@ void ChartView::mouseMoveEvent(QMouseEvent *ev) { track_line->setVisible(value != vals.end()); value_text->setVisible(value != vals.end()); track_ellipse->setVisible(value != vals.end()); + } else { + setViewportUpdateMode(QGraphicsView::FullViewportUpdate); } QChartView::mouseMoveEvent(ev); }
removed unnecessary call to `mdbx.stop_sync` Syncing will be stopped automatically when mdbx is garbage-collected, and if not, a force-quit does not hurt either.
@@ -488,9 +488,7 @@ class MaestralGuiApp(QtWidgets.QSystemTrayIcon): def quit(self): """Quit Maestral""" - if self.started and self.mdbx: - self.mdbx.stop_sync() - if not is_macos_bundle: + if self.started and self.mdbx and not is_macos_bundle: stop_maestral_daemon_process(CONFIG_NAME) self.deleteLater() QtCore.QCoreApplication.quit()
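The rationale above (cleanup happens when the object is collected) follows a common Python pattern; a generic, hypothetical sketch — not Maestral's actual implementation — of tying a stop routine to object finalisation:

import weakref

class SyncDaemon:
    """Hypothetical stand-in for an object owning a background sync loop."""

    def __init__(self):
        self._running = True
        # weakref.finalize is more predictable than __del__ for cleanup hooks;
        # the callback must not hold a reference to self, hence the staticmethod.
        self._finalizer = weakref.finalize(self, SyncDaemon._log_stop, id(self))

    @staticmethod
    def _log_stop(obj_id):
        print(f"sync stopped for daemon {obj_id}")

    def stop_sync(self):
        self._running = False
        self._finalizer()   # idempotent: the callback runs at most once

daemon = SyncDaemon()
del daemon                  # finalizer fires here (or at interpreter exit)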
Standalone: Fix, for Linux remove both RPATH and RUNPATH. * The latter also makes it load outside stuff, which should be avoided for real standalone usage as much as possible.
@@ -942,7 +942,7 @@ def getSharedLibraryRPATH(filename): ) for line in stdout.split(b"\n"): - if b"RPATH" in line: + if b"RPATH" in line or b"RUNPATH" in line: return line[line.find(b'[')+1:line.rfind(b']')] return None
Restrict carbon version to compatible release Carbon 10.9.0 introduced CSS changes that break how the common properties module is displayed. This forces the editor to use an earlier compatible version.
"@elyra-ai/canvas": "^6.1.23", "@elyra/application": "^0.4.0", "autoprefixer": "^9.6.0", - "carbon-components": "^10.3.2", + "carbon-components": "~10.8.1", "json-loader": "^0.5.7", "react": "^16.8.6", "react-dom": "^16.8.6",
Update README.md Move virtualenv configuration to gitbook
@@ -85,15 +85,6 @@ GPU: NVIDIA, GTX 1080+ recommended 3. Join the community * Once you've made something cool, be sure to share it on the Discord \([https://discord.gg/t4WWBPF](https://discord.gg/t4WWBPF)\). -#### Optional `virtualenv`: - -If you use virtualenv: - -```bash - virtualenv --system-site-packages -p python3 hypergan - source hypergan/bin/activate -``` - ### Troubleshooting Make sure that your cuda, nvidia drivers, pillow, pytorch, and pytorch vision are the latest version.
SRIOV-VerifyVF-Connection.sh: small fix The asterisk was removed from the 'find' commands; in some cases, it returned more interfaces than expected.
@@ -63,7 +63,7 @@ while [ $__iterator -le "$vf_count" ]; do synthetic_interface_vm_1=$(ip addr | grep $static_IP_1 | awk '{print $NF}') LogMsg "Synthetic interface found: $synthetic_interface_vm_1" - vf_interface_vm_1=$(find /sys/devices/* -name "*${synthetic_interface_vm_1}*" | grep "pci" | sed 's/\// /g' | awk '{print $12}') + vf_interface_vm_1=$(find /sys/devices/* -name "*${synthetic_interface_vm_1}" | grep "pci" | sed 's/\// /g' | awk '{print $12}') LogMsg "Virtual function found: $vf_interface_vm_1" # Ping the remote host @@ -97,7 +97,7 @@ while [ $__iterator -le "$vf_count" ]; do # Get the VF name from VM2 cmd_to_send="ip addr | grep \"$static_IP_2\" | awk '{print \$NF}'" synthetic_interface_vm_2=$(ssh -i "$HOME"/.ssh/"$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no "$remote_user"@"$static_IP_2" "$cmd_to_send") - cmd_to_send="find /sys/devices/* -name "*${synthetic_interface_vm_2}*" | grep pci | sed 's/\// /g' | awk '{print \$12}'" + cmd_to_send="find /sys/devices/* -name "*${synthetic_interface_vm_2}" | grep pci | sed 's/\// /g' | awk '{print \$12}'" vf_interface_vm_2=$(ssh -i "$HOME"/.ssh/"$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no "$remote_user"@"$static_IP_2" "$cmd_to_send") rx_value=$(ssh -i "$HOME"/.ssh/"$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no "$remote_user"@"$static_IP_2" cat /sys/class/net/"${vf_interface_vm_2}"/statistics/rx_packets)
Update settings.py Finally, I got a sample that shows illustrative scanning via ports 139 and 445...
@@ -79,7 +79,7 @@ HIGH_PRIORITY_REFERENCES = ("bambenekconsulting.com", "github.com/stamparm/black CONSONANTS = "bcdfghjklmnpqrstvwxyz" BAD_TRAIL_PREFIXES = ("127.", "192.168.", "localhost") LOCALHOST_IP = {4: "127.0.0.1", 6: "::1"} -POTENTIAL_INFECTION_PORTS = (135, 445, 1433, 3389, 6379, 6892, 6893, 6901) +POTENTIAL_INFECTION_PORTS = (135, 139, 445, 1433, 3389, 6379, 6892, 6893, 6901) IGNORE_DNS_QUERY_SUFFIXES = set(("arpa", "local", "guest", "intranet", "int", "corp", "home", "lan", "intra", "intran", "workgroup", "localdomain", "url", "alienvault")) VALID_DNS_NAME_REGEX = r"\A[a-zA-Z0-9.-]*\.[a-zA-Z0-9-]+\Z" # Reference: http://stackoverflow.com/a/3523068 SUSPICIOUS_CONTENT_TYPES = ("application/vnd.ms-htmlhelp", "application/x-bsh", "application/x-chm", "application/x-sh", "application/x-shellscript", "application/hta", "text/x-scriptlet", "text/x-sh", "text/x-shellscript")
Fix `unit.fileserver.test_gitfs` for Windows Put `import pwd` in a try/except block Set `os.environ['USERNAME']` in windows using win_functions Add error function for `shutil.rmtree`
@@ -9,8 +9,12 @@ import os import shutil import tempfile import textwrap -import pwd import logging +import stat +try: + import pwd +except ImportError: + pass # Import 3rd-party libs import yaml @@ -189,7 +193,6 @@ class GitFSTest(TestCase, LoaderModuleMockMixin): self.integration_base_files = os.path.join(FILES, 'file', 'base') # Create the dir if it doesn't already exist - try: shutil.copytree(self.integration_base_files, self.tmp_repo_dir + '/') except OSError: @@ -203,6 +206,10 @@ class GitFSTest(TestCase, LoaderModuleMockMixin): if 'USERNAME' not in os.environ: try: + if salt.utils.is_windows(): + import salt.utils.win_functions + os.environ['USERNAME'] = salt.utils.win_functions.get_current_user() + else: os.environ['USERNAME'] = pwd.getpwuid(os.geteuid()).pw_name except AttributeError: log.error('Unable to get effective username, falling back to ' @@ -219,14 +226,18 @@ class GitFSTest(TestCase, LoaderModuleMockMixin): Remove the temporary git repository and gitfs cache directory to ensure a clean environment for each test. ''' - shutil.rmtree(self.tmp_repo_dir) - shutil.rmtree(self.tmp_cachedir) - shutil.rmtree(self.tmp_sock_dir) + shutil.rmtree(self.tmp_repo_dir, onerror=self._rmtree_error) + shutil.rmtree(self.tmp_cachedir, onerror=self._rmtree_error) + shutil.rmtree(self.tmp_sock_dir, onerror=self._rmtree_error) del self.tmp_repo_dir del self.tmp_cachedir del self.tmp_sock_dir del self.integration_base_files + def _rmtree_error(self, func, path, excinfo): + os.chmod(path, stat.S_IWRITE) + func(path) + def test_file_list(self): ret = gitfs.file_list(LOAD) self.assertIn('testfile', ret)
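The onerror hook added above is a common workaround for read-only files on Windows; the same idea as a standalone, runnable sketch (temporary paths are created on the fly):

import os
import shutil
import stat
import tempfile

def _force_remove(func, path, excinfo):
    # On Windows, rmtree fails on read-only files: clear the bit and retry.
    os.chmod(path, stat.S_IWRITE)
    func(path)

tmp = tempfile.mkdtemp()
target = os.path.join(tmp, "readonly.txt")
with open(target, "w") as fh:
    fh.write("x")
os.chmod(target, stat.S_IREAD)              # make the file read-only

shutil.rmtree(tmp, onerror=_force_remove)   # succeeds despite the read-only file
print(os.path.exists(tmp))                  # False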
Fix 405 Method Not Allowed bug for GET method to /dev/futurecosts App-native URLs need to come before tastypie.resources ModelResource API/Classes in urls.py
@@ -104,13 +104,10 @@ urlpatterns = [ re_path(r'', include(stable_api.urls), name='ghpghx'), path('dev/', include('job.urls')), - re_path(r'', include(dev_api.urls), name='job'), - path('dev/', include('futurecosts.urls')), + re_path(r'', include(dev_api.urls), name='job'), re_path(r'', include(dev_api.urls), name='futurecosts'), - re_path(r'', include(stable_api.urls), name='ghpghx'), - re_path(r'(.*)', page_not_found, name='404'), ]
Correct docstring for OcGetNode OcGetNode parses the output of `oc get nodes -o yaml`
@@ -28,8 +28,8 @@ OcGetEndPoints - command ``oc get endpoints -o yaml --all-namespaces`` OcGetEvent - command ``oc get event -o yaml --all-namespaces`` -------------------------------------------------------------- -OcGetNode - command ``oc get node -o yaml`` -------------------------------------------- +OcGetNode - command ``oc get nodes -o yaml`` +-------------------------------------------- OcGetPod - command ``oc get pod -o yaml --all-namespaces`` ---------------------------------------------------------- @@ -143,7 +143,7 @@ class OcGetEvent(CommandParser, YAMLParser): @parser(Specs.oc_get_node) class OcGetNode(CommandParser, YAMLParser): - """Class to parse ``oc get node -o yaml --all-namespaces``""" + """Class to parse ``oc get nodes -o yaml``""" @property def nodes(self):
text_to_speech_demo: don't use "is" for string comparison I removed the comparison to "~", because "~" is not in _symbol_to_id, and therefore that check is redundant.
@@ -97,4 +97,4 @@ def _symbols_to_sequence(symbols): def _should_keep_symbol(s): - return s in _symbol_to_id and s is not '_' and s is not '~' + return s in _symbol_to_id and s != _pad
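The general pitfall behind this fix, independent of the demo: `is` compares object identity, not value, and for strings it only sometimes coincides with `==` because of interning (CPython 3.8+ even warns about `is` with literals). A short illustration:

a = "under_score"
b = "_".join(["under", "score"])   # equal value, usually a distinct object

print(a == b)   # True  -- value comparison, almost always what is meant
print(a is b)   # False (typically) -- identity comparison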
Avoid (future) cusparse name collision Summary: A future version of cusparse will define "cusparseGetErrorString." This PR simply updates PyTorch's name for this function to "getCusparseErrorString" to avoid the collision. Pull Request resolved:
namespace at { namespace native { namespace sparse { namespace cuda { -std::string cusparseGetErrorString(cusparseStatus_t status) { +std::string getCusparseErrorString(cusparseStatus_t status) { switch(status) { case CUSPARSE_STATUS_SUCCESS: @@ -55,7 +55,7 @@ std::string cusparseGetErrorString(cusparseStatus_t status) { inline void CUSPARSE_CHECK(cusparseStatus_t status) { if (status != CUSPARSE_STATUS_SUCCESS) { - AT_ERROR("cusparse runtime error: ", cusparseGetErrorString(status)); + AT_ERROR("cusparse runtime error: ", getCusparseErrorString(status)); } }
Init local variables It's UB to use uninitialized values as arguments.
@@ -661,7 +661,7 @@ void RealGees<T>::Kernel(void* out_tuple, void** data, XlaCustomCallStatus*) { const T* a_in = reinterpret_cast<T*>(data[4]); // bool* select (T, T) = reinterpret_cast<bool* (T, T)>(data[5]); - bool (*select)(T, T); + bool (*select)(T, T) = nullptr; void** out = reinterpret_cast<void**>(out_tuple); T* a_work = reinterpret_cast<T*>(out[0]); @@ -672,8 +672,7 @@ void RealGees<T>::Kernel(void* out_tuple, void** data, XlaCustomCallStatus*) { int* sdim_out = reinterpret_cast<int*>(out[4]); int* info_out = reinterpret_cast<int*>(out[5]); - bool* b_work; - if (sort == 'N') b_work = new bool[n]; + bool* b_work = (sort != 'N') ? (new bool[n]) : nullptr; T work_query; int lwork = -1; @@ -722,7 +721,7 @@ void ComplexGees<T>::Kernel(void* out_tuple, void** data, const T* a_in = reinterpret_cast<T*>(data[4]); // bool* select (T, T) = reinterpret_cast<bool* (T, T)>(data[5]); - bool (*select)(T); + bool (*select)(T) = nullptr; void** out = reinterpret_cast<void**>(out_tuple); T* a_work = reinterpret_cast<T*>(out[0]); @@ -733,8 +732,7 @@ void ComplexGees<T>::Kernel(void* out_tuple, void** data, int* sdim_out = reinterpret_cast<int*>(out[4]); int* info_out = reinterpret_cast<int*>(out[5]); - bool* b_work = nullptr; - if (sort != 'N') b_work = new bool[n]; + bool* b_work = (sort != 'N') ? (new bool[n]) : nullptr; T work_query; int lwork = -1;
Update version 0.7.6 -> 0.8.0 New Features * Embedding composites can now return embedding in sampleset's info field Changes * Upgraded to 0.6.x branch of the cloud client which adds support for unstructured solvers, and improves polling and error handling
# ============================================================================= __all__ = ['__version__', '__author__', '__authoremail__', '__description__'] -__version__ = '0.7.6' +__version__ = '0.8.0' __author__ = 'D-Wave Systems Inc.' __authoremail__ = '[email protected]' __description__ = 'All things D-Wave System.'
fix when task_data is not dict In legacy cases the task might be only a string with its name, not a structure with additional metadata (type etc.). This implementation handles that.
@@ -121,10 +121,13 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin): ): fill_pairs.append(("task", task_data["name"])) - else: + elif isinstance(task_data, dict): for key, value in task_data.items(): fill_key = "task[{}]".format(key) fill_pairs.append((fill_key, value)) + else: + # fallback for legacy - if task_data is only task name + fill_pairs.append(("task", task_data)) self.log.debug("fill_pairs ::{}".format(fill_pairs)) multiple_case_variants = prepare_template_data(fill_pairs)
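A reduced sketch of the dispatch pattern introduced above (names and data shapes are illustrative, not the plugin's exact payload):

def task_fill_pairs(task_data):
    """Accept either a legacy task name (str) or a task metadata dict."""
    pairs = []
    if isinstance(task_data, dict):
        for key, value in task_data.items():
            pairs.append(("task[{}]".format(key), value))   # structured metadata
    else:
        pairs.append(("task", task_data))                   # legacy: bare task name
    return pairs

print(task_fill_pairs("compositing"))
print(task_fill_pairs({"name": "compositing", "type": "Compositing"}))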
refactors HCrystalBall Forecaster Refactor of HCrystalBallForecaster, see
@@ -3,8 +3,7 @@ import pandas as pd from sklearn.base import clone from sktime.forecasting.base._base import DEFAULT_ALPHA -from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin -from sktime.forecasting.base._sktime import _SktimeForecaster +from sktime.forecasting.base import BaseForecaster from sktime.utils.validation._dependencies import _check_soft_dependencies _check_soft_dependencies("hcrystalball") @@ -94,30 +93,63 @@ def _adapt_y_pred(y_pred): return y_pred.iloc[:, 0] -class HCrystalBallForecaster(_OptionalForecastingHorizonMixin, _SktimeForecaster): +class HCrystalBallForecaster(BaseForecaster): + + _tags = { + "univariate-only": True, + "requires-fh-in-fit": False, + "handles-missing-data": False, + } + def __init__(self, model): self.model = model super(HCrystalBallForecaster, self).__init__() - def fit(self, y, X=None, fh=None): - self._is_fitted = False - self._set_y_X(y, X) - self._set_fh(fh) + def _fit(self, y, X=None, fh=None): + """Fit to training data. - if fh is not None: - _check_fh(self.fh, self.cutoff) + Parameters + ---------- + y : pd.Series + Target time series with which to fit the forecaster. + fh : int, list or np.array, optional (default=None) + The forecast horizon with the steps ahead to predict. + X : pd.DataFrame, optional (default=None) + Exogenous variables are ignored + + Returns + ------- + self : returns an instance of self. + """ y, X = _adapt_y_X(y, X) self.model_ = clone(self.model) self.model_.fit(X, y) - self._is_fitted = True return self def _predict(self, fh=None, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA): + """Make forecasts for the given forecast horizon + + Parameters + ---------- + fh : int, list or np.array + The forecast horizon with the steps ahead to predict + X : pd.DataFrame, optional (default=None) + Exogenous variables (ignored) + return_pred_int : bool, optional (default=False) + Return the prediction intervals for the forecast. + alpha : float or list, optional (default=0.95) + If alpha is iterable, multiple intervals will be calculated. + + Returns + ------- + y_pred : pd.Series + Point predictions for the forecast + y_pred_int : pd.DataFrame + """ if return_pred_int: raise NotImplementedError() - _check_fh(fh, self.cutoff) X_pred = _get_X_pred(X, index=fh.to_absolute(self.cutoff).to_pandas()) y_pred = self.model_.predict(X=X_pred)