message (string, length 13–484)
diff (string, length 38–4.63k)
Update the number of solves shown on the chal window when solves are listed Closes
@@ -189,6 +189,7 @@ function updatesolves(cb){ function getsolves(id){ $.get(script_root + '/chal/'+id+'/solves', function (data) { var teams = data['teams']; + $('.chal-solves').text((parseInt(teams.length) + " Solves")); var box = $('#chal-solves-names'); box.empty(); for (var i = 0; i < teams.length; i++) {
gtk3: change editable to a widget in the header Now it's similar to the GTK4 solution. This saves some custom code and allows for some more code cleanup.
@@ -182,7 +182,6 @@ class MainWindow(Service, ActionProvider): ) self.title = builder.get_object("title") - if Gtk.get_major_version() != 3: self.modified = builder.get_object("modified") self.subtitle = builder.get_object("subtitle") self.set_title() @@ -210,7 +209,7 @@ class MainWindow(Service, ActionProvider): self.window.set_resizable(True) if Gtk.get_major_version() == 3: - self.window.show_all() + self.window.show() self.window.add_accel_group(shortcuts) self.window.connect("delete-event", self._on_window_close_request) self.window.connect("size-allocate", self._on_window_size_allocate) @@ -252,10 +251,6 @@ class MainWindow(Service, ActionProvider): else f"{gettext('New model')} - Gaphor" ) - if Gtk.get_major_version() == 3: - if self.model_changed: - title += " [" + gettext("edited") + "]" - else: self.modified.set_visible(self.model_changed) self.title.set_text(title)
Document that token-based upload is supported Closes
@@ -208,6 +208,10 @@ Environment variables See :ref:`uploading packages with environment variables <upload_envvars>` for more information. + Token-based upload to PyPI is supported. To upload using PyPI token, + set the ``FLIT_USERNAME`` value to ``__token__``, and the ``FLIT_PASSWORD`` + to the token value. + .. envvar:: FLIT_ALLOW_INVALID .. versionadded:: 0.13
Specification of BPE in tutorial I had forgotten that the running script also needs to specify the BPE block and does not take it from the main config
@@ -278,6 +278,10 @@ As for the evaluation, you need to create ``translation_run.ini``: [main] test_datasets=[<eval_data>] + [bpe_preprocess] + class=processors.bpe.BPEPreprocessor + merge_file="exp-nm-mt/data/merge_file.bpe" + [eval_data] class=dataset.load_dataset_from_files s_source="exp-nm-mt/data/test/Batch3a_en.txt.gz"
Add __enter__, __exit__ to IMAP4, make __init__ arguments optional Fixes
@@ -29,7 +29,7 @@ class IMAP4: welcome: bytes = ... capabilities: Tuple[str] = ... PROTOCOL_VERSION: str = ... - def __init__(self, host: str, port: int) -> None: ... + def __init__(self, host: str = ..., port: int = ...) -> None: ... def __getattr__(self, attr: str) -> Any: ... host: str = ... port: int = ... @@ -54,6 +54,8 @@ class IMAP4: def deleteacl(self, mailbox: str, who: str) -> CommandResults: ... if sys.version_info >= (3, 5): def enable(self, capability: str) -> CommandResults: ... + def __enter__(self) -> IMAP4: ... + def __exit__(self, *args) -> None: ... def expunge(self) -> CommandResults: ... def fetch(self, message_set: str, message_parts: str) -> CommandResults: ... def getacl(self, mailbox: str) -> CommandResults: ...
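For context, a minimal usage sketch of the behavior these stub changes describe (not part of the change itself): since Python 3.5 `imaplib.IMAP4` works as a context manager, and the constructor arguments default to `host=''` and `port=143`. The server name and credentials below are placeholders.

```python
# Minimal sketch of the stubbed behavior: IMAP4 as a context manager with
# optional constructor arguments. Host and credentials are placeholders.
import imaplib

with imaplib.IMAP4(host="imap.example.com") as client:  # __exit__ sends LOGOUT
    client.login("user", "password")                    # placeholder credentials
    client.select("INBOX")
    typ, data = client.search(None, "ALL")
    print(typ, data)
```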
Update Mixcloud oembed pattern, add https support. As per docs at
@@ -139,8 +139,8 @@ OEMBED_ENDPOINTS = { "http://video.yandex.ru/oembed.{format}": [ "^http://video\\.yandex\\.ru/users/[^#?/]+/view/.+$" ], - "http://www.mixcloud.com/oembed/": [ - "^http://www\\.mixcloud\\.com/oembed/[^#?/]+/.+$" + "https://www.mixcloud.com/oembed/": [ + "^https?://www\\.mixcloud\\.com/.+$" ], "http://www.kickstarter.com/services/oembed": [ "^http(?:s)://[-\\w]+\\.kickstarter\\.com/projects/.+$"
[test] Reduction sum test on macOS passes Possibly due to some differences in computation between macOS and other platforms in the Eigen library, the tolerance of the backward test of sum had to be relaxed.
@@ -37,4 +37,8 @@ def test_reduction_forward_backward(op, seed, axis, keepdims, ctx, func_name): func_args=[axis], func_kwargs=dict(keepdims=keepdims), ctx=ctx, func_name=func_name, - atol_b=3e-3) + # The backward test on macOS doesn't pass with this torelance. + # Does Eigen library used in CPU computatation backend produce + # the different results on different platforms? + # atol_b=3e-3, + atol_b=6e-3)
Ignore endpoint updates from kube-system kube-scheduler and kube-controller-manager endpoints are updated almost every second, leading to terrible noise, and hence constant listener invocation. So, here we ignore endpoint updates from the kube-system namespace. More:
@@ -157,6 +157,18 @@ func (w *Watcher) WatchNamespace(namespace, resources string, listener func(*Wat // assume this means we made the // change to them if oldUn.GetResourceVersion() != newUn.GetResourceVersion() { + // kube-scheduler and kube-controller-manager endpoints are + // updated almost every second, leading to terrible noise, + // and hence constant listener invokation. So, here we + // ignore endpoint updates from kube-system namespace. More: + // https://github.com/kubernetes/kubernetes/issues/41635 + // https://github.com/kubernetes/kubernetes/issues/34627 + if oldUn.GetKind() == "Endpoints" && + newUn.GetKind() == "Endpoints" && + oldUn.GetNamespace() == "kube-system" && + newUn.GetNamespace() == "kube-system" { + return + } invoke() } },
Allow retrying passed buildkite steps Summary: Plenty of times this is useful (trying to repro flakes, for example). I'm surprised it isn't the default
@@ -34,7 +34,8 @@ def __init__(self, label, key=None, timeout_in_minutes=None): "automatic": [ {"exit_status": -1, "limit": 2}, # agent lost {"exit_status": 255, "limit": 2}, # agent forced shut down - ] + ], + "manual": {"permit_on_passed": True}, }, } if key is not None:
Implemented suggestions by terminalmage Rephrasing and better linkage to the documentation
@@ -1772,7 +1772,7 @@ def upgrade(name=None, .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html .. versionchanged:: Fluorine - Added obsoletes and minimal arguments + Added ``obsoletes`` and ``minimal`` arguments Returns a dictionary containing the changes: @@ -1867,9 +1867,9 @@ def upgrade(name=None, .. versionadded:: Fluorine obsoletes : True - Controls wether dnf/yum should take obsoletes into account and remove them. - If set to False yum will use update instead of upgrade - and dnf will be run with --obsoletes=False + Controls wether yum/dnf should take obsoletes into account and remove them. + If set to ``False`` yum will use ``update`` instead of ``upgrade`` + and dnf will be run with ``--obsoletes=False`` .. code-block:: bash @@ -1962,15 +1962,16 @@ def update(name=None, obsoletes=False, **kwargs): ''' - Uses "upgrade" but defaults to obsoletes=False - Introduced to mirror expected CLI usage of yum update - For options see "upgrade" + .. versionadded:: Fluorine + + Calls :py:func:`pkg.upgrade <salt.modules.yumpkg.upgrade>` with + ``obsoletes=False``. Mirrors the CLI behavior of ``yum update``.See + :py:func:`pkg.upgrade <salt.modules.yumpkg.upgrade>` for further + documentation. .. code-block:: bash salt '*' pkg.update - - .. versionadded:: Fluorine ''' return upgrade(name, pkgs, refresh, skip_verify, normalize, minimal, obsoletes, **kwargs)
Docs: update the path of "meters.yaml" and its new feature The file has been moved from ceilometer/meter/data/meters.yaml to ceilometer/data/meters.d/meters.yaml, in order to support loading multiple meter definition files. So I think it is necessary to update the info in the docs. Related-Bug:
@@ -205,7 +205,7 @@ Meter definitions The Telemetry service collects a subset of the meters by filtering notifications emitted by other OpenStack services. You can find the meter definitions in a separate configuration file, called -``ceilometer/meter/data/meters.yaml``. This enables +``ceilometer/data/meters.d/meters.yaml``. This enables operators/administrators to add new meters to Telemetry project by updating the ``meters.yaml`` file without any need for additional code changes. @@ -216,6 +216,10 @@ the ``meters.yaml`` file without any need for additional code changes. collected meters can differ in some cases from what is referenced in the documentation. +It also support loading multiple meter definition files and allow users to add +their own meter definitions into several files according to different types of +metrics under the directory of ``/etc/ceilometer/meters.d``. + A standard meter definition looks like: .. code-block:: yaml
add pillow>=6.2.0 to fix rtd error & remove comments rtd error: Pillow 5.4.1 is installed but pillow>=6.2.0 is required by {'matplotlib'}
+# Sorted --find-links https://download.pytorch.org/whl/torch_stable.html -# Needed only for torch-geometric -#torch-cluster==1.5.4 -#torch-scatter==2.0.4 -#torch-sparse==0.6.5 -#torch-spline-conv==1.2.0 -#torch-geometric==1.5.0 - -# Documentation packages nbsphinx nbsphinx-link -# scikit-image numpy -# Needed only for graph ogb==1.2.0 +pillow>=6.2.0 pytorch-lightning +# scikit-image scikit-learn tensorly torch==1.7.0+cpu
fw/exec: context: add write_job_specs Add a method to encapsulate the writing of ConfigManager's job specs into run_output.
@@ -188,6 +188,9 @@ class ExecutionContext(object): self.run_output.write_state() self.run_output.write_result() + def write_job_specs(self): + self.run_output.write_job_specs(self.cm.job_specs) + def get_resource(self, resource, strict=True): result = self.resolver.get(resource, strict) if result is None:
Deprecate `Request.is_xhr` The `X-Requested-With` header is not reliable because it is not standard, so it's safe to deprecate this property
""" from functools import update_wrapper from datetime import datetime, timedelta +from warnings import warn from werkzeug.http import HTTP_STATUS_CODES, \ parse_accept_header, parse_cache_control_header, parse_etags, \ @@ -62,7 +63,6 @@ def _warn_if_string(iterable): to the WSGI server is not a string. """ if isinstance(iterable, string_types): - from warnings import warn warn(Warning('response iterable was set to a string. This appears ' 'to work but means that the server will send the ' 'data to the client char, by char. This is almost ' @@ -669,12 +669,22 @@ class BaseRequest(object): .. versionadded:: 0.7''') - is_xhr = property(lambda x: x.environ.get('HTTP_X_REQUESTED_WITH', '') - .lower() == 'xmlhttprequest', doc=''' - True if the request was triggered via a JavaScript XMLHttpRequest. + @property + def is_xhr(self): + """True if the request was triggered via a JavaScript XMLHttpRequest. This only works with libraries that support the `X-Requested-With` header and set it to "XMLHttpRequest". Libraries that do that are - prototype, jQuery and Mochikit and probably some more.''') + prototype, jQuery and Mochikit and probably some more. + + .. deprecated:: 0.13 + ``X-Requested-With`` is not standard and is unreliable.""" + warn(DeprecationWarning('Request.is_xhr is deprecated. ' + 'Given that the X-Requested-With header is ' + 'not a part of any spec, it is not reliable'), + stacklevel=2) + return self.environ.get('HTTP_X_REQUESTED_WITH', '').lower() \ + == 'xmlhttprequest' + is_secure = property(lambda x: x.environ['wsgi.url_scheme'] == 'https', doc='`True` if the request is secure.') is_multithread = environ_property('wsgi.multithread', doc='''
DOC: updated changelog Updated changelog with new enhancement information.
@@ -21,6 +21,7 @@ This project adheres to [Semantic Versioning](http://semver.org/). - Replace `season_date_range` with `create_date_range`, old version is deprecated - Added deprecation warnings to stat functions - Removed `pysat_sgp4` instrument + - Added cleaning steps to the C/NOFS IVM ion fraction data - Bug fix - Fixed implementation of utils routines in model_utils and jro_isr - Fixed error catching bug in model_utils
Add Alcatel.7302 interface type HG-- branch : feature/microservices
@@ -44,6 +44,8 @@ class Script(BaseScript): "xdsl-channel": "physical", "atm-bonding": "physical", "atm": "physical", + "atm-ima": "physical", + "shdsl": "physical", "sw-loopback": "loopback", "bonding": "other" }
* Modify override behavior to search original params instead of template file. Fixes
@@ -321,10 +321,8 @@ class TaskCat(object): if key in param_index.keys(): idx = param_index[key] original_keys[idx] = override_pd - elif key in template_params: - original_keys.append(override_pd) else: - print(PrintMsg.INFO + "Cannot override [{}]! It's not present within the template!".format(key)) + print(PrintMsg.INFO + "Cannot apply overrides for the [{}] Parameter. You did not include this parameter in [{}]".format(key, self.get_parameter_file())) # check if s3 bucket and QSS3BucketName param match. fix if they dont. bucket_name = self.get_s3bucket()
Update README.md Mozilla Observatory ordered alphabetically
@@ -749,9 +749,9 @@ API | Description | Auth | HTTPS | CORS | | [FilterLists](https://filterlists.com) | Lists of filters for adblockers and firewalls | No | Yes | Unknown | | [FraudLabs Pro](https://www.fraudlabspro.com/developer/api/screen-order) | Screen order information using AI to detect frauds | `apiKey` | Yes | Unknown | | [HaveIBeenPwned](https://haveibeenpwned.com/API/v3) | Passwords which have previously been exposed in data breaches | `apiKey` | Yes | Unknown | +| [Intelligence X](https://github.com/IntelligenceX/SDK/blob/master/Intelligence%20X%20API.pdf) | Perform OSINT via Intelligence X | `apiKey` | Yes | Unknown | | [Mozilla http scanner](https://github.com/mozilla/http-observatory/blob/master/httpobs/docs/api.md) | Mozilla observatory http scanner | No | Yes | Unknown | | [Mozilla tls scanner](https://github.com/mozilla/tls-observatory#api-endpoints) | Mozilla observatory tls scanner | No | Yes | Unknown | -| [Intelligence X](https://github.com/IntelligenceX/SDK/blob/master/Intelligence%20X%20API.pdf) | Perform OSINT via Intelligence X | `apiKey` | Yes | Unknown | | [National Vulnerability Database](https://nvd.nist.gov/vuln/Data-Feeds/JSON-feed-changelog) | U.S. National Vulnerability Database | No | Yes | Unknown | | [Pulsedive](https://pulsedive.com/api/) | Scan, search and collect threat intelligence data in real-time | `apiKey` | Yes | Unknown | | [SecurityTrails](https://securitytrails.com/corp/apidocs) | Domain and IP related information such as current and historical WHOIS and DNS records | `apiKey` | Yes | Unknown |
issue make CallError inherit from object for 2.4/2.5. Otherwise cPickle will not call __reduce__().
@@ -250,11 +250,14 @@ class Kwargs(dict): return (Kwargs, (dict(self),)) -class CallError(Error): - """Serializable :class:`Error` subclass raised when - :meth:`Context.call() <mitogen.parent.Context.call>` fails. A copy of - the traceback from the external context is appended to the exception - message.""" +class CallError(Error, object): + """ + Serializable :class:`Error` subclass raised when :meth:`Context.call() + <mitogen.parent.Context.call>` fails. A copy of the traceback from the + external context is appended to the exception message. + """ + # Derives from object to force <2.7 pickle to call reduce. This may have + # unintended consequences, Exceptions in 2.x are classic classes. def __init__(self, fmt=None, *args): if not isinstance(fmt, BaseException): Error.__init__(self, fmt, *args)
build_emoji: Remove now unused `MissingGlyphError` exception. This exception was raised if there was no glyph available for a codepoint. We no longer need this.
@@ -90,10 +90,6 @@ if 'TRAVIS' in os.environ: # In Travis CI, we don't have root access EMOJI_CACHE_PATH = "/home/travis/zulip-emoji-cache" -class MissingGlyphError(Exception): - pass - - def main(): # type: () -> None # ttx is in the fonttools pacakge, the -z option is only on master
BUG: Actually raise the "cvxopt not found" error message as a ValueError, to match the "Unknown fit method l1_cvxopt_cp" previously raised by statsmodels/base/optimizer.py _check_method
@@ -356,7 +356,7 @@ class DiscreteModel(base.LikelihoodModel): from statsmodels.base.l1_cvxopt import fit_l1_cvxopt_cp extra_fit_funcs['l1_cvxopt_cp'] = fit_l1_cvxopt_cp elif method.lower() == 'l1_cvxopt_cp': - message = ("Attempt to use l1_cvxopt_cp failed since cvxopt " + raise ValueError("Attempt to use l1_cvxopt_cp failed since cvxopt " "could not be imported") if callback is None:
Deploy to pypi only once when using build matrix Makes use of TravisCI build stages. Note that this feature is still in beta.
@@ -29,9 +29,10 @@ install: script: - pytest -v --cov=donkeycar donkeycar/tests -after_success: -- codecov - +jobs: + include: + - stage: deploy + script: skip deploy: provider: pypi user: wroscoe @@ -40,3 +41,6 @@ deploy: on: tags: true branch: master + +after_success: +- codecov \ No newline at end of file
MAINT: Quiet the anaconda uploads. The nightly uploads of the aarch64 wheels built on TravisCI are failing due to the maximum log length being exceeded. This quiets the anaconda output for that operation. Long term, we will probably want to shorten the test output also.
@@ -45,10 +45,10 @@ upload_wheels() { # sdists are located under dist folder when built through setup.py if compgen -G "./dist/*.gz"; then echo "Found sdist" - anaconda -t ${TOKEN} upload --skip -u ${ANACONDA_ORG} ./dist/*.gz + anaconda -q -t ${TOKEN} upload --skip -u ${ANACONDA_ORG} ./dist/*.gz elif compgen -G "./wheelhouse/*.whl"; then echo "Found wheel" - anaconda -t ${TOKEN} upload --skip -u ${ANACONDA_ORG} ./wheelhouse/*.whl + anaconda -q -t ${TOKEN} upload --skip -u ${ANACONDA_ORG} ./wheelhouse/*.whl else echo "Files do not exist" return 1
docs: Add translation policy on API error messages. Record Zulip's translation policy on API error messages.
@@ -101,6 +101,11 @@ Some useful tips for your translating journey: - Take advantage of the hotkeys the Transifex Web Editor provides, such as `Tab` for saving and going to the next string. +- While one should definitely prioritize translating + `translations.json`, since the most prominent user-facing strings + are there, API error messages in `django.po` are presented to users, + so a full translation should include them. + ### Testing translations This section assumes you have a
Update tables.md tables.md: Updating info as per release 0.10
@@ -5,22 +5,23 @@ is the BGP data that the bgp service collects from routers. To see what information is collected for each table, you can use the ```table describe table=<table name>``` via suzieq-cli to get the details. To see the list of tables, you can type ```help``` in suzieq-cli or run ```suzieq-cli --help```. -| | Cumulus Linux | Arista EOS | Linux | Cisco NXOS | Juniper JunOS | SONIC | -| :---------: | :---------------: | :------------: | :-------: | :------: | :-------: | :-------: | -| Arpnd | yes | yes | yes | yes | yes | yes | -| BGP | yes | yes | yes | yes | yes | yes | -| Device | yes | yes | yes | yes | yes | yes | -| EvpnVni | yes | no | no | yes* | yes | yes | -| Filesystem (fs) | yes | yes | yes | yes | no | yes | -| IfCounters | yes | yes | yes | no | no | yes | -| Interfaces | yes | yes | yes| yes | yes | yes | -| LLDP | yes | yes | yes | yes | yes | yes | -| Macs |yes | yes | yes | yes | yes | yes | -| MLAG | yes | yes | no | yes | no | no | -| Ospf |yes | yes | yes | yes | yes | yes | -| Routes | yes | yes | yes | yes | yes | yes | -| sqPoller | yes | yes | yes | yes | yes | yes | -| Topcpu | yes | yes | yes | yes | no | yes | -| Topmem | yes | yes | yes | no | no | yes | -| VLAN | yes | yes | yes | yes | yes | yes | +| | Cumulus Linux | Arista EOS | Linux | Cisco NXOS | Juniper JunOS | SONIC | IOSXR | +| :---------: | :---------------: | :------------: | :-------: | :------: | :-------: | :-------: | :-------: | +| Arpnd | yes | yes | yes | yes | yes | yes | yes | +| BGP | yes | yes | yes | yes | yes | yes | yes | +| Device | yes | yes | yes | yes | yes | yes | yes | +| EvpnVni | yes | yes | no | yes* | yes | yes | no | +| Filesystem (fs) | yes | yes | yes | yes | no | yes | no | +| IfCounters | yes | yes | yes | no | no | yes | no | +| Interfaces | yes | yes | yes| yes | yes | yes | yes | +| LLDP | yes | yes | yes | yes | yes | yes | yes | +| Macs |yes | yes | yes | yes | yes | yes | no | +| MLAG | yes | yes | no | yes | no | no | no | +| Ospf |yes | yes | yes | yes | yes | yes | no | +| Routes | yes | yes | yes | yes | yes | yes | yes | +| sqPoller | yes | yes | yes | yes | yes | yes | yes | +| Topcpu | yes | yes | yes | yes | no | yes | no | +| Topmem | yes | yes | yes | no | no | yes | no | +| VLAN | yes | yes | yes | yes | yes | yes | no | + \* - EVPN support for NXOS requires version 9.3.3 or above
add reasoning for the scale update of rwalk and somewhat update the eqn
@@ -148,18 +148,37 @@ class SuperSampler(Sampler): def update_rwalk(self, blob): """Update the random walk proposal scale based on the current - number of accepted/rejected steps.""" - + number of accepted/rejected steps. + For rwalk the scale is important because it + determines the speed of diffusion of points. + I.e. if scale is too large, the proposal efficiency will be very low + so it's likely that we'll only do one random walk step at the time, + thus producing very correlated chain. + """ self.scale = blob['scale'] accept, reject = blob['accept'], blob['reject'] facc = (1. * accept) / (accept + reject) - norm = max(self.facc, 1. - self.facc) * self.ncdim - self.scale *= math.exp((facc - self.facc) / norm) - self.scale = min(self.scale, math.sqrt(self.ncdim)) + # Here we are now trying to solve the Eqn + # f0 = F(s) where F is the function + # providing the acceptance rate given logscale + # and f0 is our target acceptance rate + # in this case a Newton like update to s + # is s_{k+1} = s_k - 1/F'(s_k) * (F_k - F_0) + # We can speculate that F(s)~ C*exp(-Ns) + # i.e. it's inversely proportional to volume + # Then F'(s) = -N * F \approx N * F_0 + # Therefore s_{k+1} = s_k + 1/(N*F_0) * (F_k-F0) + # See also Robbins-Munro recursion which we don't follow + # here because our coefficients a_k do not obey \sum a_k^2 = \infty + self.scale *= math.exp((facc - self.facc) / self.ncdim / self.facc) def update_slice(self, blob): """Update the slice proposal scale based on the relative - size of the slices compared to our initial guess.""" + size of the slices compared to our initial guess. + For slice sampling the scale is only 'advisory' in the sense that + the right scale will just speed up sampling as we'll have to expand + or contract less. It won't affect the quality of the samples much. + """ # see https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4063214/ # also 2002.06212 # https://www.tandfonline.com/doi/full/10.1080/10618600.2013.791193
Simplify cost_func caching Now only cache snapped values, since those correspond to results for an actual instance of the kernel.
@@ -64,10 +64,6 @@ def _cost_func(x, kernel_options, tuning_options, runner, results, cache): logging.debug('_cost_func called') logging.debug('x: ' + str(x)) - x_key = ",".join([str(i) for i in x]) - if x_key in cache: - return cache[x_key] - #snap values in x to nearest actual value for each parameter unscale x if needed if tuning_options.scaling: params = unscale_and_snap_to_nearest(x, tuning_options.tune_params, tuning_options.eps) @@ -76,6 +72,7 @@ def _cost_func(x, kernel_options, tuning_options, runner, results, cache): logging.debug('params ' + str(params)) + #we cache snapped values, since those correspond to results for an actual instance of the kernel x_int = ",".join([str(i) for i in params]) if x_int in cache: return cache[x_int] @@ -85,7 +82,6 @@ def _cost_func(x, kernel_options, tuning_options, runner, results, cache): legal = util.check_restrictions(tuning_options.restrictions, params, tuning_options.tune_params.keys(), tuning_options.verbose) if not legal: cache[x_int] = error_time - cache[x_key] = error_time return error_time #compile and benchmark this instance @@ -95,11 +91,9 @@ def _cost_func(x, kernel_options, tuning_options, runner, results, cache): if res: results.append(res[0]) cache[x_int] = res[0]['time'] - cache[x_key] = res[0]['time'] return res[0]['time'] cache[x_int] = error_time - cache[x_key] = error_time return error_time
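To illustrate the idea behind this change, here is a generic memoization sketch (hypothetical helper names, not Kernel Tuner's actual code): keying the cache on the snapped parameter values means every raw point that maps to the same kernel instance shares one cache entry, so the second, raw-value key is redundant.

```python
# Hypothetical illustration of caching on snapped values only: two different
# raw points that snap to the same parameters hit the same cache entry.
def snap_to_nearest(value, allowed):
    return min(allowed, key=lambda v: abs(v - value))

cache = {}

def cost(x, allowed_values=(16, 32, 64, 128)):
    params = [snap_to_nearest(v, allowed_values) for v in x]
    key = ",".join(str(p) for p in params)        # snapped-value key
    if key in cache:
        return cache[key]
    result = sum(params)                          # stand-in for benchmarking
    cache[key] = result
    return result

print(cost([30.2, 60.1]), cost([31.9, 63.0]), len(cache))  # 96 96 1
```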
Update dataset_api.py Replace Exception with a print in the dataset.purge method
@@ -184,7 +184,8 @@ class DatasetRequestAPI(RequestAPI): while pref != "y" and pref != "n": pref = input("Invalid input '" + pref + "', please specify 'y' or 'n'.") if pref == "n": - raise Exception("Datasets deletion is cancelled.") + print("Datasets deletion is cancelled.") + return None for dataset in self.all(): self.delete(dataset_id=dataset.get("id"))
fix - use failed as class variable failed must be used as a class variable. Test classes cannot have __init__, so this weird approach is used for now.
@@ -51,6 +51,8 @@ class ModuleUnitTest(BaseTest): TEST_DATA_FOLDER = None + failed = False + @pytest.fixture(scope='session') def monkeypatch_session(self): """Monkeypatch couldn't be used with module or session fixtures."""
Replaced FIXME with proper docstring. All of the issues have been addressed or determined not to be issues
@@ -74,22 +74,7 @@ def plan_list(runtime, print_json): @pass_runtime(require_project=True, require_keychain=True) def plan_info(runtime, plan_name, messages_only): """ - plan_info FIXME: - - the original RFC lists a "recommended" column for steps; I don't know - where that data comes from - - I don't know if the step preflights are computed correctly - - the original RFC shows simple step numbers (1, 2, 3, etc) - but this version of the code shows nested steps (1/1/1.1, 1/1/1.2, etc) - - this code doesn't support json yet, though all of the data is collected - in a dict so it should be easy to support it if we want. - - the original RFC calls for a --plan option, but I set it to an argument - to be consistent with 'cci task info' and 'cci flow info' - - this code emits logging info when getting dependency info from github - should it? - - freezing the steps is relatively expensive; should we cache it or make - it optional (eg: --nosteps)? - - I think I need to configure the tables to emit True/False rather than - checkmarks (?) + Displays information for a MetaDeploy plan. """ plans = runtime.project_config.plans or {}
typo fixed a typo +label: docsite_pr
@@ -206,7 +206,7 @@ The following shows an example ``meta/main.yml`` file with dependent roles: If the source of a role is Galaxy, specify the role in the format *namespace.role_name*, as shown in the -above example. The more complex format used in *requirements.yml* is also supported, as deomonstrated by +above example. The more complex format used in *requirements.yml* is also supported, as demonstrated by the following: .. code-block:: yaml
Add class SummaryResults * Class used to store results and provide plots rotor summary. * This class aims to present a summary of the main parameters and attributes from a rotor model. The data is presented in a table format.
@@ -1712,6 +1712,98 @@ class StaticResults: return fig +class SummaryResults: + """Class used to store results and provide plots rotor summary. + + This class aims to present a summary of the main parameters and attributes + from a rotor model. The data is presented in a table format. + + Parameters + ---------- + df_shaft : dataframe + shaft dataframe + + Returns + ------- + table : bokeh WidgetBox + Bokeh WidgetBox with the summary table plot + """ + def __init__(self, df_shaft): + self.df_shaft = df_shaft + + def plot(self): + """Plot the summary table. + + This method plots: + Table with summary of rotor parameters and attributes + + Parameters + ---------- + + Returns + ------- + table : bokeh WidgetBox + Bokeh WidgetBox with the summary table plot + """ + materials = [mat.name for mat in self.df_shaft["material"]] + + data = dict( + tags=self.df_shaft["tag"], + lft_stn=self.df_shaft["n_l"], + rgt_stn=self.df_shaft["n_r"], + elem_no=self.df_shaft["_n"], + beam_left_loc=self.df_shaft["nodes_pos_l"], + elem_len=self.df_shaft["L"], + beam_cg=self.df_shaft["beam_cg"], + axial_cg_pos=self.df_shaft["axial_cg_pos"], + beam_right_loc=self.df_shaft["nodes_pos_r"], + material=materials, + mass=self.df_shaft["m"], + inertia=self.df_shaft["Ie"], + ) + source = ColumnDataSource(data) + + titles = [ + "Element Tag", + "Left Station", + "Right Station", + "Element Number", + "Elem. Left Location (m)", + "Elem. Lenght (m)", + "Element CG (m)", + "Axial CG Location (m)", + "Elem. Right Location (m)", + "Material", + "Elem. Mass (kg)", + "Inertia (m4)", + ] + + formatters = [ + None, + None, + None, + None, + NumberFormatter(format="0.000"), + NumberFormatter(format="0.000"), + NumberFormatter(format="0.000"), + NumberFormatter(format="0.000"), + NumberFormatter(format="0.000"), + None, + NumberFormatter(format="0.000"), + None, + ] + + columns = [ + TableColumn(field=str(field), title=title, formatter=form) + for field, title, form in zip(data.keys(), titles, formatters) + ] + + data_table = DataTable(source=source, columns=columns, width=1600) + table = widgetbox(data_table) + + return table + + class ConvergenceResults: """Class used to store results and provide plots for Convergence Analysis.
[IMPR] Removing poetry from travis **Is backwards compatible**: yes Removed poetry install from travis since we do not use poetry anymore.
@@ -32,7 +32,6 @@ addons: install: - pip install --upgrade pip - - pip install poetry - cd $TRAVIS_BUILD_DIR && make setup - pip install coveralls - sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 379CE192D401AB61
Fix bug spotted by tedsta@ Thanks!
@@ -675,7 +675,7 @@ class SampleCollector(object): else: self.metadata_providers = DEFAULT_METADATA_PROVIDERS - self.publishers = publishers.copy() + self.publishers = publishers[:] if publishers_from_flags: publishers.extend(SampleCollector._PublishersFromFlags()) if add_default_publishers:
[cleanup] pywikibot/site.py: simplify test The form using a tuple, isinstance(x, (A, B, ...)), is a shortcut for isinstance(x, A) or isinstance(x, B) or ... (etc.).
@@ -5814,9 +5814,9 @@ class APISite(BaseSite): if all(_ is None for _ in [rcid, revid, revision]): raise Error('No rcid, revid or revision provided.') - if isinstance(rcid, int) or isinstance(rcid, basestring): + if isinstance(rcid, (int, basestring)): rcid = {rcid} - if isinstance(revid, int) or isinstance(revid, basestring): + if isinstance(revid, (int, basestring)): revid = {revid} if isinstance(revision, pywikibot.page.Revision): revision = {revision}
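For completeness, the equivalence this cleanup relies on, as a tiny standalone check:

```python
# isinstance() accepts a tuple of types; the two forms below are equivalent.
for value in (42, "rcid", 3.14):
    assert isinstance(value, (int, str)) == (
        isinstance(value, int) or isinstance(value, str)
    )
```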
fix: ensure string type for configflow closes
"issue_tracker": "https://github.com/custom-components/alexa_media_player/issues", "dependencies": ["persistent_notification"], "codeowners": ["@keatontaylor", "@alandtse"], - "requirements": ["alexapy==1.20.0", "packaging~=20.3", "wrapt~=1.12.1"] + "requirements": ["alexapy==1.20.1", "packaging~=20.3", "wrapt~=1.12.1"] }
allow null content in migration because existing rows won't have any content populated yet.
@@ -14,7 +14,7 @@ down_revision = '0334_broadcast_message_number' def upgrade(): - op.add_column('broadcast_message', sa.Column('content', sa.Text(), nullable=False)) + op.add_column('broadcast_message', sa.Column('content', sa.Text(), nullable=True)) op.alter_column('broadcast_message', 'template_id', nullable=True) op.alter_column('broadcast_message', 'template_version', nullable=True)
[Salt-cloud] Allow ignoring SSL with the Xen provider Allow ignoring SSL verification for Xen providers # /etc/salt/cloud.providers.d/xentest.conf xentest: ignore_ssl: True driver: xen Requires a recent XenAPI.py which can be found here:
@@ -150,7 +150,14 @@ def _get_session(): __opts__, search_global=False ) - session = XenAPI.Session(url) + ignore_ssl = config.get_cloud_config_value( + 'ignore_ssl', + get_configured_provider(), + __opts__, + default=False, + search_global=False + ) + session = XenAPI.Session(url,ignore_ssl=ignore_ssl) log.debug('url: {} user: {} password: {}, originator: {}'.format( url, user,
Grammar+rephrasing Grammar and phrasing changes after review
# Create an SLO for availability for the custom service. # Example SLO is defined as following: -# 90% of all non-4XX requests within the past 30 day windowed period -# return with 200 OK status +# 90% of HTTP requests are successful within the past 30 day windowed period + resource "google_monitoring_slo" "custom_service_availability_slo" { count = length(var.custom_services) service = google_monitoring_custom_service.custom_service[count.index].service_id @@ -56,7 +56,7 @@ resource "google_monitoring_slo" "custom_service_availability_slo" { # Create another SLO on the custom service this time with respect to latency. # Example SLO is defined as following: -# 90% of requests that return 200 OK responses return in under 500 ms +# 90% of requests return in under 500 ms in the previous 30 days resource "google_monitoring_slo" "custom_service_latency_slo" { count = length(var.custom_services) service = google_monitoring_custom_service.custom_service[count.index].service_id @@ -91,8 +91,7 @@ resource "google_monitoring_slo" "custom_service_latency_slo" { # Create an SLO for availability for the Istio service. # Example SLO is defined as following: -# 90% of all non-4XX requests within the past 30 day windowed period -# return with 200 OK status +# 90% of HTTP requests are successful within the past 30 day windowed period resource "google_monitoring_slo" "istio_service_availability_slo" { count = length(var.istio_services) @@ -136,7 +135,7 @@ resource "google_monitoring_slo" "istio_service_availability_slo" { # Create an SLO with respect to latency using the Istio service. # Example SLO is defined as: -# 99% of requests that return 200 OK responses return in under 500 ms +# 99% of requests return in under 500 ms in the previous 30 days resource "google_monitoring_slo" "istio_service_latency_slo" { count = length(var.istio_services) service = "ist:${var.project_id}-zone-${var.zone}-cloud-ops-sandbox-default-${var.istio_services[count.index].service_id}" @@ -169,8 +168,8 @@ resource "google_monitoring_slo" "istio_service_latency_slo" { } # Rating service availability SLO: -# 99% of all non-4XX requests within the past 30 day windowed period -# return with 200 OK status +# 99% of HTTP requests are successful within the past 30 day windowed period + resource "google_monitoring_slo" "rating_service_availability_slo" { # Uses ratingservice service that is automatically detected and created when the service is deployed to App Engine # Identify of the service is built after the following template: gae:${project_id}_servicename @@ -216,7 +215,8 @@ resource "google_monitoring_slo" "rating_service_availability_slo" { } # Rating service latency SLO: -# 99% of requests that return 200 OK responses return in under 175 ms +# 99% of requests that return in under 175 ms in the previous 30 days + resource "google_monitoring_slo" "rating_service_latency_slo" { # Uses ratingservice service that is automatically detected and created when the service is deployed to App Engine # Identify of the service is built after the following template: gae:${project_id}_servicename
/integrations/: Focus search bar on page load. This focuses the search bar on initial page load.
@@ -370,7 +370,11 @@ function integration_events() { return false; }); - $(".integrations .searchbar input[type='text']").on('input', function (e) { + // combine selector use for both focusing the integrations searchbar and adding + // the input event. + $(".integrations .searchbar input[type='text']") + .focus() + .on('input', function (e) { dispatch('UPDATE_QUERY', { query : e.target.value.toLowerCase() }); });
Use async_forward_entry_setups instead of async_setup_platforms Replaces current async_setup_platforms function with async_forward_entry_setups, which will prevent the integration from failing to start in Home Assistant 2023.3+
@@ -174,7 +174,7 @@ async def async_initialize_integration( hacs.log.info("Update entities are only supported when using UI configuration") else: - hass.config_entries.async_setup_platforms( + await hass.config_entries.async_forward_entry_setups( config_entry, [Platform.SENSOR, Platform.UPDATE] if hacs.configuration.experimental
Update MEETING_SCHEDULE.md change to new schedule and link that allows for multiple other users to start the meeting
@@ -9,8 +9,10 @@ We hold troubleshooting sessions once a week on Thursdays, at 2:30 pm Eastern. ## Monthly Contributors Meeting -The Emissary-ingress Contributors Meeting is held on the first Wednesday of every month at 1pm Eastern. The focus of this meeting is discussion of technical issues related to development of Emissary-ingress. +The Emissary-ingress Contributors Meeting is held on the first Wednesday of every month at 3:30pm Eastern. The focus of this meeting is discussion of technical issues related to development of Emissary-ingress. New contributors are always welcome! Check out our [contributor's guide](https://github.com/emissary-ingress/emissary/blob/master/DEVELOPING.md) to learn how you can help make Emissary-ingress better. -**Zoom Meeting Link**: https://ambassadorlabs.zoom.us/j/89088869140 +**Zoom Meeting Link**: [https://ambassadorlabs.zoom.us/j/86139262248?pwd=bzZlcU96WjAxN2E1RFZFZXJXZ1FwQT09](https://ambassadorlabs.zoom.us/j/86139262248?pwd=bzZlcU96WjAxN2E1RFZFZXJXZ1FwQT09) +- Meeting ID: 861 3926 2248 +- Passcode: 113675
Fix mobile build Summary: Pull Request resolved: This was broken by but only showed up in master CI builds ghstack-source-id: Test Plan: CI
@@ -100,9 +100,9 @@ c10::OperatorOptions atenOperatorOptions() { return result; } -int (*DUMMY_OPERATION)(Stack&) = [](Stack& stack) -> int { +KernelFunction::InternalBoxedKernelFunction *DUMMY_OPERATION = + [](c10::OperatorKernel *, const c10::OperatorHandle &, std::vector<c10::IValue> *) -> void { TORCH_CHECK(false, "Operator has been stripped in the custom build.") - return 0; }; class Registerer final {
Don't check dependencies in setup.py No one would notice anyway; checking runtime dependencies at packaging time is a waste of effort and adds a lot of code
@@ -13,30 +13,6 @@ from glob import glob import io -# check availability of runtime dependencies -def check_dependency(package, version): - """Issue a warning if the package is not available.""" - try: - import gi - gi.require_version(package.rsplit('.')[-1], version) - __import__(package) - except ImportError as e: - # caused by either of the imports, probably the first - logging.warning("Missing runtime dependencies:\n\t" + str(e)) - except ValueError as e: - # caused by the gi.require_version() statement - logging.warning("Missing runtime dependencies:\n\t" + str(e)) - except RuntimeError as e: - # caused by the final __import__() statement - logging.warning("Bad runtime dependency:\n\t" + str(e)) - - -check_dependency('gi.repository.Gio', '2.0') -check_dependency('gi.repository.GLib', '2.0') -check_dependency('gi.repository.Gtk', '3.0') -check_dependency('gi.repository.Notify', '0.7') - - # read long_description from README.rst long_description = None try:
fix(sw_index_daily_indicator): fix sw_index_daily_indicator interface fix sw_index_daily_indicator interface
@@ -182,9 +182,9 @@ def sw_index_daily( def sw_index_daily_indicator( - index_code: str = "801010", + index_code: str = "801003", start_date: str = "2019-12-01", - end_date: str = "2019-12-07", + end_date: str = "2021-09-07", data_type: str = "Day", ) -> pd.DataFrame: """ @@ -249,6 +249,7 @@ def sw_index_daily_indicator( temp_df = pd.DataFrame(data) temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date temp_df["close"] = pd.to_numeric(temp_df["close"]) + temp_df["volume"] = temp_df["volume"].apply(lambda x: x.replace(",", "")) temp_df["volume"] = pd.to_numeric(temp_df["volume"]) temp_df["chg_pct"] = pd.to_numeric(temp_df["chg_pct"]) temp_df["turn_rate"] = pd.to_numeric(temp_df["turn_rate"]) @@ -282,7 +283,7 @@ if __name__ == "__main__": print(sw_index_daily_df) sw_index_daily_indicator_df = sw_index_daily_indicator( - index_code="801001", + index_code="801003", start_date="2019-11-01", end_date="2019-12-07", data_type="Week",
Fix get_spent mongodb-based query fixes
@@ -153,14 +153,22 @@ def get_spent(conn, transaction_id, output): cursor = conn.run( conn.collection('bigchain').aggregate([ {'$match': { - 'block.transactions.inputs.fulfills.txid': transaction_id, - 'block.transactions.inputs.fulfills.output': output + 'block.transactions.inputs': { + '$elemMatch': { + 'fulfills.txid': transaction_id, + 'fulfills.output': output, + }, + }, }}, {'$unwind': '$block.transactions'}, {'$match': { - 'block.transactions.inputs.fulfills.txid': transaction_id, - 'block.transactions.inputs.fulfills.output': output - }} + 'block.transactions.inputs': { + '$elemMatch': { + 'fulfills.txid': transaction_id, + 'fulfills.output': output, + }, + }, + }}, ])) # we need to access some nested fields before returning so lets use a # generator to avoid having to read all records on the cursor at this point
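A small standalone illustration of why this query needs `$elemMatch` (hypothetical database and collection names, assumes a local MongoDB and pymongo installed): with dotted paths, the two conditions may be satisfied by different elements of the `inputs` array, while `$elemMatch` requires a single array element to satisfy both.

```python
# Hypothetical demo of dotted-path matching vs. $elemMatch on array elements.
from pymongo import MongoClient

coll = MongoClient().demo_db.demo_coll  # placeholder database/collection
coll.delete_many({})
coll.insert_one({"inputs": [
    {"fulfills": {"txid": "aaa", "output": 0}},
    {"fulfills": {"txid": "bbb", "output": 1}},
]})

# Matches: txid == 'aaa' and output == 1 are satisfied by *different* elements.
loose = {"inputs.fulfills.txid": "aaa", "inputs.fulfills.output": 1}
# No match: no single element has both txid == 'aaa' and output == 1.
strict = {"inputs": {"$elemMatch": {"fulfills.txid": "aaa",
                                    "fulfills.output": 1}}}

print(coll.count_documents(loose), coll.count_documents(strict))  # 1 0
```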
Update test_tasks.py Fixed an edge case where wrong text encodings caused the canary file to fail to parse.
@@ -118,7 +118,7 @@ def verify_canary(task_path): files = [os.path.join(task_path, f) for f in files] for canary_file in files: - with open(canary_file, "r") as f: + with open(canary_file, "r", errors='ignore') as f: lines = f.readlines() is_canary = np.array([CANARY in l for l in lines]) is_empty = lines == []
Update conf.py Added redirect for partner program.
@@ -96,7 +96,7 @@ redirects = { "process/accepting-pull-request": "https://handbook.mattermost.com/contributors/contributors/help-wanted", "process/pm-faq": "https://handbook.mattermost.com/operations/research-and-development/product/product-management-team-handbook#frequently-asked-questions-faq", "process/product-manager": "https://handbook.mattermost.com/contributors/join-us/staff-recruiting/product-manager-hiring", - + "process/partner-programs": "https://handbook.mattermost.com/operations/sales/partner-programs", } # The master toctree document.
readme: changed docs to point to stable Instead of latest.
@@ -14,7 +14,7 @@ It has powerful and intuitive scheduling syntax that is easy to extend with cust It allows various levels of parallelization and various ways to parametrize tasks. It is suitable for simple to moderately sized projects from process automatization to IOT. -Read more from the documentations: [Red Engine, documentations](https://red-engine.readthedocs.io/en/latest/) +Read more from the documentations: [Red Engine, documentations](https://red-engine.readthedocs.io/en/stable/) ## Core Features
Fix snippets showing use of 'error_reporting.HTTPContext'. Closes
@@ -75,8 +75,9 @@ be used by Stackdriver Error Reporting to help group exceptions. >>> from google.cloud import error_reporting >>> client = error_reporting.Client() >>> user = '[email protected]' - >>> http_context = HTTPContext(method='GET', url='/', userAgent='test agent', - ... referrer='example.com', responseStatusCode=500, + >>> http_context = error_reporting.HTTPContext( + ... method='GET', url='/', user_agent='test agent', + ... referrer='example.com', response_status_code=500, ... remote_ip='1.2.3.4') >>> try: >>> raise NameError @@ -116,7 +117,8 @@ Similarly to reporting an exception, the user and HTTP context can be provided: >>> from google.cloud import error_reporting >>> client = error_reporting.Client() >>> user = '[email protected]' - >>> http_context = HTTPContext(method='GET', url='/', userAgent='test agent', - ... referrer='example.com', responseStatusCode=500, + >>> http_context = error_reporting.HTTPContext( + ... method='GET', url='/', user_agent='test agent', + ... referrer='example.com', response_status_code=500, ... remote_ip='1.2.3.4') >>> error_reporting.report("Found an error!", http_context=http_context, user=user))
rm use of deprecated `contextlib.nested` [ci skip] This could almost certainly be simplified further
@@ -28,10 +28,10 @@ from __future__ import absolute_import, print_function, unicode_literals from gevent import monkey monkey.patch_all() -import contextlib import os import re import sys +from contextlib import ExitStack import gevent import jsonobject @@ -185,9 +185,9 @@ def rebuild_staging(config, print_details=True, push=True): merge_conflicts = [] not_found = [] all_configs = list(config.span_configs()) - context_manager = contextlib.nested(*[OriginalBranch(get_git(path)) - for path, _ in all_configs]) - with context_manager: + with ExitStack() as stack: + for path, _ in all_configs: + stack.enter_context(OriginalBranch(get_git(path))) for path, config in all_configs: git = get_git(path) try:
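For reference, a minimal standalone sketch of the `ExitStack` pattern this change adopts (placeholder file names): it enters an arbitrary number of context managers and unwinds them all when the `with` block ends, which is what `contextlib.nested` used to do in Python 2.

```python
# Minimal ExitStack sketch: enter a dynamic list of context managers and
# have them all closed on exit, in reverse order of entry.
from contextlib import ExitStack
import os
import tempfile

paths = [os.path.join(tempfile.gettempdir(), name)  # placeholder file names
         for name in ("a.txt", "b.txt", "c.txt")]

with ExitStack() as stack:
    files = [stack.enter_context(open(p, "w")) for p in paths]
    for f in files:
        f.write("hello\n")
# every file handle is closed here
print(all(f.closed for f in files))  # True
```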
removed tests for ACF feature generation The tests were removed as the featurizer was removed.
Tests for ConvMolFeaturizer. """ import unittest -import os import numpy as np -import pytest - -from deepchem.feat.graph_features import ConvMolFeaturizer, AtomicConvFeaturizer +from deepchem.feat.graph_features import ConvMolFeaturizer class TestConvMolFeaturizer(unittest.TestCase): @@ -98,38 +95,3 @@ class TestConvMolFeaturizer(unittest.TestCase): feat = featurizer.featurize(mols) for i, j in zip(feat, mols): assert len(i) == j.GetNumHeavyAtoms() - - -class TestAtomicConvFeaturizer(unittest.TestCase): - - @pytest.mark.slow - @pytest.mark.tensorflow - def test_feature_generation(self): - """Test if featurization works using AtomicConvFeaturizer.""" - dir_path = os.path.dirname(os.path.realpath(__file__)) - ligand_file = os.path.join(dir_path, "data/3zso_ligand_hyd.pdb") - protein_file = os.path.join(dir_path, "data/3zso_protein.pdb") - # Pulled from PDB files. For larger datasets with more PDBs, would use - # max num atoms instead of exact. - - frag1_num_atoms = 44 # for ligand atoms - frag2_num_atoms = 2336 # for protein atoms - complex_num_atoms = 2380 # in total - max_num_neighbors = 4 - # Cutoff in angstroms - neighbor_cutoff = 4 - - labels = np.array([0, 0]) - - featurizer = AtomicConvFeaturizer( - labels=labels, - batch_size=1, - epochs=1, - frag1_num_atoms=frag1_num_atoms, - frag2_num_atoms=frag2_num_atoms, - complex_num_atoms=complex_num_atoms, - max_num_neighbors=max_num_neighbors, - neighbor_cutoff=neighbor_cutoff) - - features, _ = featurizer.featurize([ligand_file, ligand_file], - [protein_file, protein_file])
check existing case before variant loading always check for old case id
@@ -207,6 +207,19 @@ class CaseHandler(object): # Build the case object case_obj = build_case(parsed_case, self) + # Check if case exists with old case id + old_caseid = '-'.join([case_obj['owner'], case_obj['display_name']]) + old_case = self.case(old_caseid) + if old_case: + logger.info("Update case id for existing case: %s -> %s", old_caseid, case_obj['_id']) + self.update_caseid(old_case, case_obj['_id']) + update = True + + # Check if case exists in database + existing_case = self.case(case_obj['_id']) + if existing_case and not update: + raise IntegrityError("Case %s already exists in database", case_obj['_id']) + files = [ {'file_name': 'vcf_snv', 'variant_type': 'clinical', 'category': 'snv'}, {'file_name': 'vcf_sv', 'variant_type': 'clinical', 'category': 'sv'}, @@ -235,22 +248,8 @@ class CaseHandler(object): except (IntegrityError, ValueError, ConfigError, KeyError) as error: logger.warning(error) - # Check if case exists with old case id - old_caseid = '-'.join([case_obj['owner'], case_obj['display_name']]) - old_case = self.case(old_caseid) - if old_case: - if update: - self.update_caseid(old_case, case_obj['_id']) - else: - raise IntegrityError("Case %s exists with old id", old_caseid) - - # Check if case exists in database - existing_case = self.case(case_obj['_id']) - if existing_case: - if update: + if existing_case and update: self.update_case(case_obj) - else: - raise IntegrityError("Case {0} already exists in database".format(case_obj['_id'])) else: logger.info('Loading case %s into database', case_obj['display_name']) self._add_case(case_obj)
Update README.rst update code
@@ -168,6 +168,7 @@ In pipeline, you can build NN structures in a keras style. Take Homo-NN as an ex Firstly, import keras and define your nn structures: .. code:: python + from tensorflow.keras import optimizers from tensorflow.keras.layers import Dense @@ -178,6 +179,7 @@ Then, add nn layers into Homo-NN model like using Sequential class in keras: .. code:: python from pipeline.component.homo_nn import HomoNN + # set parameter homo_nn_0 = HomoNN(name="homo_nn_0", max_iter=10, batch_size=-1, early_stop={"early_stop": "diff", "eps": 0.0001}) homo_nn_0.add(layer_0) @@ -186,12 +188,14 @@ Then, add nn layers into Homo-NN model like using Sequential class in keras: Set optimizer and compile Homo-NN model: .. code:: python + homo_nn_0.compile(optimizer=optimizers.Adam(learning_rate=0.05), metrics=["Hinge", "accuracy", "AUC"], loss="binary_crossentropy") Add it to pipeline: .. code:: python + pipeline.add_component(homo_nn, data=Data(train_data=dataio_0.output.data))
Fixes log formatting string. Closes-Bug:
@@ -1167,7 +1167,7 @@ class IPMIManagement(base.ManagementInterface): LOG.info('For node %(node_uuid)s, ' 'driver_info[\'ipmi_disable_boot_timeout\'] is set ' 'to False, so not sending ipmi boot-timeout-disable', - {'node_uuid', task.node.uuid}) + {'node_uuid': task.node.uuid}) ifbd = task.node.driver_info.get('ipmi_force_boot_device', False) if strutils.bool_from_string(ifbd):
refactor: Use placeholder instead of additional option in select Additional options were still selectable, which could have got through. Minor formatting changes
@@ -397,14 +397,19 @@ frappe.setup.slides_settings = [ }, { fieldtype: "Section Break" }, { - fieldname: "timezone", label: __("Time Zone"), reqd: 1, + fieldname: "timezone", + label: __("Time Zone"), + placeholder: __('Select Time Zone'), + reqd: 1, fieldtype: "Select", - }, { fieldtype: "Column Break" }, { - fieldname: "currency", label: __("Currency"), reqd: 1, - fieldtype: "Select" + fieldname: "currency", + label: __("Currency"), + placeholder: __('Select Currency'), + reqd: 1, + fieldtype: "Select", } ], @@ -514,7 +519,7 @@ frappe.setup.utils = { frappe.setup.data.email = r.message.email; callback(slide); } - }) + }); }, setup_language_field: function (slide) { @@ -538,12 +543,12 @@ frappe.setup.utils = { .empty() .add_options( frappe.utils.unique( - ["Select Currency"].concat($.map(data.country_info, opts => opts.currency).sort()) + $.map(data.country_info, opts => opts.currency).sort() ) ); slide.get_input("timezone").empty() - .add_options(["Select Timezone"].concat(data.all_timezones)); + .add_options(data.all_timezones); // set values if present if (frappe.wizard.values.country) { @@ -552,13 +557,9 @@ frappe.setup.utils = { country_field.set_input(data.default_country); } - if (frappe.wizard.values.currency) { slide.get_field("currency").set_input(frappe.wizard.values.currency); - } - if (frappe.wizard.values.timezone) { slide.get_field("timezone").set_input(frappe.wizard.values.timezone); - } }, @@ -595,16 +596,13 @@ frappe.setup.utils = { $timezone.empty(); // add country specific timezones first - if (country !== "Select Country" ) { - var timezone_list = data.country_info[country].timezones || []; + const timezone_list = data.country_info[country].timezones || []; $timezone.add_options(timezone_list.sort()); slide.get_field("currency").set_input(data.country_info[country].currency); slide.get_field("currency").$input.trigger("change"); - } // add all timezones at the end, so that user has the option to change it to any timezone - $timezone.add_options(["Select Timezone"].concat(data.all_timezones)); - + $timezone.add_options(data.all_timezones); slide.get_field("timezone").set_input($timezone.val()); // temporarily set date format @@ -614,7 +612,6 @@ frappe.setup.utils = { slide.get_input("currency").on("change", function () { var currency = slide.get_input("currency").val(); - if (currency === "Select Currency") return; frappe.model.with_doc("Currency", currency, function () { frappe.provide("locals.:Currency." + currency); var currency_doc = frappe.model.get_doc("Currency", currency); @@ -622,7 +619,7 @@ frappe.setup.utils = { if (number_format === "#.###") { number_format = "#.###,##"; } else if (number_format === "#,###") { - number_format = "#,###.##" + number_format = "#,###.##"; } frappe.boot.sysdefaults.number_format = number_format;
doc: attaching virtual persistent memory to guests Add a document for virtual persistent memory Partially-Implements: blueprint virtual-persistent-memory
@@ -31,3 +31,4 @@ instance for these kind of workloads. virtual-gpu file-backed-memory port_with_resource_request + virtual-persistent-memory
Can_Reach: protect against null input AST nodes TN:
@@ -2957,14 +2957,13 @@ package body ${ada_lib_name}.Analysis is -- Can_Reach -- --------------- - function Can_Reach (El, From : ${root_node_type_name}) return Boolean - is + function Can_Reach (El, From : ${root_node_type_name}) return Boolean is begin -- Since this function is only used to implement sequential semantics in -- envs, we consider that elements coming from different units are -- always visible for each other, and let the user implement language -- specific visibility rules in the DSL. - if El.Unit /= From.Unit then + if El = null or else El.Unit /= From.Unit then return True; end if;
Detect PHP before HTML Closes Related This change assumes you have Prettier PHP installed.
@@ -470,10 +470,10 @@ class JsPrettierCommand(sublime_plugin.TextCommand): return True if self.is_yaml(view) is True: return True - if self.is_html(view) is True: - return True if self.is_php(view) is True: return True + if self.is_html(view) is True: + return True if is_file_auto_formattable(view) is True: return True return False
[core] Cleanly abort on ctrl+c During debugging, being able to cleanly (i.e. without backtrace) exit using ctrl+c is a very welcome functionality :)
@@ -38,6 +38,9 @@ def main(): inp=inp, ) engine.run() + except KeyboardInterrupt as error: + inp.stop() + sys.exit(0) except BaseException as e: logging.exception(e) if output.started(): @@ -56,18 +59,6 @@ def main(): output.flush() output.end() time.sleep(1) -# try: -# except KeyboardInterrupt as error: -# inp.stop() -# sys.exit(0) -# except bumblebee.error.BaseError as error: -# inp.stop() -# sys.stderr.write("fatal: {}\n".format(error)) -# sys.exit(1) -# except Exception as error: -# inp.stop() -# sys.stderr.write("fatal: {}\n".format(error)) -# sys.exit(2) if __name__ == "__main__": main()
Update user_guide.md * Update user_guide.md fix typo
@@ -684,7 +684,7 @@ Model Definition The model definition is the core of Ludwig. It is a dictionary that contains all the information needed to build and train a Ludwig model. -I mixes ease of use, by means of reasonable defaults, with flexibility, by means of detailed control over the parameters of your model. +It mixes ease of use, by means of reasonable defaults, with flexibility, by means of detailed control over the parameters of your model. It is provided to both `experiment` and `train` commands either as a string (`--model_definition`) or as a file (`--model_definition_file`). The string or the content of the file will be parsed by PyYAML into a dictionary in memory, so any style of YAML accepted by the parser is considered to be valid, so both multiline and oneline formats are accepted. For instance a list of dictionaries can be written both as @@ -733,7 +733,7 @@ For instance a `sequence` feature can be encoded by a `stacked_cnn` or by and `r A list of all the encoders available for all the datatypes alongside with the description of all parameters will be provided in the datatype-specific sections. Some datatypes have only one type of encoder, so you are not required to specify it. -The role of the encoders is to map inputs into tensors, usually vectors in the case fo datatype without a temporal / sequential aspect, matrices in case there is a temporal / sequential aspect or higher rank tensors in case there is a spatial or a spatio-temporal aspect to the input data. +The role of the encoders is to map inputs into tensors, usually vectors in the case of datatype without a temporal / sequential aspect, matrices in case there is a temporal / sequential aspect or higher rank tensors in case there is a spatial or a spatio-temporal aspect to the input data. Different configurations of the same encoder may return a tensor with different rank, for instance a sequential encoder may return a vector of size `h` that is either the final vector of a sequence or the result of pooling over the sequence length, or it can return a matrix of size `l x h` where `l` is the length of the sequence and `h` is the hidden dimension if you specify the pooling reduce operation (`reduce_output`) to be `null`. For the sake of simplicity you can imagine the output to be a vector in most of the cases, but there is a `reduce_output` parameter one can specify to change the default behavior. @@ -924,7 +924,7 @@ The parameters available for preprocessing are Binary features have no encoder, the raw binary values coming from the input placeholders are just returned as outputs. By consequence there are no encoding parameters. -Inputs are of size `b` while outputs are fo size `b x 1` where `b` is the batch size. +Inputs are of size `b` while outputs are of size `b x 1` where `b` is the batch size. Example binary feature entry in the output features list:
Update v3 identity domain negative tests to work w/ pre-prov I don't see any limitations by using pre-provisioned credentials for these tests: * test_create_domain_with_empty_name * test_create_domain_with_name_length_over_64 * test_delete_active_domain * test_delete_non_existent_domain * test_domain_create_duplicate
@@ -20,6 +20,10 @@ from tempest.lib import exceptions as lib_exc class DomainsNegativeTestJSON(base.BaseIdentityV3AdminTest): + # NOTE: force_tenant_isolation is true in the base class by default but + # overridden to false here to allow test execution for clouds using the + # pre-provisioned credentials provider. + force_tenant_isolation = False @decorators.attr(type=['negative', 'gate']) @decorators.idempotent_id('1f3fbff5-4e44-400d-9ca1-d953f05f609b')
add more parser cases; fix function parameter parsing; remove extraneous type annotations
@@ -109,8 +109,53 @@ def test_modifiers(): ] +def test_structures(): + assert eval(str(fully_parse("[1 1+|`nice`"))) == [ + [ + "if_stmt", + [ + [ + ["none", ["number", "1"]], + ["none", ["number", "1"]], + ["none", ["general", "+"]], + ], + [["none", ["string", "nice"]]], + ], + ] + ] + + assert eval(str(fully_parse("1 10r(i|n2*,"))) == [ + ["none", ["number", "1"]], + ["none", ["number", "10"]], + ["none", ["general", "r"]], + [ + "for_loop", + [ + "i", + [ + ["none", ["general", "n"]], + ["none", ["number", "2"]], + ["none", ["general", "*"]], + ["none", ["general", ","]], + ], + ], + ], + ] + + assert eval(str(fully_parse("@triple:1|3*;"))) == [ + [ + "function", + [ + ["triple", "1"], + [["none", ["number", "3"]], ["none", ["general", "*"]]], + ], + ] + ] + + if __name__ == "__main__": test_basic() test_fizzbuzz() test_modifiers() + test_structures() print("everything passed")
speed up sameAsReferenceImplementation This takes ~15 minutes right now
@@ -279,8 +279,8 @@ class BinaryHeapSuite { import Gen._ val ops = for { - maxOrExtract <- buildableOfN(1024, oneOfGen(const(Max()), const(ExtractMax()))) - ranks <- distinctBuildableOfN(1024, arbitrary[Long]) + maxOrExtract <- buildableOfN(64, oneOfGen(const(Max()), const(ExtractMax()))) + ranks <- distinctBuildableOfN(64, arbitrary[Long]) inserts = ranks.map(r => Insert(r, r)) ret <- Gen.shuffle(inserts ++ maxOrExtract) } yield ret
[hotfix] remove setup fail message Otherwise it also appears for non-cloud users, where we don't receive an email:
@@ -217,12 +217,6 @@ frappe.setup.SetupWizard = class SetupWizard extends frappe.ui.Slides { this.$working_state.find('.state-icon-container').html(''); fail_msg = fail_msg ? fail_msg : __("Failed to complete setup"); - if(error && !frappe.boot.developer_mode) { - frappe.msgprint(`Don't worry. It's not you, it's us. We've - received the issue details and will get back to you on the solution. - Please feel free to contact us on [email protected] in the meantime.`); - } - this.update_setup_message('Could not start up: ' + fail_msg); this.$working_state.find('.title').html('Setup failed');
feat(fund_em_aum_hist): add fund_em_aum_hist interface
@@ -141,7 +141,7 @@ def stock_sina_lhb_jgzz(recent_day: str = "5") -> pd.DataFrame: except: last_page_num = 1 big_df = pd.DataFrame() - for page in tqdm(range(1, last_page_num + 1), leave=False,): + for page in tqdm(range(1, last_page_num + 1), leave=False): params = { "last": recent_day, "p": page, @@ -196,13 +196,13 @@ if __name__ == "__main__": ) print(stock_sina_lhb_detail_daily_df) - stock_sina_lhb_ggtj_df = stock_sina_lhb_ggtj(recent_day="10") + stock_sina_lhb_ggtj_df = stock_sina_lhb_ggtj(recent_day="60") print(stock_sina_lhb_ggtj_df) stock_sina_lhb_yytj_df = stock_sina_lhb_yytj(recent_day="5") print(stock_sina_lhb_yytj_df) - stock_sina_lhb_jgzz_df = stock_sina_lhb_jgzz(recent_day="5") + stock_sina_lhb_jgzz_df = stock_sina_lhb_jgzz(recent_day="10") print(stock_sina_lhb_jgzz_df) stock_sina_lhb_jgmx_df = stock_sina_lhb_jgmx()
docs: fix unescaped html tag If the description of the method contains any HTML tag, it will break the HTML rendering. This commit escapes the html tag to prevent this problem. * Use html standard lib when using python3 * Use cgi standard lib when using python2 Refs Release-As: 1.8.2
@@ -247,6 +247,12 @@ def method(name, doc): """ params = method_params(doc) + if sys.version_info.major >= 3: + import html + doc = html.escape(doc) + else: + import cgi + doc = cgi.escape(doc) return string.Template(METHOD_TEMPLATE).substitute( name=name, params=params, doc=doc )
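A minimal standalone sketch of the escaping behaviour the patch above relies on (the helper name and sample string are invented for illustration, not part of the library):

    import sys

    def escape_doc(doc):
        # Same version split as the patch: html.escape on Python 3,
        # cgi.escape on Python 2; both turn <, > and & into HTML entities.
        if sys.version_info.major >= 3:
            import html
            return html.escape(doc)
        else:
            import cgi
            return cgi.escape(doc)

    print(escape_doc("Returns a <code>Job</code> resource."))
    # -> Returns a &lt;code&gt;Job&lt;/code&gt; resource.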
Use cached guildfiles when loading from dir Fixes essentially a typo.
@@ -824,7 +824,7 @@ def from_dir(path, filenames=None): model_file = os.path.abspath(os.path.join(path, name)) if os.path.isfile(model_file): log.debug("found model source '%s'", model_file) - return _load_guildfile(model_file) + return from_file(model_file) raise NoModels(path) def is_guildfile_dir(path):
What to do with `X`? Either continue or break has been suggested.
@@ -153,7 +153,7 @@ T (a: any) = truthy indices in a U (a: any) = uniquifed(a) # uniquify, unique items, remove duplicates V (a: any, b: any, c: any) = a.replace(needle=b, replacement=c) # replace W = [stack] # wrap stack, lisitfy whole stack -X = * context level down +X = Y (a: any, b: any) = interleave(a, b) # Interleave Z (a: any, b: any) = zip(a, b) # Zip [ = * open if statement: [truthy|falsey]
fix Adobe ID users may not have username or domain fields. For Adobe IDs, all we can rely on is email.
@@ -875,6 +875,9 @@ class RuleProcessor(object): :type umapi_user: dict """ id_type = self.get_identity_type_from_umapi_user(umapi_user) + if id_type == user_sync.identity_type.ADOBEID_IDENTITY_TYPE: + return self.get_user_key(id_type, '', '', umapi_user['email']) + else: return self.get_user_key(id_type, umapi_user['username'], umapi_user['domain'], umapi_user['email']) def get_user_key(self, id_type, username, domain, email=None):
Unquieten make_dev_install by default Summary: Adds make_dev_install_quiet for those who want that. Test Plan: Manual Reviewers: schrockn, nate
@@ -85,7 +85,9 @@ sanity_check: rebuild_dagit: sanity_check cd js_modules/dagit/; yarn install --offline && yarn build-for-python -dev_install: install_dev_python_modules rebuild_dagit +dev_install: install_dev_python_modules_verbose rebuild_dagit + +dev_install_quiet: install_dev_python_modules rebuild_dagit graphql_tests: pytest examples/dagster_examples_tests/graphql_tests/ python_modules/dagster-graphql/dagster_graphql_tests/graphql/ -s -vv
PathModel : Remove unneeded `Item::State::Requested` enum value This was necessary before we introduced the asynchronous updates, but serves no purpose now.
@@ -1097,7 +1097,7 @@ class PathModel : public QAbstractItemModel // responsible for caching the results of these queries internally. QVariant data( int column, int role, const PathModel *model ) { - if( requestIfUnrequested( m_dataState ) ) + if( dirtyIfUnrequested( m_dataState ) ) { const_cast<PathModel *>( model )->scheduleUpdate(); } @@ -1131,7 +1131,7 @@ class PathModel : public QAbstractItemModel ChildContainer &childItems( const PathModel *model ) { - if( requestIfUnrequested( m_childItemsState ) ) + if( dirtyIfUnrequested( m_childItemsState ) ) { const_cast<PathModel *>( model )->scheduleUpdate(); } @@ -1317,7 +1317,7 @@ class PathModel : public QAbstractItemModel if( match & IECore::PathMatcher::DescendantMatch ) { // Force creation of children so we can expand them. - requestIfUnrequested( m_childItemsState ); + dirtyIfUnrequested( m_childItemsState ); } // Handle expansion for selection updates. @@ -1343,7 +1343,7 @@ class PathModel : public QAbstractItemModel if( scrollToMatch & IECore::PathMatcher::DescendantMatch ) { // Force creation of children so we can scroll to them. - requestIfUnrequested( m_childItemsState ); + dirtyIfUnrequested( m_childItemsState ); } } @@ -1371,7 +1371,7 @@ class PathModel : public QAbstractItemModel if( selectionMatch & IECore::PathMatcher::DescendantMatch ) { // Force creation of children so we can expand them. - requestIfUnrequested( m_childItemsState ); + dirtyIfUnrequested( m_childItemsState ); } } @@ -1437,7 +1437,7 @@ class PathModel : public QAbstractItemModel for( const auto &childItem : newChildItems ) { - requestIfUnrequested( childItem->m_dataState ); + dirtyIfUnrequested( childItem->m_dataState ); sortedIndices.push_back( SortablePair( childItem->updateData( model, children[sortedIndices.size()].get(), canceller ), sortedIndices.size() @@ -1546,30 +1546,25 @@ class PathModel : public QAbstractItemModel // State transitions : // - // - Unrequested->Requested : When first queried. - // - Requested->Clean : When first updated. + // - Unrequested->Dirty : When first queried. + // - Dirty->Clean : When updated. // - Clean->Dirty : When path changes. - // - Dirty->Clean : On all subsequent updates. enum class State { // Initial state. Not yet requested by clients // of the model, therefore not yet computed, and not // in need of consideration during recursive updates. Unrequested, - // Has just been requested for the first time. Needs - // to be updated, but there is no need to emit change - // signals for the first update. - Requested, // Computed and up to date. Clean, // Stale data that needs recomputing. Dirty }; - static bool requestIfUnrequested( std::atomic<State> &state ) + static bool dirtyIfUnrequested( std::atomic<State> &state ) { State unrequested = State::Unrequested; - return state.compare_exchange_strong( unrequested, State::Requested ); + return state.compare_exchange_strong( unrequested, State::Dirty ); } std::atomic<State> m_dataState;
Fix EMA GPU test Summary: The GPU test was broken after (https://github.com/pytorch/fairseq/commit/1b61bbad327d2bf32502b3b9a770b57714cc43dc)
@@ -36,6 +36,7 @@ class EMAConfig(object): ema_start_update: int = 0 ema_fp32: bool = False ema_seed_model: Optional[str] = None + ema_update_freq: int = 1 @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
Use a smaller scroll page size to avoid timeouts (Looks like a repeat of https://manage.dimagi.com/default.asp?248384)
@@ -336,7 +336,8 @@ def get_export_documents(export_instance, filters): # We believe we can occasionally hit the 5m limit to process a single scroll window # with a window size of 1000 (https://manage.dimagi.com/default.asp?248384). # Thus, smaller window size is intentional - return query.size(500).scroll() + # Another option we have is to bump the "scroll" parameter up from "5m" + return query.size(200).scroll() def _get_export_query(export_instance, filters):
fix: remove CONF_OAUTH_LOGIN calls closes
@@ -150,7 +150,6 @@ async def async_setup(hass, config, discovery_info=None): ].total_seconds(), CONF_OAUTH: account.get(CONF_OAUTH, {}), CONF_OTPSECRET: account.get(CONF_OTPSECRET, ""), - CONF_OAUTH_LOGIN: account.get(CONF_OAUTH_LOGIN, True), }, ) entry_found = True @@ -171,7 +170,6 @@ async def async_setup(hass, config, discovery_info=None): CONF_SCAN_INTERVAL: account[CONF_SCAN_INTERVAL].total_seconds(), CONF_OAUTH: account.get(CONF_OAUTH, {}), CONF_OTPSECRET: account.get(CONF_OTPSECRET, ""), - CONF_OAUTH_LOGIN: account.get(CONF_OAUTH_LOGIN, True), }, ) ) @@ -307,10 +305,7 @@ async def async_setup_entry(hass, config_entry): otp_secret=account.get(CONF_OTPSECRET, ""), oauth=account.get(CONF_OAUTH, {}), uuid=uuid, - oauth_login=bool( - account.get(CONF_OAUTH, {}).get("access_token") - or account.get(CONF_OAUTH_LOGIN) - ), + oauth_login=True, ), ) hass.data[DATA_ALEXAMEDIA]["accounts"][email]["login_obj"] = login
Source: Refactoring pre-defined sources to include `GaborSource` We now use a base class `WaveletSource` that can be arbitrarily subclassed to provide further source presets.
@@ -5,7 +5,7 @@ from devito.logger import error import numpy as np import matplotlib.pyplot as plt -__all__ = ['PointSource', 'Receiver', 'Shot', 'RickerSource'] +__all__ = ['PointSource', 'Receiver', 'Shot', 'RickerSource', 'GaborSource'] class PointSource(PointData): @@ -54,12 +54,12 @@ Receiver = PointSource Shot = PointSource -class RickerSource(PointSource): +class WaveletSource(PointSource): """ - Symbolic object to encapsulate a set of sources with a - pre-defined Ricker wavelet. + Abstract base class for symbolic objects that encapsulate a set of + sources with a pre-defined source signal wavelet. - :param name: Name for the reuslting symbol + :param name: Name for the resulting symbol :param f0: Peak frequency for Ricker wavelet in kHz :param time: Discretized values of time in ms :param ndim: Number of spatial dimensions @@ -80,17 +80,16 @@ class RickerSource(PointSource): def __init__(self, *args, **kwargs): if not self._cached(): - super(RickerSource, self).__init__(*args, **kwargs) + super(WaveletSource, self).__init__(*args, **kwargs) def wavelet(self, f0, t): """ - Create Ricker wavelet with a peak frequency f0 at time t. + Defines a wavelet with a peak frequency f0 at time t. - :param f0: Peak frequency - :param t: Discretized values of time + :param f0: Peak frequency in kHz + :param t: Discretized values of time in ms """ - r = (np.pi * f0 * (t - 1./f0)) - return (1-2.*r**2)*np.exp(-r**2) + raise NotImplementedError('Wavelet not defined') def show(self, idx=0, time=None, wavelet=None): """ @@ -108,3 +107,53 @@ class RickerSource(PointSource): plt.ylabel('Velocity (km/s)') plt.tick_params() plt.show() + + +class RickerSource(WaveletSource): + """ + Symbolic object that encapsulate a set of sources with a + pre-defined Ricker wavelet: + + http://subsurfwiki.org/wiki/Ricker_wavelet + + :param name: Name for the resulting symbol + :param f0: Peak frequency for Ricker wavelet in kHz + :param time: Discretized values of time in ms + :param ndim: Number of spatial dimensions + """ + + def wavelet(self, f0, t): + """ + Defines a Ricker wavelet with a peak frequency f0 at time t. + + :param f0: Peak frequency in kHz + :param t: Discretized values of time in ms + """ + r = (np.pi * f0 * (t - 1./f0)) + return (1-2.*r**2)*np.exp(-r**2) + + +class GaborSource(WaveletSource): + """ + Symbolic object that encapsulate a set of sources with a + pre-defined Gabor wavelet: + + https://en.wikipedia.org/wiki/Gabor_wavelet + + :param name: Name for the resulting symbol + :param f0: Peak frequency for Ricker wavelet in kHz + :param time: Discretized values of time in ms + :param ndim: Number of spatial dimensions + """ + + def wavelet(self, f0, t): + """ + Defines a Gabor wavelet with a peak frequency f0 at time t. + + :param f0: Peak frequency in kHz + :param t: Discretized values of time in ms + """ + agauss = 0.5 * f0 + tcut = 1.5 / agauss + s = (t-tcut) * agauss + return np.exp(-2*s**2) * np.cos(2 * np.pi * s)
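A self-contained check of the Ricker formula introduced above (the 10 Hz peak frequency and time grid are arbitrary illustration values):

    import numpy as np

    f0 = 0.010                        # 10 Hz peak frequency, in kHz to match the ms time axis
    t = np.linspace(0.0, 300.0, 301)  # time in ms

    r = np.pi * f0 * (t - 1.0 / f0)
    ricker = (1.0 - 2.0 * r**2) * np.exp(-r**2)

    print(ricker.max())               # -> 1.0, the wavelet peaks at amplitude 1
    print(float(t[ricker.argmax()]))  # -> 100.0, i.e. the peak sits at t = 1/f0 ms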
circleci: Store XUnit test results. Fixes part of
@@ -113,6 +113,9 @@ jobs: path: ../../../tmp/zulip-test-event-log/ destination: test-reports + - store_test_results: + path: ./var/xunit-test-results/casper/ + "bionic-backend-python3.6": docker: # This is built from tools/circleci/images/bionic/Dockerfile .
Update sc2reader/scripts/sc2json.py Fixed type - Committed suggestion from PR review
@@ -34,6 +34,11 @@ def main(): args = parser.parse_args() factory = sc2reader.factories.SC2Factory() + try: + factory.register_plugin( + "Replay", toJSON(encoding=args.encoding, indent=args.indent) + ) # legacy Python + except TypeError: factory.register_plugin("Replay", toJSON(indent=args.indent)) replay_json = factory.load_replay(args.path[0]) print(replay_json)
fix: inject direct response for sidecar acme-challenge This was accidentally reverted, so this just replaces the logic from
@@ -892,32 +892,22 @@ class V3Listener: # If we're on Edge Stack and we don't already have an ACME route, add one. if self.config.ir.edge_stack_allowed and not found_acme: - # The target cluster doesn't actually matter -- the auth service grabs the - # challenge and does the right thing. But we do need a cluster that actually - # exists, so use the sidecar cluster. - - if not self.config.ir.sidecar_cluster_name: - # Uh whut? how is Edge Stack running exactly? - raise Exception( - "Edge Stack claims to be running, but we have no sidecar cluster??" - ) + # This route is needed to trigger an ExtAuthz request for the AuthService. + # The auth service grabs the challenge and does the right thing. + # Rather than try to route to some existing cluster we can just return a + # direct response. What we return doesn't really matter but + # to match existing Edge Stack behavior we return a 404 response. self.config.ir.logger.debug(" punching a hole for ACME") - # Make sure to include _host_constraints in here for now. - # - # XXX This is needed only because we're dictifying the V3Route too early. - + # Make sure to include _host_constraints in here for now so it can be + # applied to the correct vhost during future proccessing chain.routes.insert( 0, { "_host_constraints": set(), "match": {"case_sensitive": True, "prefix": "/.well-known/acme-challenge/"}, - "route": { - "cluster": self.config.ir.sidecar_cluster_name, - "prefix_rewrite": "/.well-known/acme-challenge/", - "timeout": "3.000s", - }, + "direct_response": {"status": 404}, }, )
[bugfix] use provided edit summary This solves regression of
@@ -948,7 +948,7 @@ def main(*args): elif arg.startswith('-addcat:'): options['addcat'] = arg[8:] elif arg.startswith('-summary:'): - options['summary'] = arg[9:] + edit_summary = arg[9:] elif arg.startswith('-automaticsummary'): edit_summary = True elif arg.startswith('-manualinput'): @@ -1156,7 +1156,8 @@ LIMIT 200""" % (whereClause, exceptClause) pywikibot.bot.suggest_help(missing_generator=True) return - bot = ReplaceRobot(gen, replacements, exceptions, site=site, **options) + bot = ReplaceRobot(gen, replacements, exceptions, site=site, + summary=edit_summary, **options) site.login() bot.run()
tools: Allow optional arguments after file arguments in test_backend. Fixes Uses nargs='*' instead of nargs='argparse.REMAINDER'. nargs='argparse.REMAINDER' gathers the remaining terms as arguments even if they are options, e.g. --coverage, while '*' gathers all the command-line arguments until the next option is encountered.
@@ -234,7 +234,7 @@ if __name__ == "__main__": default=False, help=("Run the tests which failed the last time " "test-backend was run. Implies --nonfatal-errors.")) - parser.add_argument('args', nargs=argparse.REMAINDER) + parser.add_argument('args', nargs='*') options = parser.parse_args() args = options.args
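A rough standalone demonstration of the nargs difference described in the commit message above (the file name used here is invented):

    import argparse

    def parse(nargs_value, argv):
        parser = argparse.ArgumentParser()
        parser.add_argument('--coverage', action='store_true')
        parser.add_argument('args', nargs=nargs_value)
        return parser.parse_args(argv)

    argv = ['zerver.tests.test_example', '--coverage']
    # REMAINDER swallows everything after the first file, including the option:
    print(parse(argparse.REMAINDER, argv))  # coverage=False, args=[file, '--coverage']
    # '*' stops at the next option string, so --coverage is parsed normally:
    print(parse('*', argv))                 # coverage=True, args=[file]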
fix RSA Netwitness SA integration Was missing break in switch, and it caused all the commands to run one after another
@@ -1074,48 +1074,63 @@ script: switch (command) { case 'fetch-incidents': results = fetchIncidents(sessionId, args, incidentManagementId); + break; case 'test-module': results = 'ok'; + break; case 'nw-login': results = sessionId; + break; case 'nw-list-incidents': var incidents = listIncidents(sessionId, args, incidentManagementId); results = { incidents: incidents }; + break; case 'nw-get-incident-details': var incident = getIncidentById(sessionId, args, incidentManagementId); results = { incident: incident }; + break; case 'nw-get-components': var components = getComponents(sessionId, args); results = { components: components }; + break; case 'nw-get-events': var events = getEvents(sessionId, args); results = { events: events }; + break; case 'nw-get-event-details': results = getEventDetails(sessionId, args); + break; case 'nw-get-alerts': var alerts = filterAlerts(sessionId, args); results = { alerts: alerts }; + break; case 'nw-get-alert-details': var alert = getAlertById(sessionId, args); results = { alert: alert }; + break; case 'nw-get-alert-original': var alert = getOriginalAlertById(sessionId, args); results = { alert: alert }; + break; case 'nw-get-available-assignees': var availableAssignees = getAvailableAssignees(sessionId); results = { availableAssignees: availableAssignees }; + break; case 'nw-create-incident': var availableAssignees = getAvailableAssignees(sessionId); var newIncident = createIncident(sessionId, args, incidentManagementId, availableAssignees); results = { incident: newIncident }; + break; case 'nw-add-events-to-incident': var availableAssignees = getAvailableAssignees(sessionId, args); var isSuccess = addEventsToIncident(sessionId, args, incidentManagementId, availableAssignees); results = { success: isSuccess }; + break; case 'nw-update-incident': var availableAssignees = getAvailableAssignees(sessionId, args); var updatedIncident = updateIncident(sessionId, args, incidentManagementId, availableAssignees); results = updatedIncident; + break; default: // You can use args[argName] or args.argName to get a specific arg. args are strings. // You can use params[paramName] or params.paramName to get a specific params.
Converted DigikeyError exceptions to KiCostError ones They are known errors.
@@ -30,7 +30,7 @@ __company__ = 'Instituto Nacional de Tecnologia Industrial - Argentina' import pprint # KiCost definitions. -from ..global_vars import DEBUG_OVERVIEW, DEBUG_DETAILED, DEBUG_OBSESSIVE, W_NOINFO +from ..global_vars import DEBUG_OVERVIEW, DEBUG_DETAILED, DEBUG_OBSESSIVE, W_NOINFO, KiCostError, ERR_SCRAPE from .. import DistData # Distributors definitions. from .distributor import distributor_class @@ -40,7 +40,7 @@ available = True # from kicost_digikey_api_v3 import by_digikey_pn, by_manf_pn, by_keyword, configure # except ImportError: # available = False -from kicost_digikey_api_v3 import by_digikey_pn, by_manf_pn, by_keyword, configure # noqa: E402 +from kicost_digikey_api_v3 import by_digikey_pn, by_manf_pn, by_keyword, configure, DigikeyError # noqa: E402 DIST_NAME = 'digikey' @@ -60,7 +60,7 @@ class api_digikey(distributor_class): distributor_class.add_distributors([DIST_NAME]) @staticmethod - def query_part_info(parts, distributors, currency): + def _query_part_info(parts, distributors, currency): '''Fill-in the parts with price/qty/etc info from KitSpace.''' if DIST_NAME not in distributors: distributor_class.logger.log(DEBUG_OVERVIEW, '# Skipping Digi-Key plug-in') @@ -120,5 +120,15 @@ class api_digikey(distributor_class): progress.update(1) progress.close() + @staticmethod + def query_part_info(parts, distributors, currency): + msg = None + try: + api_digikey._query_part_info(parts, distributors, currency) + except DigikeyError as e: + msg = e.args[0] + if msg is not None: + raise KiCostError(msg, ERR_SCRAPE) + distributor_class.register(api_digikey, 100)
Cleanup gtk/ProgressBar - remove redundant `rehint()`, remove unused `if/else` from `start()`
@@ -24,9 +24,6 @@ class ProgressBar(Widget): self._render_disabled() def start(self): - if self.interface.max: - pass # GTK has no 'working' animation - else: GObject.timeout_add(60, self._pulse, None) def stop(self): @@ -38,10 +35,3 @@ class ProgressBar(Widget): self.set_value(None) else: self._render_disabled() - - def rehint(self): - width = self.native.get_preferred_width() - height = self.native.get_preferred_height() - - self.interface.intrinsic.width = at_least(width[0]) - self.interface.intrinsic.height = height[1]
fixed bug `residual_before_ln` - the second check missed the `not`; fixed bug `enable_adapters(adapter_type, True, True)`
@@ -17,6 +17,7 @@ class BertSelfOutputAdaptersMixin: self.attention_adapters_fusion = nn.ModuleDict(dict()) self.attention_text_lang_adapters = nn.ModuleDict(dict()) self.language_attention_adapters_fusion = nn.ModuleDict(dict()) + self.language_adapter_attention = nn.ModuleDict(dict()) def add_adapter(self, adapter_name: str, adapter_type: AdapterType): adapter_config = self.config.adapters.get(adapter_name) @@ -106,7 +107,7 @@ class BertSelfOutputAdaptersMixin: if lang_adapter_config['original_ln_before']: hidden_states = self.LayerNorm(hidden_states + input_tensor) - if lang_adapter_config['residual_before_ln']: + if not lang_adapter_config['residual_before_ln']: residual = hidden_states * 1.0 hidden_states, adapter_attention, down, up = self.attention_text_lang_adapters[language]( @@ -134,7 +135,7 @@ class BertSelfOutputAdaptersMixin: if task_adapter_config['original_ln_before']: hidden_states = self.LayerNorm(hidden_states + input_tensor) - if task_adapter_config['residual_before_ln']: + if not task_adapter_config['residual_before_ln']: residual = hidden_states * 1.0 if hasattr(self.config, 'fusion_config') and not self.config.fusion_config['query_before_ln']: @@ -263,7 +264,7 @@ class BertOutputAdaptersMixin: if lang_adapter_config['original_ln_before']: hidden_states = self.LayerNorm(hidden_states + input_tensor) - if lang_adapter_config['residual_before_ln']: + if not lang_adapter_config['residual_before_ln']: residual = hidden_states * 1.0 hidden_states, adapter_attention, down, up = self.layer_text_lang_adapters[language]( @@ -291,7 +292,7 @@ class BertOutputAdaptersMixin: if task_adapter_config['original_ln_before']: hidden_states = self.LayerNorm(hidden_states + input_tensor) - if task_adapter_config['residual_before_ln']: + if not task_adapter_config['residual_before_ln']: residual = hidden_states * 1.0 if hasattr(self.config, 'fusion_config') and not self.config.fusion_config['query_before_ln']: @@ -442,7 +443,7 @@ class BertModelAdaptersMixin(ModelAdaptersMixin): raise ValueError("No adapters of this type available fro training.") self.train() self.freeze_model(True) - self.encoder.enable_adapters(adapter_type, True, True) + self.encoder.enable_adapters(adapter_type, True, False) # unfreeze invertible adapters for language adapters if adapter_type == AdapterType.text_lang: for param in self.invertible_lang_adapters.parameters():
Remove Exception on _default_verify_function failure This makes it match the verify function documentation.
@@ -123,12 +123,15 @@ class DeviceInterface(object): #if the user has specified a custom verify function, then call it, else use default based on numpy allclose if verify: try: - return verify(answer, result_host, atol=atol) + correct = verify(answer, result_host, atol=atol) except TypeError: - return verify(answer, result_host) + correct = verify(answer, result_host) else: - return _default_verify_function(instance, answer, result_host, atol, verbose) + correct = _default_verify_function(instance, answer, result_host, atol, verbose) + if not correct: + raise Exception("Kernel result verification failed for: " + util.get_config_string(instance.params)) + return True def compile_and_benchmark(self, gpu_args, params, kernel_options, tuning_options): """ Compile and benchmark a kernel instance based on kernel strings and parameters """ @@ -341,7 +344,6 @@ def _default_verify_function(instance, answer, result_host, atol, verbose): if not correct: logging.debug('correctness check has found a correctness issue') - raise Exception("Error: " + util.get_config_string(instance.params) + " failed correctness check") return correct
Move the callback call to the very end of teardown Fixes
@@ -299,9 +299,11 @@ def teardown(close_frame=None): self.sock.close() close_status_code, close_reason = self._get_close_args( close_frame if close_frame else None) - self._callback(self.on_close, close_status_code, close_reason) self.sock = None + # Finally call the callback AFTER all teardown is complete + self._callback(self.on_close, close_status_code, close_reason) + try: self.sock = WebSocket( self.get_mask_key, sockopt=sockopt, sslopt=sslopt,
[ENH] set default for forecaster tag `ignores-exogeneous-X` to `False` This PR sets the default for the forecaster tag `ignores-exogeneous-X` to `False`. This is the safer default; otherwise exogeneous `X` will be ignored under the default setting, and this might be accidental and unexpected for an implementer. See bug here:
@@ -88,7 +88,7 @@ class BaseForecaster(BaseEstimator): # default tag values - these typically make the "safest" assumption _tags = { "scitype:y": "univariate", # which y are fine? univariate/multivariate/both - "ignores-exogeneous-X": True, # does estimator ignore the exogeneous X? + "ignores-exogeneous-X": False, # does estimator ignore the exogeneous X? "capability:pred_int": False, # can the estimator produce prediction intervals? "handles-missing-data": False, # can estimator handle missing data? "y_inner_mtype": "pd.Series", # which types do _fit/_predict, support for y?
Added initial values for pulse parameters This was needed such that the martinis pulse does not have any poles or discontinuities when calculated with the initial values.
@@ -137,9 +137,11 @@ class QWG_FluxLookuptableManager(Instrument): self.add_parameter('F_kernel_instr', parameter_class=InstrumentParameter) - self.add_parameter('F_amp', unit='V', parameter_class=ManualParameter) + self.add_parameter('F_amp', unit='V', parameter_class=ManualParameter, + initial_value=0) self.add_parameter('F_length', unit='s', - parameter_class=ManualParameter) + parameter_class=ManualParameter, + initial_value=1e-6) self.add_parameter('F_ch', label='Flux channel', vals=vals.Ints(), parameter_class=ManualParameter) @@ -179,20 +181,20 @@ class QWG_FluxLookuptableManager(Instrument): docstring='theta_f for martinis pulse', label='theta_f', unit='deg', - initial_value=0.0, + initial_value=90, vals=vals.Numbers(), parameter_class=ManualParameter) self.add_parameter('F_J2', docstring='coupling between 11-02', label='J2', unit='Hz', - initial_value=0.0, + initial_value=10e6, vals=vals.Numbers(), parameter_class=ManualParameter) self.add_parameter('F_f_interaction', label='interaction frequency', unit='Hz', - initial_value=0.0, + initial_value=5e9, vals=vals.Numbers(), parameter_class=ManualParameter) self.add_parameter('F_dac_flux_coef', @@ -205,13 +207,13 @@ class QWG_FluxLookuptableManager(Instrument): self.add_parameter('F_E_c', label='qubit E_c', unit='Hz', - initial_value=0.0, + initial_value=250e6, vals=vals.Numbers(), parameter_class=ManualParameter) self.add_parameter('F_f_01_max', label='sweet spot freq', unit='Hz', - initial_value=0.0, + initial_value=6e9, vals=vals.Numbers(), parameter_class=ManualParameter) self.add_parameter('F_asymmetry', @@ -362,7 +364,7 @@ class QWG_FluxLookuptableManager(Instrument): delayed_wave = np.concatenate([wait_samples, np.array(waveform), wait_samples_2, -1*np.array(waveform)]) distorted_wave = k.convolve_kernel([k.kernel(), delayed_wave], - length_samples=60e3) + length_samples=30e3) # was 60e3 self._wave_dict[pulse_name] = distorted_wave return distorted_wave
Create `join_role_stats` function in helpers Add `join_role_stats` function that joins the relevant information (number of members) of the given roles into one group under a pre-specified `name`
from abc import ABCMeta
-from typing import Optional
+from typing import List
+from typing import Dict, Optional
+from discord import Guild
 from discord.ext.commands import CogMeta
 
@@ -30,3 +32,11 @@ def has_lines(string: str, count: int) -> bool:
 def pad_base64(data: str) -> str:
     """Return base64 `data` with padding characters to ensure its length is a multiple of 4."""
     return data + "=" * (-len(data) % 4)
+
+
+def join_role_stats(role_ids: List[int], name: str, guild: Guild) -> Dict[str, int]:
+    """Return a dict object with the number of `members` of each role given, and the `name` for this joined group."""
+    members = []
+    for role_id in role_ids:
+        members += guild.get_role(role_id).members
+    return {name: len(set(members))}
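A rough sketch of the de-duplication the helper's `set()` call provides (plain lists and made-up ids stand in for the real `Role.members` lookups):

    # Two hypothetical roles; member 102 holds both of them.
    role_members = {
        111: [101, 102],
        222: [102, 103, 104],
    }

    members = []
    for role_id in (111, 222):
        members += role_members[role_id]

    # Without the set() there would be 5 entries; the overlap is counted once.
    print({"helpers": len(set(members))})  # -> {'helpers': 4}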
Update lxd init message to remove auto setup Fixes
@@ -55,10 +55,7 @@ class CloudView(WidgetWrap): " $ sudo snap install lxd\n" " $ sudo usermod -a -G lxd <youruser>\n" " $ newgrp lxd\n" - " $ /snap/bin/lxd init --auto\n" - " $ /snap/bin/lxc network create lxdbr0 " - "ipv4.address=auto ipv4.nat=true " - "ipv6.address=none ipv6.nat=false ") + " $ /snap/bin/lxd init") def __init__(self, app, public_clouds, custom_clouds, compatible_cloud_types, cb=None):
change isShellBuiltin change isShellBuiltin to store each builtin as a separate token in a set and look up cmd as an element of the set instead of as a sequence of characters in a string (tested only on Python 3.6.7). Corrected according to comments.
@@ -181,7 +181,7 @@ def which(cmd, **kwargs ): def isShellBuiltin( cmd ): "Return True if cmd is a bash builtin." if isShellBuiltin.builtIns is None: - isShellBuiltin.builtIns = quietRun( 'bash -c enable' ) + isShellBuiltin.builtIns = set(quietRun( 'bash -c enable' ).split()) space = cmd.find( ' ' ) if space > 0: cmd = cmd[ :space]
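A minimal sketch of why the set lookup matters (the builtin list is abbreviated and hard-coded here instead of coming from `bash -c enable`):

    builtins_text = "alias bg bind break cd command compgen"

    # Old behaviour: substring search over the whole output string,
    # so fragments of builtin names match too.
    print("comp" in builtins_text)   # -> True, although "comp" is not a builtin

    # New behaviour: one token per builtin, exact membership test.
    builtins = set(builtins_text.split())
    print("comp" in builtins)        # -> False
    print("cd" in builtins)          # -> True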
Fix eigenvalue_band_properties check Fix eigenvalue_band_properties check for separate_spins (index error)
@@ -721,7 +721,7 @@ class VasprunTest(PymatgenTest): self.assertAlmostEqual(props[1][1], 1.6225, places=4) self.assertAlmostEqual(props[2][0], 0.7969, places=4) self.assertAlmostEqual(props[2][1], 0.3415, places=4) - self.assertAlmostEqual(props2[0], np.min(props[1]) - np.max(props[1]), places=4) + self.assertAlmostEqual(props2[0], np.min(props[1]) - np.max(props[2]), places=4) self.assertEqual(props[3][0], True) self.assertEqual(props[3][1], True) @@ -2043,7 +2043,7 @@ class EigenvalTest(PymatgenTest): self.assertAlmostEqual(props[1][1], 1.6225, places=4) self.assertAlmostEqual(props[2][0], 0.7969, places=4) self.assertAlmostEqual(props[2][1], 0.3415, places=4) - self.assertAlmostEqual(props2[0], np.min(props[1]) - np.max(props[1]), places=4) + self.assertAlmostEqual(props2[0], np.min(props[1]) - np.max(props[2]), places=4) self.assertEqual(props[3][0], True) self.assertEqual(props[3][1], True)
tests: delete journal partitions in lvm_setup.yml Delete these before creating them in case they are left around in a purge cluster testing scenario. The purge-cluster.yml playbook does not currently remove partitions used for journals.
command: lvcreate --yes -l 50%FREE -n data-lv2 test_group failed_when: false + # purge-cluster.yml does not properly destroy partitions + # used for lvm osd journals, this ensures they are removed + # for that testing scenario + - name: remove /dev/sdc1 if it exists + parted: + device: /dev/sdc + number: 1 + state: absent + + - name: remove /dev/sdc2 if it exists + parted: + device: /dev/sdc + number: 2 + state: absent + - name: partition /dev/sdc for journals parted: device: /dev/sdc
Closed a very small race window in addRows, added a passthrough version of lru_cache for legacy python.
@@ -2,9 +2,8 @@ from __future__ import absolute_import import sys import struct -from binascii import hexlify, unhexlify +from binascii import unhexlify from contextlib import contextmanager -from functools import lru_cache import xxhash @@ -16,6 +15,9 @@ import synapse.lib.threads as s_threads import lmdb +if sys.version_info > (3, 0): + from functools import lru_cache + # File conventions: # i, p, v, t: iden, prop, value, timestamp # i_enc, p_enc, v_key_enc, t_enc: above encoded to be space efficient and fully-ordered when @@ -94,6 +96,12 @@ else: def _memToBytes(x): return str(x) + def lru_cache(maxsize): + ''' A dumb passthrough. Python 2 impl is just going to be a tad slower ''' + def actual_decorator(wrappee): + return wrappee + return actual_decorator + def _encValKey(v): ''' Encode a v. Non-negative numbers are msgpack encoded. Negative numbers are encoded @@ -311,8 +319,10 @@ class Cortex(s_cores_common.Cortex): self.onfini(onfini) def _addRows(self, rows): - next_pk = self.next_pk encs = [] + + with self._getTxn(write=True) as txn: + next_pk = self.next_pk for i, p, v, t in rows: if next_pk > MAX_PK: raise DatabaseLimitReached('Out of primary key values') @@ -328,7 +338,6 @@ class Cortex(s_cores_common.Cortex): encs.append((i_enc, p_enc, row_enc, t_enc, v_key_enc, pk_enc)) next_pk += 1 - with self._getTxn(write=True) as txn: # an iterator of key, value pairs: key=pk_key_enc, val=i_enc+p_enc+v_val_enc+t_enc kvs = ((x[5], x[2]) for x in encs) consumed, added = txn.cursor(db=self.rows).putmulti(kvs, overwrite=False, append=True)
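A standalone sketch of how the passthrough keeps `@lru_cache(...)` usable on both interpreters (the decorated function is just a toy):

    import sys

    if sys.version_info > (3, 0):
        from functools import lru_cache
    else:
        def lru_cache(maxsize):
            # Dummy passthrough, as in the patch: keep the decorator syntax,
            # skip the caching on legacy Python.
            def actual_decorator(wrappee):
                return wrappee
            return actual_decorator

    @lru_cache(maxsize=1024)
    def double(x):
        return 2 * x

    print(double(21))  # -> 42 on both versions; only Python 3 memoizes results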
[IMPR] make Family.langs property more robust Don't include closed wikis in family files with wikimedia_sites.py. code is used as a local variable, but assigning to cls.codes may cause side effects and cls.codes could be overwritten. Use a copy of cls.codes instead.
@@ -1568,7 +1568,7 @@ class SubdomainFamily(Family): @classproperty def langs(cls): """Property listing family languages.""" - codes = cls.codes + codes = cls.codes[:] if hasattr(cls, 'test_codes'): codes += cls.test_codes
[celery] Make celery backend configurable Also only use eager during testing, even if it is not recommended
@@ -269,7 +269,8 @@ logging.basicConfig( # set up celery -CELERY_BROKER_URL = 'amqp://localhost' +CELERY_BROKER_URL = get_from_env('CELERY_BROKER_URL', 'amqp://localhost') +if TESTING: CELERY_TASK_ALWAYS_EAGER = True #database_url = DATABASES['default']
Update docker.md simple path fix
@@ -47,5 +47,5 @@ curl https://raw.githubusercontent.com/keras-team/autokeras/master/examples/mnis Run the mnist example : ``` -docker run -it -v "$(pwd)":/app --shm-size 2G haifengjin/autokeras python mnist.py +docker run -it -v "$(pwd)":/app --shm-size 2G haifengjin/autokeras python /app/mnist.py ```
Add logging in Populate_Lexical_Env TN:
@@ -698,6 +698,10 @@ package body ${ada_lib_name}.Analysis is if Unit.AST_Root = null then return; end if; + + Traces.Trace (Main_Trace, "Populating lexical envs for unit: " + & Basename (Unit)); + Unit.Context.In_Populate_Lexical_Env := True; declare Has_Errors : constant Boolean := Populate_Lexical_Env
Update README.md The default pip version was sometimes resulting in a segmentation fault when installing requirements. The newest version seems to work well.
@@ -67,7 +67,7 @@ Install dependencies ```bash sudo apt update sudo apt -y install swig3.0 python3-dev build-essential cmake ninja-build libboost-random-dev libssl-dev libffi-dev -sudo pip3 install -U setuptools +sudo pip3 install -U setuptools pip ``` To get the source and start the node, use the following: