Columns: message (string, length 13–484) and diff (string, length 38–4.63k)
VOLTHA utests are run in non-deterministic order. Reverse sort utest directories to ensure any host runs the tests in the same order.
@@ -256,12 +256,12 @@ test: venv protos run-as-root-tests utest: venv protos @ echo "Executing all unit tests" . ${VENVDIR}/bin/activate && \ - for d in $$(find ./tests/utests -depth -type d); do echo $$d:; nosetests $$d; done + for d in $$(find ./tests/utests -type d|sort -nr); do echo $$d:; nosetests $$d; done utest-with-coverage: venv protos @ echo "Executing all unit tests and producing coverage results" . ${VENVDIR}/bin/activate && \ - for d in $$(find ./tests/utests -depth -type d); do echo $$d:; \ + for d in $$(find ./tests/utests -type d|sort -nr); do echo $$d:; \ nosetests --with-xcoverage --with-xunit --cover-package=voltha,common,ofagent,chameleon $$d; done itest: venv run-as-root-tests
fix bug when setting "tags" in report file. When tags are not defined, we set this to an empty string.
@@ -162,7 +162,9 @@ def update_report(valid_builders): ]: entry[item] = builder.metadata[item] - # convert tags to string. + entry["tags"] = "" + # convert tags to string if defined in buildspec + if builder.metadata["tags"]: entry["tags"] = " ".join(builder.metadata["tags"]) # query over result attributes, we only assign some keys of interest
Fix type assert Summary: To check if a tensor is a byte tensor, we should use `self.action.type() == "torch.ByteTensor"`.
@@ -482,7 +482,7 @@ class RawMemoryNetworkInput(RawBaseInput): action, ) else: - assert isinstance(self.action, torch.ByteTensor) + assert self.action.dtype == torch.uint8 return PreprocessedMemoryNetworkInput( self.reward, self.time_diff, @@ -514,7 +514,7 @@ class RawMemoryNetworkInput(RawBaseInput): action, ) else: - assert isinstance(self.action, torch.ByteTensor) + assert self.action.dtype == torch.uint8 return PreprocessedMemoryNetworkInput( self.reward, self.time_diff,
Update cdd.py. Changed datetime format to sync with www.cryptodatadownload.com
@@ -71,7 +71,7 @@ class CryptoDataDownload: if "d" in timeframe: df["date"] = pd.to_datetime(df["date"]) elif "h" in timeframe: - df["date"] = pd.to_datetime(df["date"], format="%Y-%m-%d %I-%p") + df["date"] = pd.to_datetime(df["date"], format="%Y-%m-%d %H:%M:%S") df = df.set_index("date") df.columns = [name.lower() for name in df.columns]
Changelog for 0.7.1 Test Plan: N/A Reviewers: schrockn, alangenfeld, prha, nate
# Changelog -## 0.7.0 (Upcoming) +## 0.7.1 + +**Dagit** + +- Dagit now looks up an available port on which to run when the default port is + not available. (Thanks @rparrapy!) + +**dagster_pandas** + +- Hydration and materialization are now configurable on `dagster_pandas` dataframes. + +**dagster_aws** + +- The `s3_resource` no longer uses an unsigned session by default. + +**Bugfixes** + +- Type check messages are now displayed in Dagit. +- Failure metadata is now surfaced in Dagit. +- Dagit now correctly displays the execution time of steps that error. +- Error messages now appear correctly in console logging. +- GCS storage is now more robust to transient failures. +- Fixed an issue where some event logs could be duplicated in Dagit. +- Fixed an issue when reading config from an environment variable that wasn't set. +- Fixed an issue when loading a repository or pipeline from a file target on Windows. +- Fixed an issue where deleted runs could cause the scheduler page to crash in Dagit. + +**Documentation** + +- Expanded and improved docs and error messages. + +## 0.7.0 **Breaking Changes**
Added an example from Pittsburgh. Added a video of a woman in Pittsburgh who was not resisting arrest being pepper sprayed.
@@ -67,3 +67,12 @@ Three protestors kneeling on the ground with their hands on their heads/covering * https://twitter.com/d0wnrrrrr/status/1267691766188310528 +## Pittsburgh + +### Officer pepper-sprays a woman who is on her knees with her hands up + +A woman in East Liberty gets onto her knees and puts her hands in the air, while repeatedly yelling "we're peaceful, don't shoot!" Officers approach her and one of them sprays pepper spray onto her. + +**Links** + +* https://www.youtube.com/watch?v=TxHxU6nhzzQ
Added TensorBoard logging callback. Additionally added a log-dir argparse argument.
@@ -113,6 +113,20 @@ def create_callbacks(model, training_model, prediction_model, validation_generat lr_scheduler = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.1, patience=2, verbose=1, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0) callbacks.append(lr_scheduler) + if args.log_dir: + tb = keras.callbacks.TensorBoard( + log_dir = args.log_dir, + histogram_freq = 0, + batch_size = args.batch_size, + write_graph = True, + write_grads = False, + write_images = False, + embeddings_freq = 0, + embeddings_layer_names = None, + embeddings_metadata = None + ) + callbacks.append(tb) + return callbacks @@ -220,6 +234,7 @@ def parse_args(args): parser.add_argument('--epochs', help='Number of epochs to train.', type=int, default=50) parser.add_argument('--steps', help='Number of steps per epoch.', type=int, default=10000) parser.add_argument('--snapshot-path', help='Path to store snapshots of models during training (defaults to \'./snapshots\')', default='./snapshots') + parser.add_argument('--log-dir', help='Log directory for Tensorboard output', default='./logs') parser.add_argument('--no-snapshots', help='Disable saving snapshots.', dest='snapshots', action='store_false') parser.add_argument('--no-evaluation', help='Disable per epoch evaluation.', dest='evaluation', action='store_false')
Fix sigmoid_cross_entropy doc about t. functions.sigmoid_cross_entropy's documentation has an incorrect description of `t`. This PR fixes it.
@@ -77,9 +77,10 @@ def sigmoid_cross_entropy( (i, j)-th element indicates the unnormalized log probability of the j-th unit at the i-th example. t (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \ - :class:`cupy.ndarray`): Variable holding a signed integer vector of - ground truth labels. If ``t[i] == -1``, corresponding ``x[i]`` is - ignored. + :class:`cupy.ndarray`): A variable object holding a matrix whose + (i, j)-th element indicates a signed integer vector of + ground truth labels 0 or 1. + If ``t[i, j] == -1``, corresponding ``x[i, j]`` is ignored. Loss is zero if all ground truth labels are ``-1``. normalize (bool): Variable holding a boolean value which determines the normalization constant. If true, this function
feat(project): I give up, everything is in conflict xxxxxxxx de xxxxxx
@@ -10,6 +10,7 @@ class IncrementalAgent(Agent): def __init__(self, env, **kwargs): """ + Parameters ---------- env : Model @@ -19,8 +20,7 @@ class IncrementalAgent(Agent): @abstractmethod def partial_fit(self, fraction, **kwargs): - """ - Partially fits the agent, according to the fraction parameter. + """Partially fits the agent, according to the fraction parameter. For instance, if the agent requires N episodes for a "full" fit, calling partial_fit(0.5) will fit the agent for 0.5*N episodes. @@ -38,3 +38,4 @@ class IncrementalAgent(Agent): info : dict """ raise NotImplementedError("agent.partial_fit() not implemented.") +
Fix StorageHelper.delete() not respecting path substitutions. Fixes allegroai/clearml#825. Make StorageHelper.get_object() call StorageHelper._canonize_url() so that path substitutions work.
@@ -1193,6 +1193,7 @@ class StorageHelper(object): :return: The remote object """ + path = self._canonize_url(path) object_name = self._normalize_object_name(path) try: return self._driver.get_object(
purge: fix rbd-mirror group name. The default is rbdmirrors in ceph-defaults.
- name: purge ceph rbd-mirror cluster vars: - rbdmirror_group_name: rbd-mirrors + rbdmirror_group_name: rbdmirrors hosts: - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
Disable controllers which have no endpoints. Controllers without endpoints happen if the bootstrap fails before the controller instance is created. These cannot be used in future deploys, obviously, and will cause an error if selected. This disables them and provides information on how to clean them up. Fixes
@@ -40,16 +40,19 @@ class ControllerListView(BaseView): widget.append(Padding.line_break("")) cdict = defaultdict(lambda: defaultdict(list)) for cname, d in self.controllers.items(): - cdict[d['cloud']][d.get('region', None)].append(cname) + cdict[d['cloud']][d.get('region', None)].append((cname, d)) for cloudname, cloud_d in sorted(cdict.items()): widget.append(Color.label(Text(" {}".format(cloudname)))) for regionname, controllers in cloud_d.items(): - for controller in sorted(controllers): - label = " {}".format(controller) + for controller_name, controller in sorted(controllers): + label = " {}".format(controller_name) if regionname: label += " ({})".format(regionname) - widget.append_option(label, controller) + widget.append_option( + label, + controller, + enabled=controller.get('api-endpoints')) widget.append(Padding.line_break("")) widget.append(Padding.line_break("")) widget.append(HR()) @@ -57,5 +60,19 @@ class ControllerListView(BaseView): widget.select_first() return widget + def after_keypress(self): + selected = self.widget.selected_widgets + if selected is None: + return + elif selected.enabled: + msg = self.footer + else: + msg = ('This controller has no endpoints, so it cannot be used. ' + 'To clean it up, run: juju unregister {}'.format( + selected.label.strip().split()[0])) + self.set_footer(msg) + def submit(self): + if not self.widget.selected: + return # tried to select disabled controller self.submit_cb(self.widget.selected)
Update README.md. Add the memory and disk requirements of DDG-DA.
@@ -4,16 +4,16 @@ This is the implementation of `DDG-DA` based on `Meta Controller` component prov Please refer to the paper for more details: *DDG-DA: Data Distribution Generation for Predictable Concept Drift Adaptation* [[arXiv](https://arxiv.org/abs/2201.04038)] -## Background +# Background In many real-world scenarios, we often deal with streaming data that is sequentially collected over time. Due to the non-stationary nature of the environment, the streaming data distribution may change in unpredictable ways, which is known as concept drift. To handle concept drift, previous methods first detect when/where the concept drift happens and then adapt models to fit the distribution of the latest data. However, there are still many cases that some underlying factors of environment evolution are predictable, making it possible to model the future concept drift trend of the streaming data, while such cases are not fully explored in previous work. Therefore, we propose a novel method `DDG-DA`, that can effectively forecast the evolution of data distribution and improve the performance of models. Specifically, we first train a predictor to estimate the future data distribution, then leverage it to generate training samples, and finally train models on the generated data. -## Dataset +# Dataset The data in the paper are private. So we conduct experiments on Qlib's public dataset. Though the dataset is different, the conclusion remains the same. By applying `DDG-DA`, users can see rising trends at the test phase both in the proxy models' ICs and the performances of the forecasting models. -## Run the Code +# Run the Code Users can try `DDG-DA` by running the following command: ```bash python workflow.py run_all @@ -24,7 +24,10 @@ The default forecasting models are `Linear`. Users can choose other forecasting python workflow.py --forecast_model="gbdt" run_all ``` - -## Results - +# Results The results of related methods in Qlib's public dataset can be found [here](../) + +# Requirements +Here is the minimal hardware requirements to run the ``workflow.py`` of DDG-DA. +* Memory: 45G +* Disk: 4G
Arrow head length and head width options are added to the nyquist_plot function. Add option to change Nyquist plot arrow size:
* Nyquist_plot changed to accommodate arrow size
* color option is added
@@ -433,8 +433,9 @@ def bode_plot(syslist, omega=None, # Nyquist plot # -def nyquist_plot(syslist, omega=None, Plot=True, color=None, - labelFreq=0, *args, **kwargs): +def nyquist_plot(syslist, omega=None, Plot=True, + labelFreq=0, arrowhead_length=0.1, arrowhead_width=0.1, + color=None, *args, **kwargs): """ Nyquist plot for a system @@ -452,6 +453,8 @@ def nyquist_plot(syslist, omega=None, Plot=True, color=None, Used to specify the color of the plot labelFreq : int Label every nth frequency on the plot + arrowhead_width : arrow head width + arrowhead_length : arrow head length *args Additional arguments for :func:`matplotlib.plot` (color, linestyle, etc) **kwargs: @@ -511,12 +514,14 @@ def nyquist_plot(syslist, omega=None, Plot=True, color=None, ax = plt.gca() # Plot arrow to indicate Nyquist encirclement orientation ax.arrow(x[0], y[0], (x[1]-x[0])/2, (y[1]-y[0])/2, fc=c, ec=c, - head_width=0.2, head_length=0.2) + head_width=arrowhead_width, + head_length=arrowhead_length) plt.plot(x, -y, '-', color=c, *args, **kwargs) ax.arrow( x[-1], -y[-1], (x[-1]-x[-2])/2, (y[-1]-y[-2])/2, - fc=c, ec=c, head_width=0.2, head_length=0.2) + fc=c, ec=c, head_width=arrowhead_width, + head_length=arrowhead_length) # Mark the -1 point plt.plot([-1], [0], 'r+')
Fix call to fetchThreadList Use "self" instead of "client"
@@ -488,7 +488,7 @@ class Client(object): return [] while True: lastThreadTimestamp = Threads[-1].last_message_timestamp - candidates = client.fetchThreadList(before=lastThreadTimestamp, thread_location=thread_location) # return at max 20 threads before lastThreadTimestamp (included) + candidates = self.fetchThreadList(before=lastThreadTimestamp, thread_location=thread_location) # return at max 20 threads before lastThreadTimestamp (included) if len(candidates) > 1: Threads += candidates[1:] else:
add ThreatExchange until desc
@@ -371,7 +371,7 @@ script: - name: since description: 'Returns malware collected after a timestamp, format: 1391813489' - name: until - description: "-" + description: 'Returns malware collected before a timestamp, format: 1391813489' outputs: - contextPath: URL.Data description: Bad URLs found
fix: compress for raw and compressed_encoding. Previously only segmentation was compressed, but this is too rough a guideline. Images are also often stored losslessly, but are uncompressed because they aren't 'segmentation'. This should improve future storage costs by about 1/3.
@@ -620,7 +620,7 @@ class CloudVolume(object): if self.encoding == 'jpeg': content_type == 'image/jpeg' - compress = (self.layer_type in ('segmentation')) + compress = (self.encoding in ('raw', 'compressed_segmentation')) with Storage(self.layer_cloudpath) as storage: storage.put_files(uploads, content_type=content_type, compress=compress)
document delays with type conversions closes
@@ -1583,6 +1583,15 @@ Having `delay_before` in the second stage of the test is semantically identical to having `delay_after` in the first stage of the test - feel free to use whichever seems most appropriate. +A saved/config variable can be used by using a type token conversion, such as: + +```yaml +stages: + - name: Trigger task + ... + delay_after: !float "{sleep_time}" +``` + ## Retrying tests If you are not sure how long the server might take to process a request, you can
Fix typo. And test if new docs location works for GitHub
@@ -22,7 +22,7 @@ SimpleMonitor is a Python script which monitors hosts and network connectivity. * Windows DHCP scope (available IPs) * APC UPS monitoring (requires apcupsd to be installed and configured) * Running an arbitary command and checking the output -* A monitor which is a compond of a number of the above +* A monitor which is a compound of a number of the above Adding more monitor types is quite simple if you are able to code in Python.
Remove warning about building from source to use the NCCL backend Summary: I think this warning isn't true anymore, and the NCCL backend works without PyTorch needing to be built from source. Pull Request resolved:
@@ -348,7 +348,7 @@ def init_process_group(backend, group_name (str, optional, deprecated): Group name. To enable ``backend == Backend.MPI``, PyTorch needs to built from source - on a system that supports MPI. The same applies to NCCL as well. + on a system that supports MPI. """ global _pg_group_ranks
Fix integration link in the installation docs The link to the home assistant integration documentation was missing the leading slash which caused the path to be appended to the `/frigate` path of this page.
@@ -3,7 +3,7 @@ id: installation title: Installation --- -Frigate is a Docker container that can be run on any Docker host including as a [HassOS Addon](https://www.home-assistant.io/addons/). Note that a Home Assistant Addon is **not** the same thing as the integration. The [integration](integrations/home-assistant) is required to integrate Frigate into Home Assistant. +Frigate is a Docker container that can be run on any Docker host including as a [HassOS Addon](https://www.home-assistant.io/addons/). Note that a Home Assistant Addon is **not** the same thing as the integration. The [integration](/integrations/home-assistant) is required to integrate Frigate into Home Assistant. ## Dependencies
Make use of title in rotor plot call possible. Calling rotor.plot_rotor(title=dict(text="title")) would cause an error, since the title kwarg would be passed twice to the update_layout call.
@@ -2036,7 +2036,8 @@ class Rotor(object): showgrid=False, mirror=True, ) - fig.update_layout(title=dict(text="Rotor Model"), **kwargs) + kwargs["title"] = kwargs.get("title", "Rotor Model") + fig.update_layout(**kwargs) return fig
Remove Optimize method signature for BlockMatrix Unused and incorrect -- if this method were used, it would not optimize any IRs in the DAG, which may contain relational or value IRs.
@@ -34,8 +34,6 @@ object Optimize { def apply(ir: MatrixIR): MatrixIR = apply(ir, true, true) - def apply(ir: BlockMatrixIR): BlockMatrixIR = ir //Currently no BlockMatrixIR that can be optimized - def apply(ir: IR, noisy: Boolean, canGenerateLiterals: Boolean, context: Option[String]): IR = optimize(ir, noisy, canGenerateLiterals, context).asInstanceOf[IR]
Update generic.txt Moving to ```smokeloader```
@@ -5873,80 +5873,6 @@ bobbychiz.top http://35.224.233.140 -# Reference: https://twitter.com/peterkruse/status/1171685525377495040 -# Reference: https://twitter.com/tkanalyst/status/1173068957386866688 -# Reference: https://pastebin.com/kZVikTtP -# Reference: https://www.virustotal.com/gui/ip-address/5.101.181.35/relations -# Reference: https://www.virustotal.com/gui/ip-address/185.25.50.148/relations -# Reference: https://www.virustotal.com/gui/ip-address/185.25.50.163/relations - -advertland.net -advertmex.world -advertserv25.world -advertserv99.club -advexmai42dn.world -advexmail23mn.world -advexmail2551.club -advexmail255143x.club -advexmail2551fc7.club -advexmail270711.club -dsmail95.xyz -dsmailx9547.xyz -ecmero.com -fdmail70.club -griffintech.ru -kxserv65.club -kxserv652.club -kxservx6527.club -mailadvert17dt.world -mailadvert19.world -mailadvert2551.club -mailadvert2551zx1.club -mailadvert5917dx.world -mailadvert917dx.world -mailserv1551.club -mailserv1551ex97.club -mailserv1551kx3.club -mailserv171.club -mailserv7.club -mailserv75.com -mailserv85m.world -mailserv93fd.world -mailstat55.club -mailstat557.club -mailstatx5577.club -mextes.com -popadvert.world -sdstat901511.club -sdstat9551.club -sdstat955192rv.club -sdstat9551as4.club -sdstat9551pm3.club -sdstat95xz.world -sdstat97tp.world -serverupdate7.world -starserver45.world -starserver4551.club -starserver4551mx2.club -starserver715km.world -starserver75ms.world -statexadver32s.world -statexadver35111.club -statexadver3552.club -statexadver3552ap93.club -statexadver3552mn12.club -swissmarine.club -zel.biz -advertserv[0-9a-z]+\.(club|world) -advexmai[0-9a-z]+\.(club|world) -kxserv[0-9a-z]+\.(club|world) -mailadvert[0-9a-z]+\.(club|world) -mailserv[0-9a-z]+\.(club|world) -mailstat[0-9a-z]+\.(club|world) -sdstat[0-9a-z]+\.(club|world) -starserver[0-9a-z]+\.(club|world) -statexadver[0-9a-z]+\.(club|world) - # Reference: https://twitter.com/killamjr/status/1171849775911772165 globalpaymentportal.co
docs: Update documentation for Bionic to Focal upgrade. Added the -d flag to do-release-upgrade for the Bionic to Focal upgrade. The -d switch is necessary to upgrade from Ubuntu 18.04 LTS as upgrades have not yet been enabled and will only be enabled after the first point release of 20.04 LTS. Source
@@ -218,9 +218,13 @@ instructions for other supported platforms. ``` sudo -i # Or otherwise get a root shell - do-release-upgrade + do-release-upgrade -d ``` + The `-d` option to `do-release-upgrade` is required because Ubuntu + 20.04 is new; it will stop being necessary once the first point + release update of Ubuntu 20.04 LTS is released. + When `do-release-upgrade` asks you how to upgrade configuration files for services that Zulip manages like `redis`, `postgres`, `nginx`, and `memcached`, the best choice is `N` to keep the
ceph-osd: set 'openstack_keys_tmp' only when 'openstack_config' is defined. If 'openstack_config' is false this task shouldn't be executed.
openstack_keys_tmp: "{{ openstack_keys_tmp|default([]) + [ { 'key': item.key, 'name': item.name, 'caps': { 'mon': item.mon_cap, 'osd': item.osd_cap|default(''), 'mds': item.mds_cap|default(''), 'mgr': item.mgr_cap|default('') } , 'mode': item.mode } ] }}" with_items: "{{ openstack_keys }}" when: + - openstack_config - item.get('mon_cap', None) # it's enough to assume we are running an old-fashionned syntax simply by checking the presence of mon_cap since every key needs this cap - name: set_fact keys - override keys_tmp with keys
Restrict comparison values for variable type strings This should avoid subtle bugs that could crop up when comparing against the wrong form (e.g. "category" instead of "categorical"). There might be a built-in way to do this, but I couldn't find it...
@@ -2,6 +2,7 @@ import warnings import itertools from copy import copy from functools import partial +from collections import UserString from collections.abc import Iterable, Sequence, Mapping from numbers import Number from datetime import datetime @@ -780,8 +781,10 @@ class VectorPlotter: wide_data = pd.DataFrame(data, copy=True) # At this point we should reduce the dataframe to numeric cols - numeric_cols = wide_data.apply(variable_type) == "numeric" - wide_data = wide_data.loc[:, numeric_cols] + numeric_cols = [ + k for k, v in wide_data.items() if variable_type(v) == "numeric" + ] + wide_data = wide_data[numeric_cols] # Now melt the data to long form melt_kws = {"var_name": "@columns", "value_name": "@values"} @@ -1200,6 +1203,25 @@ class VectorPlotter: ax.set_ylabel(self.variables.get("y", default_y), visible=y_visible) +class VariableType(UserString): + """ + Prevent comparisons elsewhere in the library from using the wrong name. + + Errors are simple assertions because users should not be able to trigger + them. If that changes, they should be more verbose. + + """ + allowed = "numeric", "datetime", "categorical" + + def __init__(self, data): + assert data in self.allowed, data + super().__init__(data) + + def __eq__(self, other): + assert other in self.allowed, other + return self.data == other + + def variable_type(vector, boolean_type="numeric"): """ Determine whether a vector contains numeric, categorical, or datetime data. @@ -1223,13 +1245,14 @@ def variable_type(vector, boolean_type="numeric"): var_type : 'numeric', 'categorical', or 'datetime' Name identifying the type of data in the vector. """ + # If a categorical dtype is set, infer categorical if pd.api.types.is_categorical_dtype(vector): - return "categorical" + return VariableType("categorical") # Special-case all-na data, which is always "numeric" if pd.isna(vector).all(): - return "numeric" + return VariableType("numeric") # Special-case binary/boolean data, allow caller to determine # This triggers a numpy warning when vector has strings/objects @@ -1244,14 +1267,14 @@ def variable_type(vector, boolean_type="numeric"): action='ignore', category=(FutureWarning, DeprecationWarning) ) if np.isin(vector, [0, 1, np.nan]).all(): - return boolean_type + return VariableType(boolean_type) # Defer to positive pandas tests if pd.api.types.is_numeric_dtype(vector): - return "numeric" + return VariableType("numeric") if pd.api.types.is_datetime64_dtype(vector): - return "datetime" + return VariableType("datetime") # --- If we get to here, we need to check the entries @@ -1264,7 +1287,7 @@ def variable_type(vector, boolean_type="numeric"): return True if all_numeric(vector): - return "numeric" + return VariableType("numeric") # Check for a collection where everything is a datetime @@ -1275,11 +1298,11 @@ def variable_type(vector, boolean_type="numeric"): return True if all_datetime(vector): - return "datetime" + return VariableType("datetime") # Otherwise, our final fallback is to consider things categorical - return "categorical" + return VariableType("categorical") def infer_orient(x=None, y=None, orient=None, require_numeric=True):
update release notes for 4.2.2 (from 4.2.x branch)
+# Release 4.2.2 - (May 27, 2022) + * Lightning: + - watching onchain outputs: significant perf. improvements (#7781) + - enforce relative order of some msgs during chan reestablishment, + lack of which can lead to unwanted force-closures (#7830) + - fix: in case of a force-close containing incoming HTLCs, we were + redeeming all HTLCs that we know the preimage for. This might + publish the preimage of an incomplete MPP. (1a5ef554, e74e9d8e) + * Hardware wallets: + - smarter pairing during sign_transaction (238619f1) + - keepkey: fix pairing with device using a workaround (#7779) + * fix AppImage failing to run on certain systems (#7784) + * fix "Automated BIP39 recovery" not scanning change paths (#7804) + * bypass network proxy for localhost electrum server (#3126) + + # Release 4.2.1 - (March 26, 2022) * Binaries: - Windows: we are dropping support for Windows 7. (#7728)
Fix slowdown in state machine. A check for the global being set is needed. We should remove that global.
@@ -1101,6 +1101,7 @@ class AMRStateMachine: global entity_rules_json, entity_rule_stats, entity_rule_totals, entity_rule_fails assert self.entity_rules_path, "you need to provide entity_rules" + if not entity_rules_json: with open(self.entity_rules_path, 'r', encoding='utf8') as f: entity_rules_json = json.load(f)
Fix float focused autocomplete options being parsed. According to the Discord docs these aren't validated.
@@ -137,6 +137,7 @@ class Namespace: for option in options: opt_type = option['type'] name = option['name'] + focused = option.get('focused', False) if opt_type in (3, 4, 5): # string, integer, boolean value = option['value'] # type: ignore # Key is there self.__dict__[name] = value @@ -146,7 +147,11 @@ class Namespace: if value is None or value == '': self.__dict__[name] = float('nan') else: + if not focused: self.__dict__[name] = float(value) + else: + # Autocomplete focused values tend to be garbage in + self.__dict__[name] = value elif opt_type in (6, 7, 8, 9, 11): # Remaining ones should be snowflake based ones with resolved data snowflake: str = option['value'] # type: ignore # Key is there
ColumnEncodingUtility: make a few system queries search_path independent. FK and PK queries were relying on the search path. This is not the case anymore. Only queries using pg_table_def still depend on search_path.
@@ -230,13 +230,6 @@ def get_pg_conn(): run_commands(conn, [set_name]) - # Set search_path - set_searchpath = "set search_path to '$user', public, %s;" % schema_name - if debug: - comment(set_searchpath) - - run_commands(conn, [set_searchpath]) - # turn off autocommit for the rest of the executions conn.autocommit = False @@ -324,12 +317,9 @@ def get_foreign_keys(schema_name, set_target_schema, table_name): fk_statement = '''SELECT /* fetching foreign key relations */ conname, pg_catalog.pg_get_constraintdef(cons.oid, true) as condef FROM pg_catalog.pg_constraint cons, - pg_namespace pgn, pg_class pgc WHERE cons.conrelid = pgc.oid - and pgn.nspname = '%s' - and pgc.relnamespace = pgn.oid - and pgc.oid = '%s'::regclass + and pgc.oid = '%s."%s"'::regclass AND cons.contype = 'f' ORDER BY 1 ''' % (schema_name, table_name) @@ -359,18 +349,16 @@ def get_primary_key(schema_name, set_target_schema, original_table, new_table): # get the primary key columns statement = '''SELECT /* fetch primary key information */ att.attname -FROM pg_index ind, pg_class cl, pg_attribute att, pg_namespace pgn +FROM pg_index ind, pg_class cl, pg_attribute att WHERE - cl.oid = '%s'::regclass + cl.oid = '%s."%s"'::regclass AND ind.indrelid = cl.oid AND att.attrelid = cl.oid - and cl.relnamespace = pgn.oid - and pgn.nspname = '%s' and att.attnum = ANY(string_to_array(textin(int2vectorout(ind.indkey)), ' ')) and attnum > 0 AND ind.indisprimary order by att.attnum; -''' % (original_table, schema_name) +''' % (schema_name, original_table) if debug: comment(statement)
Add simple unittest for -include Missing test was flagged by code coverage because the line was "touched" due to a variable rename.
@@ -810,11 +810,13 @@ sys.exit(0) "-fmerge-all-constants " "-fopenmp " "-mno-cygwin -mwindows " - "-arch i386 -isysroot /tmp " + "-arch i386 " + "-isysroot /tmp " "-iquote /usr/include/foo1 " "-isystem /usr/include/foo2 " "-idirafter /usr/include/foo3 " "-imacros /usr/include/foo4 " + "-include /usr/include/foo5 " "--param l1-cache-size=32 --param l2-cache-size=6144 " "+DD64 " "-DFOO -DBAR=value -D BAZ " @@ -832,6 +834,7 @@ sys.exit(0) ('-isystem', '/usr/include/foo2'), ('-idirafter', '/usr/include/foo3'), ('-imacros', env.fs.File('/usr/include/foo4')), + ('-include', env.fs.File('/usr/include/foo5')), ('--param', 'l1-cache-size=32'), ('--param', 'l2-cache-size=6144'), '+DD64'], repr(d['CCFLAGS']) assert d['CXXFLAGS'] == ['-std=c++0x'], repr(d['CXXFLAGS'])
Memorize IdP only if auth was successful. This also means that we memorize the IdP regardless of the disco.
@@ -317,7 +317,9 @@ class SAMLBackend(BackendModule, SAMLBaseModule): raise SATOSAAuthenticationError(context.state, "State did not match relay state") context.decorate(Context.KEY_BACKEND_METADATA_STORE, self.sp.metadata) - + if self.config.get(SAMLBackend.KEY_MEMORIZE_DISCO_IDP): + issuer = authn_response.response.issuer.text.strip() + context.state[Context.KEY_MEMORIZED_DISCO_IDP] = issuer context.state.pop(self.name, None) context.state.pop(Context.KEY_FORCE_AUTHN, None) return self.auth_callback_func(context, self._translate_response(authn_response, context.state)) @@ -341,9 +343,6 @@ class SAMLBackend(BackendModule, SAMLBaseModule): satosa_logging(logger, logging.DEBUG, "No IDP chosen for state", state, exc_info=True) raise SATOSAAuthenticationError(state, "No IDP chosen") from err - if self.config.get(SAMLBackend.KEY_MEMORIZE_DISCO_IDP): - context.state[Context.KEY_MEMORIZED_DISCO_IDP] = entity_id - return self.authn_request(context, entity_id) def _translate_response(self, response, state):
Fixes a bug in Circuit indexing: circuit[:] now works like it should. Previously this would return an empty circuit, like circuit[0:0], which is obviously incorrect. We should create a unit test for this.
@@ -778,7 +778,7 @@ class Circuit(object): layers = list(range(len(self._labels))) elif isinstance(layers, slice): if layers.start is None and layers.stop is None: - layers = () + layers = list(range(len(self._labels))) # e.g. circuit[:] else: layers = _slct.indices(layers, len(self._labels)) elif not isinstance(layers, (list, tuple)):
fixed typo in file OCC.py line 59: missing reference to the imported library multiclass
@@ -56,7 +56,7 @@ class OutputCodeClassifier(base.Wrapper, base.Classifier): >>> dataset = datasets.ImageSegments() >>> scaler = preprocessing.StandardScaler() - >>> ooc = OutputCodeClassifier( + >>> ooc = multiclass.OutputCodeClassifier( ... classifier=linear_model.LogisticRegression(), ... code_size=10, ... seed=24
Accelerate evaluable.derivative This patch adds a significant shortcut to function.derivative that returns zero when var is not in a function's arguments, rather than climbing up and down the function tree to arrive at the same result at much greater effort.
@@ -4031,11 +4031,10 @@ def derivative(func, var, seen=None): 'derivative' assert isinstance(var, DerivativeTargetBase), 'invalid derivative target {!r}'.format(var) - if var.dtype != float: + if var.dtype != float or var not in func.arguments: return Zeros(func.shape + var.shape, dtype=func.dtype) if seen is None: seen = {} - func = asarray(func) if func in seen: result = seen[func] else:
Fix spelling `Gitbub` -> `GitHub`
- First take a look at the [Troubleshooting section](https://help.datadoghq.com/hc/en-us/sections/200763635-Amazon-Web-Services) of our [Knowledge Base](https://help.datadoghq.com/hc/en-us). - If you can't find anything useful, please contact our Solutions Team for assistance. -- Finally, you can open a Gitbub issue +- Finally, you can open a GitHub issue ## Pull Requests
Update practices.rst fix a typo
@@ -238,7 +238,7 @@ Here are some tips to keep in mind when dealing with these kinds of sites: * if possible, use `Google cache`_ to fetch pages, instead of hitting the sites directly * use a pool of rotating IPs. For example, the free `Tor project`_ or paid - services like `ProxyMesh`_. An open source alterantive is `scrapoxy`_, a + services like `ProxyMesh`_. An open source alternative is `scrapoxy`_, a super proxy that you can attach your own proxies to. * use a highly distributed downloader that circumvents bans internally, so you can just focus on parsing clean pages. One example of such downloaders is
m1n1.hv.HV: Run passive tracers *before* issuing the MMIO write. E.g. this means tracers run *before* an ASC command gets sent, which might be relevant if the same memory is used for commands and responses.
@@ -382,18 +382,12 @@ class HV(Reloadable): first = 0 val = data.data - if data.flags.WRITE: - if data.flags.WIDTH < 3: - wval = val[0] - else: - wval = val - if mode == TraceMode.HOOK: - if data.flags.WRITE: - self.shellwrap(lambda: write(data.addr, wval, 8 << data.flags.WIDTH, **kwargs), - f"Tracer {ident}:write (HOOK)", update=do_update) + if mode not in (TraceMode.HOOK, TraceMode.SYNC): + raise Exception(f"VM hook with unexpected mapping at {data.addr:#x}: {maps[0][0].name}") - else: + if not data.flags.WRITE: + if mode == TraceMode.HOOK: val = self.shellwrap(lambda: read(data.addr, 8 << data.flags.WIDTH, **kwargs), f"Tracer {ident}:read (HOOK)", update=do_update, needs_ret=True) @@ -401,19 +395,16 @@ class HV(Reloadable): val = [val] first += 1 elif mode == TraceMode.SYNC: - if data.flags.WRITE: - self.u.write(data.addr, wval, 8 << data.flags.WIDTH) - else: val = self.u.read(data.addr, 8 << data.flags.WIDTH) if not isinstance(val, list) and not isinstance(val, tuple): val = [val] - else: - raise Exception(f"VM hook with unexpected mapping at {data.addr:#x}: {maps[0][0].name}") - if not data.flags.WRITE: for i in range(1 << max(0, data.flags.WIDTH - 3)): self.p.write64(ctx.data + 16 + 8 * i, val[i]) + elif mode == TraceMode.HOOK: + first += 1 + flags = data.flags.copy() width = data.flags.WIDTH @@ -440,6 +431,20 @@ class HV(Reloadable): self.shellwrap(lambda: read(evt, **kwargs), f"Tracer {ident}:read ({mode.name})", update=do_update) + if data.flags.WRITE: + mode, ident, read, write, kwargs = maps[0] + + if data.flags.WIDTH < 3: + wval = val[0] + else: + wval = val + + if mode == TraceMode.HOOK: + self.shellwrap(lambda: write(data.addr, wval, 8 << data.flags.WIDTH, **kwargs), + f"Tracer {ident}:write (HOOK)", update=do_update) + elif mode == TraceMode.SYNC: + self.u.write(data.addr, wval, 8 << data.flags.WIDTH) + return True def handle_vm_hook(self, ctx):
[commands] Add support for stacking Cog.listener decorator. Fix
@@ -102,7 +102,8 @@ class CogMeta(type): except AttributeError: continue else: - listeners.append((value.__cog_listener_name__, value.__name__)) + for name in value.__cog_listener_names__: + listeners.append((name, value.__name__)) attrs['__cog_commands__'] = commands # this will be copied in Cog.__new__ attrs['__cog_listeners__'] = tuple(listeners) @@ -209,7 +210,11 @@ class Cog(metaclass=CogMeta): if not inspect.iscoroutinefunction(func): raise TypeError('Listener function must be a coroutine function.') func.__cog_listener__ = True - func.__cog_listener_name__ = name or func.__name__ + to_assign = name or func.__name__ + try: + func.__cog_listener_names__.append(to_assign) + except AttributeError: + func.__cog_listener_names__ = [to_assign] return func return decorator
Add two missing `ForwardRef` attributes These look somewhat like implementation details, but no more so than any of the other dunder attributes that are already on the class.
@@ -1215,6 +1215,8 @@ if sys.version_info >= (3, 7): __forward_evaluated__: bool __forward_value__: Any | None __forward_is_argument__: bool + __forward_is_class__: bool + __forward_module__: Any | None if sys.version_info >= (3, 9): # The module and is_class arguments were added in later Python 3.9 versions. def __init__(self, arg: str, is_argument: bool = ..., module: Any | None = ..., *, is_class: bool = ...) -> None: ...
[Chore] Add Big Sur tezos-sappling-params bottle Problem: tezos-sapling-params formula is used as a dependency for the rest of the formulae with Octez binaries. However, we don't have Big Sur bottle for it. Solution: Since this formula isn't updated automatically, provide bottle hash manually.
@@ -15,6 +15,7 @@ class TezosSaplingParams < Formula root_url "https://github.com/serokell/tezos-packaging/releases/download/#{TezosSaplingParams.version}/" sha256 cellar: :any, mojave: "4e89932b0626cffe80214ba45342280c340b34c58ebbf7c3e0185a6d4662732d" sha256 cellar: :any, catalina: "5f7a5687d67051eafcfb7cb5ac542143a325a135403daeca6595602bfd400441" + sha256 cellar: :any, big_sur: "c910acffd3369bf5c4e0cff112efe6d56035394639b9571d845ad5ecb4dbd01f" end def install
Fix file path handling in setuptools hack. Closes
@@ -851,7 +851,7 @@ def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet= if file not in m.sources: # Old setuptools unconditionally replaces .pyx with .c/.cpp - target_file = file.rsplit('.')[0] + ('.cpp' if m.language == 'c++' else '.c') + target_file = os.path.splitext(file)[0] + ('.cpp' if m.language == 'c++' else '.c') try: m.sources.remove(target_file) except ValueError:
fix data storage warnings. Use the correct numpy ABC for numbers.
@@ -142,7 +142,7 @@ def write_dict_to_hdf5(data_dict: dict, entry_point): """ for key, item in data_dict.items(): # Basic types - if isinstance(item, (str, float, int, bool, + if isinstance(item, (str, float, int, bool, np.number, np.float_, np.int_, np.bool_)): try: entry_point.attrs[key] = item
feat(option_commodity_sina.py): add option_commodity_sina interface
@@ -672,8 +672,8 @@ def get_futures_index(df): if __name__ == "__main__": get_futures_daily_df = get_futures_daily( - start_day="20200415", end_day="20200416", market="DCE", index_bar=False + start_day="20200701", end_day="20200716", market="DCE", index_bar=False ) print(get_futures_daily_df) - get_dce_daily_df = get_dce_daily(date="20200416", symbol_type="futures", retries=0) + get_dce_daily_df = get_dce_daily(date="20200716", symbol_type="futures", retries=0) print(get_dce_daily_df)
Allow inferred scaling in MultiheadSelfAttention for head_dim != 64 Summary: Rather than raise an exception whenever head_dim != 64, we can just infer the scaling value and continue to provide a warning. Also add an assertion in case embed_dim is not a multiple of num_heads (in which case forward will break).
+import logging +import math from typing import Optional, List, Union import torch from torch import nn from torch.nn import Module - -import math - from torch.nn import functional as F +logger = logging.getLogger(__name__) + class PositionalEmbedding(Module): - def __init__( - self, num_embeddings: int, embedding_dim: int, pad_index: int - ): + def __init__(self, num_embeddings: int, embedding_dim: int, pad_index: int): super().__init__() self.embedding = nn.Embedding(num_embeddings, embedding_dim, pad_index) self.pad_index = pad_index @@ -79,19 +78,21 @@ class MultiheadSelfAttention(Module): expected_scaling = float(1 / math.sqrt(self.head_dim)) - if not scaling and self.head_dim == 64: - scaling = 0.125 + assert ( + embed_dim % num_heads == 0 + ), f"embed_dim={embed_dim} should be a multiple of num_heads={num_heads}" if not scaling: - raise Exception( + logger.warn( f""" - Scaling not set. Please manually set scaling for transformers with - head_dim != 64. The suggested value in this case is {expected_scaling}, + Scaling not set. Please manually set scaling for transformers. + In this case the suggested value {expected_scaling} will be inferred, or float(1 / math.sqrt(head_dim)) where head_dim = embed_dim // num_heads = {self.head_dim} and embed_dim = {embed_dim} and num_heads = {num_heads}. """ ) + scaling = expected_scaling self.scaling = scaling self.dropout = nn.Dropout(dropout)
fix: Build crashes when printing message on Windows when the code is on a different drive. For compatibility with Windows: relpath() fails when the specified path is on a different drive.
@@ -158,8 +158,19 @@ def do_cli(function_identifier, # pylint: disable=too-many-locals click.secho("\nBuild Succeeded", fg="green") - msg = gen_success_msg(os.path.relpath(ctx.build_dir), - os.path.relpath(ctx.output_template_path), + # try to use relpath so the command is easier to understand, however, + # under Windows, when SAM and (build_dir or output_template_path) are + # on different drive, relpath() fails. + try: + build_dir_in_success_message = os.path.relpath(ctx.build_dir) + output_template_path_in_success_message = os.path.relpath(ctx.output_template_path) + except ValueError: + LOG.debug("Failed to retrieve relpath - using the specified path as-is instead") + build_dir_in_success_message = ctx.build_dir + output_template_path_in_success_message = ctx.output_template_path + + msg = gen_success_msg(build_dir_in_success_message, + output_template_path_in_success_message, os.path.abspath(ctx.build_dir) == os.path.abspath(DEFAULT_BUILD_DIR)) click.secho(msg, fg="yellow")
Update setup.py add comment about cx_freeze excludes behavior
@@ -52,6 +52,10 @@ install_requires = [ ] includes = [] +# WARNING: As of cx_freeze there is a bug? +# when this is empty, its hooks will not kick in +# and won't clean platform irrelevant modules +# like dbm mentioned above. excludes = [ "openpype" ]
Add appeal categories to mod categories This allows us to run moderation commands in the appeal categories
@@ -144,6 +144,8 @@ guild: logs: &LOGS 468520609152892958 moderators: &MODS_CATEGORY 749736277464842262 modmail: &MODMAIL 714494672835444826 + appeals: &APPEALS 890331800025563216 + appeals2: &APPEALS2 895417395261341766 voice: 356013253765234688 summer_code_jam: 861692638540857384 @@ -238,6 +240,8 @@ guild: - *MODS_CATEGORY - *MODMAIL - *LOGS + - *APPEALS + - *APPEALS2 moderation_channels: - *ADMINS
spark/table/union: The original implementation is too resource-intensive; switch to a new implementation.
@@ -134,8 +134,8 @@ class Table(CTableABC): return from_rdd(_subtract_by_key(self._rdd, other._rdd)) @computing_profile - def union(self, other: 'Table', func=lambda v1, v2: v1, **kwargs): - return from_rdd(_union(self._rdd, other._rdd, func)) + def union(self, other: 'Table', **kwargs): + return from_rdd(_union(self._rdd, other._rdd)) def from_hdfs(paths: str, partitions): @@ -253,20 +253,9 @@ def _subtract_by_key(rdd, other): return rdd.subtractByKey(other, rdd.getNumPartitions()) -def _union(rdd, other, func): +def _union(rdd, other): num_partition = max(rdd.getNumPartitions(), other.getNumPartitions()) - - def _func(pair): - iter1, iter2 = pair - val1 = list(iter1) - val2 = list(iter2) - if not val1: - return val2[0] - if not val2: - return val1[0] - return func(val1[0], val2[0]) - - return _map_value(rdd.cogroup(other, num_partition), _func) + return rdd.union(other).coalesce(num_partition) def _flat_map(rdd, func):
fixed (hopefully) occasional memory error. Altered the construction of c_arrays to hold references to the strides and shapes arrays before they are put into the struct.
@@ -10,6 +10,8 @@ namespace py = pybind11; struct c_array py2c(py::buffer_info info) { char format[6]; strcpy(format, info.format.c_str()); + const ssize_t *shape = &info.shape[0]; + const ssize_t *strides = &info.strides[0]; struct c_array out = { info.ptr, @@ -17,8 +19,8 @@ struct c_array py2c(py::buffer_info info) { info.size, format, info.ndim, - &info.shape[0], - &info.strides[0] + shape, + strides, }; return out; }
Simplifies Redundant Unsilence Target Test Removes redundant functionality from the `test_unsilence_helper_fail` test as it is covered by another test. Keeps the functionality that isn't being tested elsewhere.
@@ -688,42 +688,17 @@ class UnsilenceTests(unittest.IsolatedAsyncioTestCase): self.assertDictEqual(prev_overwrite_dict, new_overwrite_dict) - @mock.patch.object(silence.Silence, "_unsilence", return_value=False) - @mock.patch.object(silence.Silence, "send_message") - async def test_unsilence_helper_fail(self, send_message, _): - """Tests unsilence_wrapper when `_unsilence` fails.""" - ctx = MockContext() - - text_channel = MockTextChannel() - text_role = self.cog.bot.get_guild(Guild.id).default_role - - voice_channel = MockVoiceChannel() - voice_role = self.cog.bot.get_guild(Guild.id).get_role(Roles.voice_verified) - + async def test_unsilence_role(self): + """Tests unsilence_wrapper applies permission to the correct role.""" test_cases = ( - (ctx, text_channel, text_role, True, silence.MSG_UNSILENCE_FAIL), - (ctx, text_channel, text_role, False, silence.MSG_UNSILENCE_MANUAL), - (ctx, voice_channel, voice_role, True, silence.MSG_UNSILENCE_FAIL), - (ctx, voice_channel, voice_role, False, silence.MSG_UNSILENCE_MANUAL), + (MockTextChannel(), self.cog.bot.get_guild(Guild.id).default_role), + (MockVoiceChannel(), self.cog.bot.get_guild(Guild.id).get_role(Roles.voice_verified)) ) - class PermClass: - """Class to Mock return permissions""" - def __init__(self, value: bool): - self.val = value - - def __getattr__(self, item): - return self.val - - for context, channel, role, permission, message in test_cases: - with mock.patch.object(channel, "overwrites_for", return_value=PermClass(permission)) as overwrites: - with self.subTest(channel=channel, message=message): - await self.cog._unsilence_wrapper(channel, context) - - overwrites.assert_called_once_with(role) - send_message.assert_called_once_with(message, ctx.channel, channel, alert_target=False) - - send_message.reset_mock() + for channel, role in test_cases: + with self.subTest(channel=channel, role=role): + await self.cog._unsilence_wrapper(channel, MockContext()) + channel.overwrites_for.assert_called_with(role) @mock.patch.object(silence.Silence, "_force_voice_sync") @mock.patch.object(silence.Silence, "send_message")
Fix a minor bug for a pb optional field. In `Pubsub.continuously_read_stream`, it checks whether this is a control message enclosed in an RPC message with `if rpc_incoming.control:`. However, in pb2, the condition is always true because a default value is returned when a field is not set. Solved it by changing it to `if rpc_incoming.HasField("control"):`.
@@ -159,7 +159,11 @@ class Pubsub: for message in rpc_incoming.subscriptions: self.handle_subscription(peer_id, message) - if rpc_incoming.control: + # pylint: disable=line-too-long + # NOTE: Check if `rpc_incoming.control` is set through `HasField`. + # This is necessary because `control` is an optional field in pb2. + # Ref: https://developers.google.com/protocol-buffers/docs/reference/python-generated#singular-fields-proto2 + if rpc_incoming.HasField("control"): # Pass rpc to router so router could perform custom logic await self.router.handle_rpc(rpc_incoming, peer_id)
Remove dead code. Summary: We don't need this in Lightning trainers.
@@ -8,7 +8,6 @@ import reagent.types as rlt import torch from reagent.core.configuration import resolve_defaults from reagent.core.dataclasses import dataclass, field -from reagent.core.tracker import observable from reagent.optimizer import Optimizer__Union, SoftUpdate from reagent.parameters import EvaluationParameters, RLParameters from reagent.training.dqn_trainer_base import DQNTrainerBaseLightning @@ -24,17 +23,6 @@ class BCQConfig: drop_threshold: float = 0.1 -@observable( - td_loss=torch.Tensor, - reward_loss=torch.Tensor, - logged_actions=torch.Tensor, - logged_propensities=torch.Tensor, - logged_rewards=torch.Tensor, - model_propensities=torch.Tensor, - model_rewards=torch.Tensor, - model_values=torch.Tensor, - model_action_idxs=torch.Tensor, -) class DQNTrainer(DQNTrainerBaseLightning): @resolve_defaults def __init__(
(re)Added !clan command. Now it works, cache goes vroooom.
@@ -2583,6 +2583,21 @@ async def clan_info(ctx: Context) -> Optional[str]: return "\n".join(msg) +@clan_commands.add(Privileges.NORMAL) +async def clan_leave(ctx: Context): + """Leaves the clan you're in.""" + p = await app.state.sessions.players.from_cache_or_sql(name=ctx.player.name) + + if p.clan == None: + return "You're not in a clan." + elif p.clan_priv != 3: + return "You must pass the clan ownership before leaving it. Alternatively you can use !clan disband." + + await app.state.services.database.execute( + "UPDATE users SET clan_id=0, clan_priv=0 WHERE id = :uid", {"uid": p.id} + ) + p.clan.remove_member(p) + return "You have successfully left your clan." # TODO: !clan inv, !clan join, !clan leave
Added: PReLU to caffe emitter. Also fixed unicode issues for python2.
@@ -74,7 +74,7 @@ def gen_weight(weight_file, model, prototxt): global __weights_dict __weights_dict = load_weights(weight_file) - net = caffe.Net(str(prototxt), caffe.TRAIN) + net = caffe.Net(prototxt, caffe.TRAIN) for key in __weights_dict: if 'weights' in __weights_dict[key]: @@ -88,6 +88,8 @@ def gen_weight(weight_file, model, prototxt): net.params[key][0].data.flat = __weights_dict[key]['scale'] if 'bias' in __weights_dict[key]: net.params[key][1].data.flat = __weights_dict[key]['bias'] + if 'gamma' in __weights_dict[key]: # used for prelu, not sure if other layers use this too + net.params[key][0].data.flat = __weights_dict[key]['gamma'] net.save(model) return net @@ -99,8 +101,9 @@ if __name__=='__main__': parser.add_argument('--prototxt', '-p', type=_text_type, default='caffe_converted.prototxt') parser.add_argument('--model', '-m', type=_text_type, default='caffe_converted.caffemodel') args = parser.parse_args() - make_net(args.prototxt) - gen_weight(args.weight_file, args.model, args.prototxt) + # For some reason argparser gives us unicode, so we need to conver to str first + make_net(str(args.prototxt)) + gen_weight(str(args.weight_file), str(args.model), str(args.prototxt)) """ @@ -251,7 +254,8 @@ bias_term={}, ntop=1)".format( self.phase == 'test' )) scale_layer_var_name = IR_node.variable_name + "_scale" - self.add_body(1, "n.{:<15} = L.Scale(n.{}, bias_term={}, ntop=1)".format( + # Since the scale layer is "almost part" of the bn layer, we can safely use in_place here. + self.add_body(1, "n.{:<15} = L.Scale(n.{}, bias_term={}, in_place=True, ntop=1)".format( scale_layer_var_name, IR_node.variable_name, IR_node.get_attr('bias', False) @@ -315,6 +319,13 @@ bias_term={}, ntop=1)".format( self.parent_variable_name(IR_node), in_place)) + def emit_PRelu(self, IR_node): + in_place = True + self.add_body(1, "n.{:<15} = L.PReLU(n.{}, in_place={}, ntop=1)".format( + IR_node.variable_name, + self.parent_variable_name(IR_node), + in_place)) + def emit_Softmax(self, IR_node): self.add_body(1, "n.{:<15} = L.Softmax(n.{}, ntop=1)".format(
docs/getting-started.rst: Add missing argument to init role. The default driver is 'delegated' nowadays and this document is about using the 'docker' driver, so "--driver-name docker" has to be appended to the "molecule init role" command line. Ref:
@@ -32,7 +32,7 @@ To generate a new role with Molecule, simply run: .. code-block:: bash - $ molecule init role my-new-role + $ molecule init role my-new-role --driver-name docker You should then see a ``my-new-role`` folder in your current directory.
DOC: stats: Fix versionadded markup for odds_ratio [skip azp]
@@ -363,7 +363,7 @@ def odds_ratio(table, *, kind='conditional'): The conditional odds ratio was discussed by Fisher (see "Example 1" of [1]_). Texts that cover the odds ratio include [2]_ and [3]_. - .. versionadded:: 1.7.0 + .. versionadded:: 1.10.0 References ----------
slight tweaks to side-nav behavior: toggle in both directions depending on tablet transitions; also gets rid of a lint warning.
title(newVal, oldVal) { document.title = `${newVal} - Kolibri`; }, - 'windowSize.breakpoint': function (newVal, oldVal) { // eslint-disable-line object-shorthand - // Pop out the nav if transitioning from smaller viewport. - if (oldVal < 5 & newVal > 4) { + 'windowSize.breakpoint': function updateNav(newVal, oldVal) { + if (oldVal === 4 & newVal === 5) { + // Pop out the nav if transitioning from 4 to 5 this.navShown = true; + } else if (oldVal === 2 & newVal === 1) { + // Pop in the nav if transitioning from 2 to 1 + this.navShown = false; } }, }, data: () => ({ scrolled: false, - navShown: true, + navShown: false, }), computed: { mobile() { this.scrolled = false; } }, 75); - if (this.mobile) { - this.navShown = false; + if (this.windowSize.breakpoint >= 5) { + this.navShown = true; } }, };
prod_settings_template: Standardize length of heading hashes. Adjust "mandatory settings" and "GitLab OAuth" lengths to match the length of all of the rest of their same-level headings.
@@ -14,7 +14,7 @@ from typing import Any, Dict, Tuple ## su zulip -c /home/zulip/deployments/current/scripts/restart-server -################################ +################ ## Mandatory settings. ## ## These settings MUST be set in production. In a development environment, @@ -268,7 +268,7 @@ AUTH_LDAP_USER_ATTR_MAP = { ## client secret in zulip-secrets.conf as `social_auth_google_secret`. # SOCIAL_AUTH_GOOGLE_KEY = <your client ID from Google> -####### +######## ## GitLab OAuth. ## ## To set up GitLab authentication, you'll need to do the following:
Prepare 2.0.1rc4. [ci skip-rust]
@@ -6,6 +6,26 @@ This document describes releases leading up to the ``2.0.x`` ``stable`` series. See https://www.pantsbuild.org/v2.0/docs/release-notes-2-0 for an overview of the changes in this release, and https://www.pantsbuild.org/docs/plugin-upgrade-guide for a plugin upgrade guide. +2.0.1rc4 (12/09/2020) +--------------------- + +Bugfixes +~~~~~~~~ + +* Revert "Move graph cycle detection to rust. (#11202)" (cherrypick of #11272) (#11277) + `PR #11202 <https://github.com/pantsbuild/pants/pull/11202>`_ + `PR #11277 <https://github.com/pantsbuild/pants/pull/11277>`_ + +* Increase Pants' python recursion limit by default, and allow it to be overridden. (cherrypick of #11271) (#11276) + `PR #11276 <https://github.com/pantsbuild/pants/pull/11276>`_ + +Internal +~~~~~~~~ + +* [tests] Use a bandit version that works with older python versions. (cherrypick of #11268) (#11279) + `PR #11279 <https://github.com/pantsbuild/pants/pull/11279>`_ + + 2.0.1rc3 (11/24/2020) ---------------------
[backwards incompatible] switch ItemLoader from .extract to .getall. This change is backwards incompatible if ItemLoader is used with a custom Selector subclass which overrides .extract without overriding .getall.
@@ -181,7 +181,7 @@ class ItemLoader(object): def _get_xpathvalues(self, xpaths, **kw): self._check_selector_method() xpaths = arg_to_iter(xpaths) - return flatten(self.selector.xpath(xpath).extract() for xpath in xpaths) + return flatten(self.selector.xpath(xpath).getall() for xpath in xpaths) def add_css(self, field_name, css, *processors, **kw): values = self._get_cssvalues(css, **kw) @@ -198,6 +198,6 @@ class ItemLoader(object): def _get_cssvalues(self, csss, **kw): self._check_selector_method() csss = arg_to_iter(csss) - return flatten(self.selector.css(css).extract() for css in csss) + return flatten(self.selector.css(css).getall() for css in csss) XPathItemLoader = create_deprecated_class('XPathItemLoader', ItemLoader)
Updates ROUTING.md: fix typo in prefixes. improves
@@ -109,7 +109,7 @@ in addition to `hug.http` hug includes convience decorators for all common HTTP - `examples`: A list of or a single example set of parameters in URL query param format. For example: `examples="argument_1=x&argument_2=y"` - `versions`: A list of or a single integer version of the API this endpoint supports. To support a range of versions the Python builtin range function can be used. - `suffixes`: A list of or a single suffix to add to the end of all URLs using this router. - - `prefixes`: A list of or a single suffix to add to the end of all URLs using this router. + - `prefixes`: A list of or a single prefix to add to before all URLs using this router. - `response_headers`: An optional dictionary of response headers to set automatically on every request to this endpoint. - `status`: An optional status code to automatically apply to the response on every request to this endpoint. - `parse_body`: If `True` and the format of the request body matches one known by hug, hug will run the specified input formatter on the request body before passing it as an argument to the routed function. Defaults to `True`.
Fix reference to `.first()` in docs. Replaces Fixes
@@ -581,9 +581,9 @@ For more information, see the documentation on: * :py:meth:`Model.get` * :py:meth:`Model.get_by_id` * :py:meth:`Model.get_or_none` - if no matching row is found, return ``None``. -* :py:meth:`Model.first` * :py:meth:`Model.select` * :py:meth:`SelectBase.get` +* :py:meth:`SelectBase.first` - return first record of result-set or ``None``. Create or get -------------
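A hedged peewee sketch (in-memory SQLite, made-up User model) of the behavioural difference the added doc line points at: SelectBase.first() returns None on an empty result set, while .get() raises <Model>.DoesNotExist.

from peewee import SqliteDatabase, Model, CharField

db = SqliteDatabase(":memory:")

class User(Model):
    username = CharField()

    class Meta:
        database = db

db.connect()
db.create_tables([User])

query = User.select().where(User.username == "nobody")
print(query.first())          # None -- empty result set, no exception
try:
    query.get()               # raises User.DoesNotExist instead
except User.DoesNotExist:
    print("no such user")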
[tests] Update FamilyTestGenerator in generate_family_file. generate_family_file.FamilyFileGenerator.writefile has a verify parameter.
@@ -29,7 +29,7 @@ class FamilyTestGenerator(generate_family_file.FamilyFileGenerator): super().getapis() self.langs = save - def writefile(self): + def writefile(self, verify): """Pass writing.""" pass
Update connectionBuilder.js: add a code snippet to use odbc-connect-string-extras set in postgres_odbc
params["UseDeclareFetch"] = "1"; params["Fetch"] = "2048"; + var odbcConnectStringExtrasMap = {}; + if ("odbc-connect-string-extras" in attr) + { + odbcConnectStringExtrasMap = connectionHelper.ParseODBCConnectString(attr["odbc-connect-string-extras"]); + } + for (var key in odbcConnectStringExtrasMap) + { + params[key] = odbcConnectStringExtrasMap[key]; + } + var formattedParams = []; formattedParams.push(connectionHelper.formatKeyValuePair(driverLocator.keywordDriver, driverLocator.locateDriver(attr)));
Trying to figure out why Travis didn't load my tests. Removed the pypy check for now, so the pypy tests will definitely fail.
@@ -52,8 +52,8 @@ install: - python -c 'import awkward; print(awkward.__version__)' - export AWKWARD_DEPLOYMENT=base - pip install --upgrade pyOpenSSL # for deployment - - if [[ $TRAVIS_PYTHON_VERSION != pypy* ]] ; then pip install pybind11 ; fi - - if [[ $TRAVIS_PYTHON_VERSION != pypy* ]] ; then ln -s ../awkward-cpp/awkward/cpp awkward/cpp ; fi + - pip install pybind11 + - ln -s ../awkward-cpp/awkward/cpp awkward/cpp - python setup.py install - cd awkward-cpp - if [[ $TRAVIS_PYTHON_VERSION != pypy* ]] ; then python setup.py install ; fi
make jit logging visible, so it can be used in a TVM compiler Summary: Pull Request resolved:
#pragma once #include <string> +#include <torch/csrc/WindowsTorchApiMacro.h> + // To enable logging please set(export) PYTORCH_JIT_LOG_LEVEL to // the ordinal value of one of the following logging levels: 1 for GRAPH_DUMP, // 2 for GRAPH_UPDATE, 3 for GRAPH_DEBUG. @@ -23,15 +25,15 @@ enum class JitLoggingLevels { std::string debugValueOrDefault(const Node* n); -JitLoggingLevels jit_log_level(); +TORCH_API JitLoggingLevels jit_log_level(); -std::string jit_log_prefix( +TORCH_API std::string jit_log_prefix( JitLoggingLevels level, const char* fn, int l, const std::string& in_str); -std::ostream& operator<<(std::ostream& out, JitLoggingLevels level); +TORCH_API std::ostream& operator<<(std::ostream& out, JitLoggingLevels level); #define JIT_LOG(level, ...) \ if (jit_log_level() != JitLoggingLevels::OFF && jit_log_level() >= level) { \
Replace deprecated BuildEnvironment.create_index(). This function was deprecated in Sphinx 1.6 and removed in 2.0. The function call is replaced with the recommended replacement according to
@@ -55,6 +55,7 @@ from sphinx.util.console import darkgreen, red from sphinx.util import SEP from sphinx.util import ustrftime from sphinx.environment import NoUri +from sphinx.environment.adapters.indexentries import IndexEntries from sphinx.locale import admonitionlabels, versionlabels if sphinx.__version__ >= '1.': from sphinx.locale import _ @@ -212,7 +213,7 @@ class PDFBuilder(Builder): self.env.indexentries={} for dname in self.docnames: self.env.indexentries[dname]=t.get(dname,[]) - genindex = self.env.create_index(self) + genindex = IndexEntries(self.env).create_index(self) self.env.indexentries=t # EOH (End Of Hack)
Add condition to os-net-config run during upgrade. This adds two conditionals: first, check that os-net-config needs an upgrade; second, verify that the configuration file exists and is non-empty. This prevents unnecessary runs of os-net-config and errors in certain network configurations. Closes-Bug:
@@ -53,6 +53,16 @@ outputs: fail: msg="rpm-python package was not present before this run! Check environment before re-running" when: rpm_python_check.changed != false tags: step0 + - name: Check for os-net-config upgrade + shell: yum check-upgrade | grep os-net-config + register: os_net_config_need_upgrade + ignore_error: True + tags: step3 + - name: Check that os-net-config has configuration + shell: test -s /etc/os-net-config/config.json + register: os_net_config_has_config + ignore_error: True + tags: step3 - block: - name: Upgrade os-net-config yum: name=os-net-config state=latest @@ -62,6 +72,7 @@ outputs: failed_when: os_net_config_upgrade.rc not in [0,2] changed_when: os_net_config_upgrade.rc == 2 tags: step3 + when: not os_net_config_need_upgrade.stdout and os_net_config_has_config.rc == 0 - name: Update all packages tags: step3 yum: name=* state=latest
Add testdir examples to the CONTRIBUTING guide. Hopefully Closes:
@@ -280,6 +280,37 @@ Here is a simple overview, with pytest-specific bits: base: features # if it's a feature +Writing Tests +---------------------------- + +Writing tests for plugins or for pytest itself is done using the `testdir fixture <https://docs.pytest.org/en/latest/reference.html#testdir>`_, + +For example: + +.. code-block:: python + + def test_true_assertion(testdir): + testdir.makepyfile( + """ + def test_foo(): + assert True + """ + ) + result = testdir.runpytest() + result.assert_outcomes(failed=0, passed=1) + + + def test_true_assertion(testdir): + testdir.makepyfile( + """ + def test_foo(): + assert False + """ + ) + result = testdir.runpytest() + result.assert_outcomes(failed=1, passed=0) + + Joining the Development Team ----------------------------
Shuffle AssociationItem code so that post_update() is no longer needed.
@@ -139,12 +139,6 @@ class AssociationItem(LinePresentation[UML.Association], Named): """Handle events and update text on association end.""" for end in (self._head_end, self._tail_end): end.set_text() - self.request_update() - - def post_update(self, context): - """Update the shapes and sub-items of the association.""" - - handles = self.handles() # Update line endings: head_subject = self.head_subject @@ -181,14 +175,7 @@ class AssociationItem(LinePresentation[UML.Association], Named): self.draw_head = draw_default_head self.draw_tail = draw_default_tail - # update relationship after self.set calls to avoid circural updates - super().post_update(context) - - # Calculate alignment of the head name and multiplicity - self._head_end.post_update(context, handles[0].pos, handles[1].pos) - - # Calculate alignment of the tail name and multiplicity - self._tail_end.post_update(context, handles[-1].pos, handles[-2].pos) + self.request_update() def point(self, x, y): """Returns the distance from the Association to the (mouse) cursor.""" @@ -198,6 +185,15 @@ class AssociationItem(LinePresentation[UML.Association], Named): def draw(self, context): super().draw(context) + + handles = self.handles() + + # Calculate alignment of the head name and multiplicity + self._head_end.update_position(context, handles[0].pos, handles[1].pos) + + # Calculate alignment of the tail name and multiplicity + self._tail_end.update_position(context, handles[-1].pos, handles[-2].pos) + self._head_end.draw(context) self._tail_end.draw(context) if self.show_direction: @@ -376,7 +372,7 @@ class AssociationEnd: def get_mult(self): return self._mult - def post_update(self, context, p1, p2): + def update_position(self, context, p1, p2): """Update label placement for association's name and multiplicity label.
Generalize setting the specified function. Use a function to set up the wrapper function and call it properly.
@@ -108,23 +108,38 @@ except DbusClientGenerationError as err: # pragma: no cover ) from err -try: - orig_method = Manager.Methods.CreatePool # pylint: disable=invalid-name +def _add_abs_path_assertion(klass, method_name, key): + """ + Set method_name of method_klass to a new method which checks that the + device paths values at key are absolute paths. + + :param klass: the klass to which this metthod belongs + :param str method_name: the name of the method + :param str key: the key at which the paths can be found in the arguments + """ + method_class = getattr(klass, "Methods") + orig_method = getattr(method_class, method_name) def new_method(proxy, args): """ New CreatePool method """ - rel_paths = [path for path in args["devices"] if not os.path.isabs(path)] + rel_paths = [path for path in args[key] if not os.path.isabs(path)] assert ( rel_paths == [] ), "Precondition violated: paths %s should be absolute" % ", ".join(rel_paths) return orig_method(proxy, args) - Manager.Methods.CreatePool = new_method + setattr(method_class, method_name, new_method) + + +try: + _add_abs_path_assertion(Manager, "CreatePool", "devices") + except AttributeError as err: # pragma: no cover - # This can only happen if CreatePool is missing from the XML spec or - # code generation has a bug, we will never test for these conditions. + # This can only happen if the expected method is missing from the XML spec + # or code generation has a bug, we will never test for these conditions. raise StratisCliGenerationError( - "Malformed class definition; could not access Manager.Methods.CreatePool" + "Malformed class definition; could not access a class or method in " + "the generated class definition" ) from err
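The same wrap-and-replace pattern in isolation, against a made-up Dummy class rather than the generated Manager interface: look the method up with getattr, wrap it with the precondition check, and put the wrapper back with setattr.

import os

class Dummy:
    class Methods:
        def CreatePool(proxy, args):   # first arg mirrors the generated client code
            return ("created", args)

def add_abs_path_assertion(klass, method_name, key):
    method_class = getattr(klass, "Methods")
    orig_method = getattr(method_class, method_name)

    def new_method(proxy, args):
        rel_paths = [p for p in args[key] if not os.path.isabs(p)]
        assert rel_paths == [], "paths must be absolute: %s" % ", ".join(rel_paths)
        return orig_method(proxy, args)

    setattr(method_class, method_name, new_method)

add_abs_path_assertion(Dummy, "CreatePool", "devices")
print(Dummy.Methods.CreatePool(None, {"devices": ["/dev/sdb"]}))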
Update to Prometheus 2.13.1 [BUGFIX] Fix panic in ARM builds of Prometheus. [BUGFIX] promql: fix potential panic in the query logger. [BUGFIX] Multiple errors of http: superfluous response.WriteHeader call in the logs.
%define debug_package %{nil} Name: prometheus2 -Version: 2.13.0 +Version: 2.13.1 Release: 1%{?dist} Summary: The Prometheus 2.x monitoring system and time series database. License: ASL 2.0
client2: rendering: refactor show_html. This patch moves all patch-related code into `_apply_patch` to make the code more readable. It does not contain any functional changes.
@@ -589,7 +589,10 @@ export class LonaRenderingEngine { _apply_patch(patch) { var patch_type = patch[1]; - if(patch_type == Lona.protocol.PATCH_TYPE.NODES) { + if(patch_type == Lona.protocol.PATCH_TYPE.WIDGET_DATA) { + this._apply_patch_to_widget_data(patch); + + } else if(patch_type == Lona.protocol.PATCH_TYPE.NODES) { this._apply_patch_to_child_nodes(patch); } else { @@ -628,16 +631,7 @@ export class LonaRenderingEngine { this._widgets_to_update = []; data.forEach(patch => { - var patch_type = patch[1]; - - if(patch_type == Lona.protocol.PATCH_TYPE.WIDGET_DATA) { - this._apply_patch_to_widget_data(patch); - - } else { this._apply_patch(patch); - - }; - this._clean_node_cache(); }); };
Update __init__.py Clean logging details
@@ -10,9 +10,6 @@ except Exception: import logging log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) -# log.info("Welcome ! this is a INFO msg") -# log.debug("Welcome ! this is a DEBUG msg") -# log.warning("Welcome ! this is a WARNING msg") # Import facades: from .fetchers import ArgoDataFetcher as DataFetcher # noqa: F401 isort:skip
Patches MapForwardSimulator.create_layout to work when max_cache_size is None. An oversight in the logic in the nested `approx_cache_mem_estimate` caused an exception to be raised when max_cache_size is None. Fixed now.
@@ -142,7 +142,9 @@ class MapForwardSimulator(_DistributableForwardSimulator, SimpleMapForwardSimula return _cache_mem(max_cache_size, blk1, blk2) def approx_cache_mem_estimate(nc, np1, np2, n_comms): - approx_cache_size = min((len(circuits) / nc) * 0.7, self._max_cache_size) + approx_cache_size = (len(circuits) / nc) * 0.7 + if self._max_cache_size is not None: + approx_cache_size = min(approx_cache_size, self._max_cache_size) return _cache_mem(approx_cache_size, num_params / np1, num_params / np2) cmem = cache_mem_estimate(nc, np1, np2, Ng) # initial estimate (to screen)
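The guard added above, reduced to a standalone sketch with simplified arithmetic: only clamp the estimate when a maximum is actually configured, so a None limit never reaches min().

def approx_cache_size(n_circuits, max_cache_size=None):
    size = n_circuits * 0.7
    if max_cache_size is not None:      # the fix: skip min() when unlimited
        size = min(size, max_cache_size)
    return size

print(approx_cache_size(100))        # 70.0 (no limit configured)
print(approx_cache_size(100, 50))    # 50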
Last few Flake8 complaints. Finished cleaning up the last few Flake8 complaints in profile.py.
@@ -8,6 +8,7 @@ from naomi import paths import os from . import populate import re +import shutil import yaml _profile = {} @@ -40,8 +41,12 @@ def get_profile(command=""): try: os.makedirs(paths.SUB_PATH) except OSError: - _logger.error("Could not create .naomi dir: '%s'", - paths.SUB_PATH, exc_info=True) + _logger.error( + "Could not create .naomi dir: '{}'".format( + paths.SUB_PATH + ), + exc_info=True + ) raise # Check if .naomi dir is writable @@ -59,8 +64,12 @@ def get_profile(command=""): try: os.makedirs(paths.CONFIG_PATH) except OSError: - _logger.error("Could not create .naomi/configs dir: '%s'", - paths.CONFIG_PATH, exc_info=True) + _logger.error( + "Could not create .naomi/configs dir: '{}'".format( + paths.CONFIG_PATH + ), + exc_info=True + ) raise # Check if .naomi/configs dir is writable @@ -133,8 +142,12 @@ def get_profile(command=""): print("Cannot continue. Exiting.") quit() except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e: - _logger.error("Unable to parse config file: %s %s", - e.problem.strip(), str(e.problem_mark).strip()) + _logger.error( + "Unable to parse config file: {} {}".format( + e.problem.strip(), + str(e.problem_mark).strip() + ) + ) raise configfile = paths.config('profile.yml') with open(configfile, "r") as f:
Update build.sh: the cudnn7 folder is not available.
@@ -143,7 +143,7 @@ build_local(){ elif [[ ${build_ver} == "gpu" ]]; then echo "building ESPnet GPU Image with ubuntu:${ubuntu_ver} and cuda:${cuda_ver}" if [ "${build_base_image}" = true ] ; then - docker build -f prebuilt/devel/gpu/${ver}/cudnn7/Dockerfile -t espnet/espnet:cuda${ver}-cudnn7 . || exit 1 + docker build -f prebuilt/devel/gpu/${ver}/Dockerfile -t espnet/espnet:cuda${ver}-cudnn7 . || exit 1 else if ! [[ -n $( docker images -q espnet/espnet:cuda-latest) ]]; then docker pull espnet/espnet:cuda-latest
add removing 'on_return' handler on channel close (fix memory leak). Because the aiormq Channel keeps a reference to the 'on_return' method of the aio-pika Channel, the garbage collector cannot free the Channel object, which causes a memory leak.
@@ -184,6 +184,9 @@ class Channel(ChannelContext): async def _on_close(self, closing: asyncio.Future) -> None: await self.close_callbacks(closing.exception()) + if self._channel and self._channel.channel: + self._channel.channel.on_return_callbacks.discard(self._on_return) + async def _on_initialized(self) -> None: self.channel.on_return_callbacks.add(self._on_return)
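A self-contained illustration (no aio-pika or aiormq required) of why discarding the callback matters: a callback set that holds a bound method also holds a strong reference to its owner, so the owner cannot be freed until the handler is removed.

import gc
import weakref

class Channel:
    def _on_return(self, message):
        pass

on_return_callbacks = set()

ch = Channel()
on_return_callbacks.add(ch._on_return)   # bound method -> strong ref to ch
ref = weakref.ref(ch)

del ch
gc.collect()
print(ref() is None)                     # False: the channel object leaks

on_return_callbacks.discard(ref()._on_return)  # the fix: drop the handler
gc.collect()
print(ref() is None)                     # True: the channel can now be freed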
add juju debug output for bootstrap if --debug exists Fixes
@@ -175,7 +175,11 @@ def bootstrap(controller, cloud, series="xenial", credential=None): cmd += "--bootstrap-series={} ".format(series) if cloud != "localhost": cmd += "--credential {} ".format(credential) + + if app.argv.debug: + cmd += "--debug" app.log.debug("bootstrap cmd: {}".format(cmd)) + try: pathbase = os.path.join(app.config['spell-dir'], '{}-bootstrap').format(app.current_controller)
[LongT5] Remove duplicate encoder_attention_mask default value check Remove duplicate encoder_attention_mask default value assignment
@@ -1449,11 +1449,6 @@ class LongT5Stack(LongT5PreTrainedModel): if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) - if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None: - encoder_seq_length = encoder_hidden_states.shape[1] - encoder_attention_mask = torch.ones( - batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long - ) # initialize past_key_values with `None` if past does not exist if past_key_values is None:
Add missing dependencies for Travis and fix X11 error. The Travis build lacks some dependencies needed to perform all the tests: gdata and feedparser. Also fix the X11 error raised when trying to import gstreamer.
@@ -15,6 +15,13 @@ before_install: - sudo apt-get install --yes gstreamer1.0-alsa gstreamer1.0-plugins-bad gstreamer1.0-plugins-base gstreamer1.0-plugins-base-apps gstreamer1.0-plugins-good gstreamer1.0-plugins-ugly gstreamer1.0-libav # Dependencies for gi.repository: Gst, GObject, Cairo - sudo apt-get install -y libgirepository1.0-dev libcairo2-dev gir1.2-gtk-3.0 gobject-introspection python3-gi python3-gi-cairo gir1.2-gtk-3.0 python3-gst-1.0 + # Tricks to avoid gstreamer error about X11: + # 'Gtk3 requires X11, and no DISPLAY environment variable is set' + # http://docs.travis-ci.com/user/gui-and-headless-browsers/#Starting-a-Web-Server + - sudo apt-get install -y xvfb + - "export DISPLAY=:99.0" + - "sh -e /etc/init.d/xvfb start" + - sleep 3 install: - pip install --upgrade setuptools - pip install --upgrade cython @@ -25,6 +32,8 @@ install: - pip install -e git+https://github.com/JonnyJD/python-discid@master#egg=discid --no-dependencies - pip install git+git://github.com/opacam/epsilon@python3#egg=Epsilon - pip install git+git://github.com/opacam/axiom@python3#egg=Axiom + - pip install git+https://github.com/dvska/gdata-python3#egg=gdata + - pip install feedparser - pip install livestreamer - pip install service_identity - pip install python-coveralls==2.9.1
add format fields 'compiler', 'hostname', 'user' in buildtest report command. Alphabetize format fields so that buildtest report --helpformat will show them sorted by field name.
@@ -21,22 +21,25 @@ class Report: # all format fields available for --helpformat format_fields = [ "buildspec", - "name", - "id", - "full_id", - "testroot", - "testpath", "command", - "outfile", + "compiler", + "endtime", "errfile", - "schemafile", "executor", - "tags", - "starttime", - "endtime", + "full_id", + "hostname", + "id", + "name", + "outfile", "runtime", - "state", "returncode", + "schemafile", + "starttime", + "state", + "tags", + "testroot", + "testpath", + "user", ] filter_fields = ["buildspec", "name", "executor", "state", "tags", "returncode"] @@ -280,23 +283,30 @@ class Report: """Implements command ``buildtest report --helpformat``""" format_table = [ ["buildspec", "Buildspec file"], - ["name", "Name of test defined in buildspec"], - ["id", "Unique Build Identifier (abbreviated)"], - ["full_id", "Full qualified unique build identifier"], - ["testroot", "Root of test directory"], - ["testpath", "Path to test"], ["command", "Command executed"], - ["outfile", "Output file"], + [ + "compiler", + "Retrieve compiler used for test (applicable for compiler schema)", + ], + ["endtime", "End Time for Test in date format"], ["errfile", "Error File"], - ["schemafile", "Schema file used for validation"], ["executor", "Executor name"], - ["tags", "Tag name"], - ["starttime", "Start Time of test in date format"], - ["endtime", "End Time for Test in date format"], + ["hostname", "Retrieve hostname of machine where job was submitted from"], + ["full_id", "Full qualified unique build identifier"], + ["id", "Unique Build Identifier (abbreviated)"], + ["name", "Name of test defined in buildspec"], + ["outfile", "Output file"], + ["returncode", "Return Code from Test Execution"], ["runtime", "Total runtime in seconds"], + ["schemafile", "Schema file used for validation"], + ["starttime", "Start Time of test in date format"], ["state", "Test State reported by buildtest (PASS/FAIL)"], - ["returncode", "Return Code from Test Execution"], + ["tags", "Tag name"], + ["testroot", "Root of test directory"], + ["testpath", "Path to test"], + ["user", "Get user who submitted job"], ] + headers = ["Fields", "Description"] table = [] if os.getenv("BUILDTEST_COLOR") == "True":
Update field.html template. Change the position of help_texts and errors for all form fields. Additionally, adjust the layout of subfields (add margins and the like).
{% include 'bootstrap4/layout/field_file.html' %} {% else %} <div class="{{ field_class }}"> - {% crispy_field field %} {% include 'bootstrap4/layout/help_text_and_errors.html' %} + {% crispy_field field %} {% if field.field.widget.subfield %} {% with subfield=field.field.widget.subfield %} + {% if field.help_text %} + {% crispy_field subfield 'class' 'form-control-sm mt-2' %} + {% else %} {% crispy_field subfield 'class' 'form-control-sm' %} + {% endif %} {% endwith %} {% elif field.field.widget.subfields %} <div class="row mb-3"> {% for f in field.field.widget.subfields %} - {% with subfield=f.0 placeholder=f.1 %} + {% with superfield=field field=f.0 placeholder=f.1 %} + {% if superfield.help_text %} + <div class="col mt-2"> + {% else %} <div class="col"> - {% crispy_field subfield 'class' 'form-control-sm' 'placeholder' placeholder %} + {% endif %} + {% include 'bootstrap4/layout/help_text_and_errors.html' %} + {% crispy_field field 'class' 'form-control-sm' 'placeholder' placeholder %} </div> {% endwith %} {% endfor %}
Improve PrintableErrorField formatting clarity. Minor reordering, and make the format method return its result (rather than changing an attribute in place).
@@ -13,29 +13,29 @@ class PrintableErrorField(object): TEXT_PREFIX = 'Globus CLI Error:' def __init__(self, name, value, multiline=False): + self.multiline = multiline self.name = safe_stringify(name) self.raw_value = safe_stringify(value) - self.value = self.raw_value - self.multiline = multiline - self._format_value() + self.value = self._format_value(self.raw_value) @property def _text_prefix_len(self): return len(self.TEXT_PREFIX) - def _format_value(self): + def _format_value(self, val): """ - formats self.value to be good for textmode printing - self.value must be unicode before this is called, and will remain so + formats a value to be good for textmode printing + val must be unicode """ name = self.name + ':' - if not self.multiline or '\n' not in self.value: - self.value = u'{0} {1}'.format(name.ljust(self._text_prefix_len), - self.value) + if not self.multiline or '\n' not in val: + val = u'{0} {1}'.format(name.ljust(self._text_prefix_len), val) else: spacer = '\n' + ' '*(self._text_prefix_len + 1) - self.value = u'{0}{1}{2}'.format( - name, spacer, spacer.join(self.value.split('\n'))) + val = u'{0}{1}{2}'.format( + name, spacer, spacer.join(val.split('\n'))) + + return val def write_error_info(error_name, fields, message=None):
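The formatting rule being refactored, restated as a tiny standalone function (same constant and width math as the diff) so the single-line vs. multiline behaviour is easy to see.

TEXT_PREFIX = "Globus CLI Error:"

def format_value(name, value, multiline=False):
    label = name + ":"
    if not multiline or "\n" not in value:
        # single-line values are padded out to the prefix width
        return "{0} {1}".format(label.ljust(len(TEXT_PREFIX)), value)
    # multiline values get each line indented under the label
    spacer = "\n" + " " * (len(TEXT_PREFIX) + 1)
    return "{0}{1}{2}".format(label, spacer, spacer.join(value.split("\n")))

print(format_value("code", "AuthFailure"))
print(format_value("message", "first line\nsecond line", multiline=True))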
ebd/eapi/2/src_configure.bash: Fix typo in econf --libdir logic Fixes
@@ -26,7 +26,7 @@ econf() { [[ ${CONF_PREFIX} != /* ]] && CONF_PREFIX=/${CONF_PREFIX} elif [[ $* == *"--prefix="* ]]; then local args=$(echo $*) - local -a pref=( $(echo ${args/*--prefix[= ]}) ) + local -a prefix=( $(echo ${args/*--prefix[= ]}) ) CONF_PREFIX=${prefix/--*} [[ ${CONF_PREFIX} != /* ]] && CONF_PREFIX=/${CONF_PREFIX} else
TrainingRequestUpdateForm: Disable `score_auto` field. Admins won't be able to alter the score calculated automatically by AMY.
@@ -1362,6 +1362,12 @@ class TrainingRequestUpdateForm(forms.ModelForm): widget=ModelSelect2(url='person-lookup') ) + score_auto = forms.IntegerField( + disabled=True, + label=TrainingRequest._meta.get_field('score_auto').verbose_name, + help_text=TrainingRequest._meta.get_field('score_auto').help_text, + ) + helper = BootstrapHelper(duplicate_buttons_on_top=True, submit_label='Update')
issue refactor Connection to support reset(). Now the tests pass.
@@ -550,7 +550,7 @@ class Connection(ansible.plugins.connection.ConnectionBase): self.host_vars = task_vars['hostvars'] self.delegate_to_hostname = delegate_to_hostname self.loader_basedir = loader_basedir - self.close(new_task=True) + self._reset(mode='put') def get_task_var(self, key, default=None): if self._task_vars and key in self._task_vars: @@ -709,6 +709,20 @@ class Connection(ansible.plugins.connection.ConnectionBase): self.get_chain().call_no_reply(os.mkdir, self._shell.tmpdir) return self._shell.tmpdir + def _reset_tmp_path(self): + """ + Called by _reset(); ask the remote context to delete any temporary + directory created for the action. CallChain is not used here to ensure + exception is logged by the context on failure, since the CallChain + itself is about to be destructed. + """ + if getattr(self._shell, 'tmpdir', None) is not None: + self.context.call_no_reply( + ansible_mitogen.target.prune_tree, + self._shell.tmpdir, + ) + self._shell.tmpdir = None + def _connect(self): """ Establish a connection to the master process's UNIX listener socket, @@ -727,25 +741,22 @@ class Connection(ansible.plugins.connection.ConnectionBase): stack = self._build_stack() self._connect_stack(stack) - def close(self, new_task=False): + def _reset(self, mode): """ - Arrange for the mitogen.master.Router running in the worker to - gracefully shut down, and wait for shutdown to complete. Safe to call - multiple times. + Forget everything we know about the connected context. + + :param str mode: + Name of ContextService method to use to discard the context, either + 'put' or 'reset'. """ - if getattr(self._shell, 'tmpdir', None) is not None: - # Avoid CallChain to ensure exception is logged on failure. - self.context.call_no_reply( - ansible_mitogen.target.prune_tree, - self._shell.tmpdir, - ) - self._shell.tmpdir = None + if not self.context: + return - if self.context: + self._reset_tmp_path() self.chain.reset() self.parent.call_service( service_name='ansible_mitogen.services.ContextService', - method_name='put', + method_name=mode, context=self.context ) @@ -753,12 +764,30 @@ class Connection(ansible.plugins.connection.ConnectionBase): self.login_context = None self.init_child_result = None self.chain = None - if self.broker and not new_task: + + def close(self): + """ + Arrange for the mitogen.master.Router running in the worker to + gracefully shut down, and wait for shutdown to complete. Safe to call + multiple times. + """ + self._reset(mode='put') + if self.broker: self.broker.shutdown() self.broker.join() self.broker = None self.router = None + def reset(self): + """ + Explicitly terminate the connection to the remote host. This discards + any local state we hold for the connection, returns the Connection to + the 'disconnected' state, and informs ContextService the connection is + bad somehow, and should be shut down and discarded. + """ + self._connect() + self._reset(mode='reset') + def get_chain(self, use_login=False, use_fork=False): """ Return the :class:`mitogen.parent.CallChain` to use for executing
Update evoked.py. Include a hint about the appropriate file ending in the docstring of the save definition. Matches to
@@ -171,7 +171,8 @@ class Evoked(ProjMixin, ContainsMixin, UpdateChannelsMixin, Parameters ---------- fname : string - Name of the file where to save the data. + The name of the file, which should end with -ave.fif or + -ave.fif.gz. Notes -----
fail longpolling endpoint after 45 seconds. This should fix a possible bug in which connections are left open forever, or worse.
@@ -32,6 +32,24 @@ async def api_public_payment_longpolling(payment_hash): print("adding standalone invoice listener", payment_hash, send_payment) api_invoice_listeners.append(send_payment) + response = None + + async def payment_info_receiver(cancel_scope): async for payment in receive_payment: if payment.payment_hash == payment_hash: - return jsonify({"status": "paid"}), HTTPStatus.OK + nonlocal response + response = (jsonify({"status": "paid"}), HTTPStatus.OK) + cancel_scope.cancel() + + async def timeouter(cancel_scope): + await trio.sleep(45) + cancel_scope.cancel() + + async with trio.open_nursery() as nursery: + nursery.start_soon(payment_info_receiver, nursery.cancel_scope) + nursery.start_soon(timeouter, nursery.cancel_scope) + + if response: + return response + else: + return jsonify({"message": "timeout"}), HTTPStatus.REQUEST_TIMEOUT
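The concurrency pattern used above, as a runnable sketch (requires trio; the short sleep stands in for waiting on a payment event): both tasks share the nursery's cancel scope, and whichever finishes first cancels the other.

import trio

async def main():
    response = None

    async def payment_info_receiver(cancel_scope):
        nonlocal response
        await trio.sleep(1)            # stand-in for awaiting the payment
        response = {"status": "paid"}
        cancel_scope.cancel()

    async def timeouter(cancel_scope):
        await trio.sleep(45)
        cancel_scope.cancel()

    async with trio.open_nursery() as nursery:
        nursery.start_soon(payment_info_receiver, nursery.cancel_scope)
        nursery.start_soon(timeouter, nursery.cancel_scope)

    print(response if response else {"message": "timeout"})

trio.run(main)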
Fixing formatting for timedelta. Now it will only show the number of days. Quality of life: also show `day` instead of `days` when it's just 1 day.
@@ -205,8 +205,8 @@ class Defcon(Cog): msg = f"{Emojis.defcon_disabled} DEFCON disabled.\n\n" elif action is Action.UPDATED: msg = ( - f"{Emojis.defcon_updated} DEFCON days updated; accounts must be {self.days} " - "days old to join the server.\n\n" + f"{Emojis.defcon_updated} DEFCON days updated; accounts must be {self.days.days} " + f"day{'s' if self.days.days > 1 else ''} old to join the server.\n\n" ) if e:
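Why `.days` is the right interpolation, shown with plain datetime (no bot code needed): str() of a timedelta includes hours and minutes, while `.days` gives just the whole-day count, and the plural suffix only applies past one day.

from datetime import timedelta

days = timedelta(days=1)
print(str(days))    # '1 day, 0:00:00' -- what the old message interpolated
print(days.days)    # 1                -- what the new message interpolates
print(f"{days.days} day{'s' if days.days > 1 else ''} old")   # '1 day old'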
UPDATE API route: create all the database tables. seed_db: create default roles.
@@ -7,6 +7,10 @@ from sqlalchemy.orm import Session from app import crud, models, schemas from app.api import deps +from app.db.session import engine,SessionLocal + +from syft.core.node.common.tables import Base +from syft.core.node.common.tables.utils import seed_db router = APIRouter() @@ -17,8 +21,9 @@ from syft.core.common.message import ( import syft as sy -domain = sy.Domain("my domain") - +domain = sy.Domain("my domain", db_engine=engine) +Base.metadata.create_all(engine) +seed_db(SessionLocal()) @router.get("/", response_model=str) def read_items(
Add a default onnx opset function for tf exports. Test Plan: `tests/tensorflow/utils` passes, running the tensorflow tests asynchronously as well. Reviewers: mark.kurtz, dhuang, tuan, kevinaer, mgoin, alexm Subscribers: #core
@@ -6,6 +6,7 @@ from typing import List, Dict, Union import os from collections import OrderedDict import numpy +import onnx from neuralmagicML.utils import ( clean_path, @@ -17,7 +18,11 @@ from neuralmagicML.tensorflow.utils.helpers import tf_compat, tensors_export from neuralmagicML.tensorflow.utils.variable import clean_tensor_name -__all__ = ["GraphExporter"] +__all__ = ["default_onnx_opset", "GraphExporter"] + + +def default_onnx_opset() -> int: + return 9 if onnx.__version__ < "1.6" else 11 class GraphExporter(object): @@ -61,7 +66,7 @@ class GraphExporter(object): outputs: List[Union[str, tf_compat.Tensor]], pb_path: str, onnx_path: str, - opset: int = 11, + opset: int = default_onnx_opset(), custom_op_handlers=None, extra_opset=None, shape_override: Dict[str, List] = None,
build_scripts: Only copy `win_code_sign_cert.p12` if we have secrets. PRs from forked `chia-blockchain` repos don't have the cert available, which causes the installer step to fail, see Follow-up for
@@ -89,7 +89,9 @@ Write-Output " ---" Copy-Item "dist\daemon" -Destination "..\chia-blockchain-gui\packages\gui\" -Recurse Set-Location -Path "..\chia-blockchain-gui" -PassThru # We need the code sign cert in the gui subdirectory so we can actually sign the UI package +If ($env:HAS_SECRET) { Copy-Item "win_code_sign_cert.p12" -Destination "packages\gui\" +} git status
fix: don't pass doc to tooltip formatter on reportview. This just doesn't exist. Also, filtering doc by value makes no sense; it's bound to be incorrect or misleading.
@@ -536,7 +536,6 @@ frappe.views.ReportView = class ReportView extends frappe.views.ListView { this.last_chart_type = args.chart_type; const get_df = (field) => this.columns_map[field].docfield; - const get_doc = (value, field) => this.data.find((d) => d[field] === value); this.$charts_wrapper.removeClass("hidden"); @@ -551,13 +550,12 @@ frappe.views.ReportView = class ReportView extends frappe.views.ListView { numberFormatter: frappe.utils.format_chart_axis_number, }, tooltipOptions: { - formatTooltipY: (value) => - frappe.format( - value, - get_df(this.chart_args.y_axes[0]), - { always_show_decimals: true, inline: true }, - get_doc(value.doc) - ), + formatTooltipY: (value) => { + return frappe.format(value, get_df(this.chart_args.y_axes[0]), { + always_show_decimals: true, + inline: true, + }); + }, }, }); }
fix: allow devices with notification capability. Added a capability check for TIMERS_AND_ALARMS and REMINDERS. include_devices will also override the capability check. closes
@@ -406,10 +406,15 @@ async def setup_alexa(hass, config_entry, login_obj: AlexaLogin): continue if ( - device.get("capabilities") - and "MUSIC_SKILL" not in device["capabilities"] + dev_name not in include_filter + and device.get("capabilities") + and not any( + x in device["capabilities"] + for x in ["MUSIC_SKILL", "TIMERS_AND_ALARMS", "REMINDERS"] + ) ): - # skip devices without music skill + # skip devices without music or notification skill + _LOGGER.debug("Excluding %s for lacking capability", dev_name) continue if "bluetoothStates" in bluetooth:
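A minimal sketch of the new filtering rule with made-up device data: a device is skipped only when it is not explicitly included and none of its capabilities is a music or notification skill.

include_filter = {"Kitchen Echo"}
wanted = ("MUSIC_SKILL", "TIMERS_AND_ALARMS", "REMINDERS")

devices = [
    {"name": "Kitchen Echo", "capabilities": ["SMART_HOME"]},   # kept: explicitly included
    {"name": "Bedroom Echo", "capabilities": ["REMINDERS"]},    # kept: has a notification skill
    {"name": "Smart Plug", "capabilities": ["SMART_HOME"]},     # excluded
]

for device in devices:
    if (
        device["name"] not in include_filter
        and device.get("capabilities")
        and not any(x in device["capabilities"] for x in wanted)
    ):
        print("Excluding", device["name"], "for lacking capability")
        continue
    print("Keeping", device["name"])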
explicitly specify version in docker-compose.yaml. docker-compose will re-use old builds rather than rebuilding, so be explicit about what version you want.
@@ -38,7 +38,7 @@ services: build: context: . dockerfile: Dockerfile.gauge - image: 'faucet/gauge:latest' + image: 'faucet/gauge:1.5.3' environment: - GAUGE_CONFIG=/etc/ryu/faucet/gauge.yaml volumes: @@ -55,11 +55,10 @@ services: build: context: . dockerfile: Dockerfile - image: 'faucet/faucet:latest' + image: 'faucet/faucet:1.5.3' volumes: - '/var/log/ryu/faucet:/var/log/ryu/faucet' - '/etc/ryu/faucet:/etc/ryu/faucet' ports: - '6653:6653' - '9244:9244' -
Move add new default stream box to top. Fixes
<p>{{#tr this}}Configure the default streams new users are subscribed to when joining your organization.{{/tr}}</p> </div> + {{#if is_admin}} + <form class="form-horizontal default-stream-form"> + <div class="add-new-default-stream-box grey-bg"> + <div class="new-default-stream-section-title">{{t "Add new default stream" }}</div> + <div class="control-group" id="default_stream_inputs"> + <label for="default_stream_name" class="control-label">{{t "Stream name" }}</label> + <input class="create_default_stream" type="text" placeholder="{{t "Stream name" }}" name="stream_name" autocomplete="off"></input> + </div> + </div> + </form> + {{/if}} + <div class="progressive-table-wrapper"> <table class="table table-condensed table-striped"> <thead> </div> <div id="admin_page_default_streams_loading_indicator"></div> - {{#if is_admin}} - <form class="form-horizontal default-stream-form"> - <div class="add-new-default-stream-box grey-bg"> - <div class="new-default-stream-section-title">{{t "Add new default stream" }}</div> - <div class="control-group" id="default_stream_inputs"> - <label for="default_stream_name" class="control-label">{{t "Stream name" }}</label> - <input class="create_default_stream" type="text" placeholder="{{t "Stream name" }}" name="stream_name" autocomplete="off"></input> - </div> - </div> - </form> - {{/if}} </div>
AC: callback for getting annotation and prediction for visualization * AC: remove deprecated pipelined mode * Revert "AC: remove deprecated pipelined mode" This reverts commit * AC: callback for getting annotation and prediction for visualization
@@ -208,15 +208,12 @@ class ModelEvaluator(BaseEvaluator): if self.dataset.batch is None: self.dataset.batch = self.launcher.batch - raw_outputs_callback = kwargs.get('output_callback') + output_callback = kwargs.get('output_callback') predictions_to_store = [] for batch_id, (batch_input_ids, batch_annotation) in enumerate(self.dataset): filled_inputs, batch_meta, batch_identifiers = self._get_batch_input(batch_annotation) batch_predictions = self.launcher.predict(filled_inputs, batch_meta, **kwargs) - if raw_outputs_callback: - raw_outputs_callback( - batch_predictions, network=self.launcher.network, exec_network=self.launcher.exec_network - ) + if self.adapter: self.adapter.output_blob = self.adapter.output_blob or self.launcher.output_blob batch_predictions = self.adapter.process(batch_predictions, batch_identifiers, batch_meta) @@ -226,6 +223,8 @@ class ModelEvaluator(BaseEvaluator): annotations, predictions = self.postprocessor.process_batch(batch_annotation, batch_predictions, batch_meta) self.metric_executor.update_metrics_on_batch(batch_input_ids, annotations, predictions) + if output_callback: + output_callback(annotations, predictions) if self.metric_executor.need_store_predictions: self._annotations.extend(annotations)