message (stringlengths 13-484) | diff (stringlengths 38-4.63k) |
---|---|
Change from compute to infra
fix the typo | @@ -49,7 +49,7 @@ needed in an environment, it is possible to create additional nodes.
# /opt/openstack-ansible/scripts/inventory-manage.py \
-f /opt/openstack-ansible/playbooks/inventory/dynamic_inventory.py \
- -l |awk -F\| '/<NEW COMPUTE NODE>/ {print $2}' |sort -u | tee /root/add_host.limit
+ -l |awk -F\| '/<NEW INFRA NODE>/ {print $2}' |sort -u | tee /root/add_host.limit
#. Run the ``setup-everything.yml`` playbook with the
``limit`` argument.
|
Updates to Analysis v2 base analysis
Option to not apply default figure settings
Option to close figures (or not)
Fix for bug in saving plots | @@ -11,8 +11,10 @@ from matplotlib import cm
from pycqed.analysis import analysis_toolbox as a_tools
from pycqed.utilities.general import NumpyJsonEncoder
from pycqed.analysis.analysis_toolbox import get_color_order as gco
+from pycqed.analysis.analysis_toolbox import get_color_list
from pycqed.analysis.tools.plotting import set_xlabel, set_ylabel
-import pycqed.analysis_v2.default_figure_settings_analysis as def_fig
+# import pycqed.analysis_v2.default_figure_settings_analysis as def_fig
+from . import default_figure_settings_analysis as def_fig
from mpl_toolkits.axes_grid1 import make_axes_locatable
import datetime
import json
@@ -136,6 +138,7 @@ class BaseDataAnalysis(object):
########################################
# These options relate to the plotting #
########################################
+ if self.options_dict.get('apply_default_fig_settings', True):
def_fig.apply_default_figure_settings()
self.plot_dicts = dict()
self.axs = dict()
@@ -151,6 +154,8 @@ class BaseDataAnalysis(object):
False)
self.options_dict['save_figs'] = self.options_dict.get(
'save_figs', True)
+ self.options_dict['close_figs'] = self.options_dict.get(
+ 'close_figs', True)
####################################################
# These options relate to what analysis to perform #
####################################################
@@ -178,7 +183,7 @@ class BaseDataAnalysis(object):
self.prepare_plots() # specify default plots
if not self.extract_only:
self.plot(key_list='auto') # make the plots
- self.save_figures()
+ self.save_figures(close_figs=self.options_dict['close_figs'])
def get_timestamps(self):
"""
@@ -249,7 +254,7 @@ class BaseDataAnalysis(object):
# this disables the data extraction for other files if there is only
# one file being used to load data from
if self.single_timestamp:
- self.timestamps = [self.timestamps[0]] # Why???
+ self.timestamps = [self.timestamps[0]]
TwoD = self.params_dict.pop('TwoD', False)
# this should always be extracted as it is used to determine where
# the file is as required for datasaving
@@ -348,6 +353,8 @@ class BaseDataAnalysis(object):
close_figs: bool=True):
if savedir is None:
savedir = self.data_dict.get('folder', '')
+ if isinstance(savedir, list):
+ savedir = savedir[0]
if savebase is None:
savebase = ''
if tag_tstamp:
@@ -615,8 +622,7 @@ class BaseDataAnalysis(object):
if cmap == 'Vega10':
colors = [cm.Vega10(i) for i in range(len(plot_yvals))]
else:
- colors = [cm.get_cmap(cmap)(i)
- for i in np.linspace(0.0, 1.0, len_color_cycle)]
+ colors = get_color_list(len_color_cycle, cmap)
for ii, this_yvals in enumerate(plot_yvals):
p_out.append(pfunc(plot_xvals, this_yvals,
|
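A minimal, self-contained sketch of the `options_dict` pattern used in the diff above, where optional behaviour (applying default figure settings, closing figures) is toggled via `dict.get` with a default. The option names mirror the diff, but the class itself is purely illustrative, not PycQED's actual base analysis class:

```python
class MiniAnalysis:
    """Illustrative stand-in for a plotting analysis class."""

    def __init__(self, options_dict=None):
        self.options_dict = options_dict or {}
        # Fall back to sensible defaults when the caller did not set an option.
        self.options_dict['save_figs'] = self.options_dict.get('save_figs', True)
        self.options_dict['close_figs'] = self.options_dict.get('close_figs', True)

    def run(self):
        if self.options_dict.get('apply_default_fig_settings', True):
            print('applying default figure settings')
        print('close figures after saving:', self.options_dict['close_figs'])


MiniAnalysis({'close_figs': False}).run()
```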
config/core: Rename `each_iteration` RebootPolicy to `each_job`
Rename the parameter to be clearer on the effect of the policy, as this
will cause WA to reset the device for each new job it runs regardless of
the iteration number. | @@ -58,7 +58,7 @@ class RebootPolicy(object):
"""
- valid_policies = ['never', 'as_needed', 'initial', 'each_iteration']
+ valid_policies = ['never', 'as_needed', 'initial', 'each_job']
@staticmethod
def from_pod(pod):
@@ -82,8 +82,8 @@ class RebootPolicy(object):
return self.policy not in ['never', 'as_needed']
@property
- def reboot_on_each_iteration(self):
- return self.policy == 'each_iteration'
+ def reboot_on_each_job(self):
+ return self.policy == 'each_job'
def __str__(self):
return self.policy
@@ -597,8 +597,8 @@ class RunConfiguration(Configuration):
The device will be rebooted when the execution first starts,
just before executing the first workload spec.
- ``"each_iteration"``
- The device will be rebooted before each new iteration.
+ ``"each_job"``
+ The device will be rebooted before each new job.
'''),
ConfigurationPoint(
'device',
|
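A compact, hypothetical re-implementation of the policy check from the diff above (not the actual Workload Automation class), showing how the renamed `each_job` value would be consumed:

```python
class RebootPolicy:
    # Mirrors the renamed value from the diff above.
    valid_policies = ['never', 'as_needed', 'initial', 'each_job']

    def __init__(self, policy):
        if policy not in self.valid_policies:
            raise ValueError('invalid policy: {!r}'.format(policy))
        self.policy = policy

    @property
    def reboot_on_each_job(self):
        # True means the device is reset before every job, regardless of iteration number.
        return self.policy == 'each_job'


print(RebootPolicy('each_job').reboot_on_each_job)   # True
print(RebootPolicy('initial').reboot_on_each_job)    # False
```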
popovers: Rename "Move message" option to "Move messages".
This commit renames the "Move message" option to "Move messages"
to make it clearer that the user can move multiple messages. | @@ -543,10 +543,10 @@ export function toggle_actions_popover(element, id) {
if (editability === message_edit.editability_types.FULL) {
editability_menu_item = $t({defaultMessage: "Edit message"});
if (message.is_stream) {
- move_message_menu_item = $t({defaultMessage: "Move message"});
+ move_message_menu_item = $t({defaultMessage: "Move messages"});
}
} else if (can_move_message) {
- move_message_menu_item = $t({defaultMessage: "Move message"});
+ move_message_menu_item = $t({defaultMessage: "Move messages"});
view_source_menu_item = $t({defaultMessage: "View message source"});
} else {
view_source_menu_item = $t({defaultMessage: "View message source"});
|
ENH: add warning for metadata default use
Issue a warning when metadata for variables is set to defaults because no input was provided by the Instrument routine. | @@ -2613,11 +2613,18 @@ class Instrument(object):
self.meta = meta
# If only some metadata included, define the remaining variables
+ warn_default = False
for var in self.variables:
- case_var = meta.var_case_name(var)
- if case_var not in self.meta.keys() \
- and case_var not in self.meta.keys_nD():
- self.meta[case_var] = {self.labels.name: var}
+ if var not in self.meta:
+ default_warn = "".join(["Metadata set to defaults, as",
+ " they were missing in the ",
+ "Instrument"])
+ warn_default = True
+ self.meta[case_var] = {self.labels.name: var,
+ self.labels.notes: default_warn}
+
+ if warn_default:
+ warnings.warn(default_warn, stacklevel=2)
# check if load routine actually returns meta
if self.meta.data.empty:
|
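A standalone example of the `warnings.warn(..., stacklevel=2)` idiom added above. The `load_meta` helper is hypothetical (it is not part of pysat); `stacklevel=2` makes the warning point at the caller rather than at the helper itself:

```python
import warnings


def load_meta(meta=None):
    # Hypothetical helper used only to demonstrate the warning idiom.
    if meta is None:
        warnings.warn("Metadata set to defaults, as they were missing in the Instrument",
                      stacklevel=2)
        meta = {}
    return meta


load_meta()  # the warning is attributed to this line thanks to stacklevel=2
```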
TPU Embedding load/retrieve parameters ops need to be declared in the
outer graph scope with control flow v2. | @@ -2044,9 +2044,13 @@ class TPUEmbeddingTable(base_layer.BaseLayer):
# Only the Trainer needs these ops.
if py_utils.use_tpu():
+ # TPU Embedding load/retrieve ops need to be in the outer graph
+ # scope.
+ with tf.init_scope():
tf.logging.info('creating load and retrieve ops.')
load_parameters_op = (
- tpu_embedding_lib.tpu_ops.load_tpu_embedding_adagrad_parameters(
+ tpu_embedding_lib.tpu_ops
+ .load_tpu_embedding_adagrad_parameters(
parameters=embedding_var,
accumulators=accumulator_var,
table_name=self._table_name,
|
make: add mostlyclean target
Close | -.PHONY: docs
+.PHONY: docs test lint binaries gh-pages
all: fava/static/gen/app.js
fava/static/gen/app.js: fava/static/sass/* fava/static/javascript/*
cd fava/static; npm update; npm run build
-clean:
- rm -rf .tox
+clean: mostlyclean
rm -rf build dist
+ rm -rf fava/static/gen
+
+mostlyclean:
+ rm -rf .tox
rm -rf fava/static/node_modules
+ find . -type f -name '*.py[c0]' -delete
+ find . -type d -name "__pycache__" -delete
lint:
tox -e lint
|
Fixed forecast time bug
There was an issue with converting the forecast time when it was zero. The logic was changed
to account for this and now things work correctly.
Added nearest_time method for surface data for easy time matching. Just
a little wrapper around sfjson. | @@ -727,7 +727,7 @@ class GempakFile():
@staticmethod
def _convert_ftime(ftime):
"""Convert GEMPAK forecast time and type integer."""
- if ftime:
+ if ftime >= 0:
iftype = ForecastType(ftime // 100000)
iftime = ftime - iftype.value * 100000
hours = iftime // 100
@@ -2631,6 +2631,75 @@ class GempakSurface(GempakFile):
stations.append(station)
return stations
+ def nearest_time(self, date_time, station_id=None, station_number=None):
+ """Get nearest observation to given time for selected stations.
+
+ Parameters
+ ----------
+ date_time : datetime or array-like of datetime
+ Valid/observed datetime of the surface station. Alternatively
+ object or a string with the format YYYYmmddHHMM.
+
+ station_id : str or array-like of str
+ Station ID of the surface station.
+
+ station_number : int or array-like of int
+ Station number of the surface station.
+
+ Returns
+ -------
+ list
+ List of dicts/JSONs for each surface station.
+
+ Notes
+ -----
+ One of either station_id or station_number must be used. If both
+ are present, station_id will take precedence.
+ """
+ if isinstance(date_time, str):
+ date_time = datetime.strptime(date_time, '%Y%m%d%H%M')
+
+ if station_id is None and station_number is None:
+ raise ValueError('Must have either station_id or station_number')
+
+ if station_id is not None and station_number is not None:
+ station_number = None
+
+ if (station_id is not None
+ and (not isinstance(station_id, Iterable)
+ or isinstance(station_id, str))):
+ station_id = [station_id]
+ station_id = [c.upper() for c in station_id]
+
+ if station_number is not None and not isinstance(station_number, Iterable):
+ station_number = [station_number]
+ station_number = [int(sn) for sn in station_number]
+
+ time_matched = []
+ if station_id:
+ for stn in station_id:
+ matched = self.sfjson(station_id=stn)
+
+ nearest = min(
+ matched,
+ key=lambda d: abs(d['properties']['date_time'] - date_time)
+ )
+
+ time_matched.append(nearest)
+
+ if station_number:
+ for stn in station_id:
+ matched = self.sfjson(station_number=stn)
+
+ nearest = min(
+ matched,
+ key=lambda d: abs(d['properties']['date_time'] - date_time)
+ )
+
+ time_matched.append(nearest)
+
+ return time_matched
+
def sfjson(self, station_id=None, station_number=None,
date_time=None, state=None, country=None):
"""Select surface stations and output as list of JSON objects.
|
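At its core, the `nearest_time` method above is a `min` over a station's observations keyed by absolute time difference. A self-contained sketch of that idea, using plain dicts shaped like the sfjson output described in the docstring:

```python
from datetime import datetime

observations = [
    {'properties': {'date_time': datetime(2021, 1, 1, 0, 0)}},
    {'properties': {'date_time': datetime(2021, 1, 1, 6, 0)}},
    {'properties': {'date_time': datetime(2021, 1, 1, 12, 0)}},
]

target = datetime(2021, 1, 1, 5, 0)
nearest = min(observations,
              key=lambda d: abs(d['properties']['date_time'] - target))
print(nearest['properties']['date_time'])  # 2021-01-01 06:00:00
```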
Fix typo in guide configuration
seach to search | @@ -338,7 +338,7 @@ Environment Variable Configuration
``AWS_PROFILE``
The default profile to use, if any. If no value is specified, boto3
- will attempt to seach the shared credentials file and the config file
+ will attempt to search the shared credentials file and the config file
for the ``default`` profile.
``AWS_CONFIG_FILE``
|
refactor preproc, support dense in TumHistory layer
Summary: Pull Request resolved: | @@ -11,10 +11,27 @@ from caffe2.python.layers.layers import (
)
from future.utils import viewitems
import numpy as np
+from collections import defaultdict
import logging
logger = logging.getLogger(__name__)
+
+def get_concatenated_feature_to_index(blobs_to_concat):
+ concat_feature_to_index = defaultdict(list)
+ start_pos = 0
+ for scalar in blobs_to_concat:
+ num_dims = scalar.dtype.shape[0]
+ if hasattr(scalar, 'metadata') \
+ and hasattr(scalar.metadata, 'feature_specs') \
+ and hasattr(scalar.metadata.feature_specs, 'feature_to_index') \
+ and isinstance(scalar.metadata.feature_specs.feature_to_index, dict): # noqa B950
+ for k, v in scalar.metadata.feature_specs.feature_to_index.items():
+ concat_feature_to_index[k].extend([start_pos + vi for vi in v])
+ start_pos += num_dims
+ return dict(concat_feature_to_index) if concat_feature_to_index.keys() else None
+
+
class Concat(ModelLayer):
"""
Construct Concat layer
@@ -95,6 +112,19 @@ class Concat(ModelLayer):
(np.float32, output_dims),
self.get_next_blob_reference('output'))
+ record_to_concat = input_record.fields.values()
+ concated_feature_to_index = get_concatenated_feature_to_index(
+ record_to_concat
+ )
+ if concated_feature_to_index:
+ metadata = schema.Metadata(
+ feature_specs=schema.FeatureSpec(
+ feature_to_index=concated_feature_to_index
+ )
+ )
+ self.output_schema.set_metadata(metadata)
+
+
def add_ops(self, net):
net.Concat(
self.input_record.field_blobs(),
|
Fix unittest MismatchError
Closes-Bug: | @@ -451,11 +451,10 @@ class TestSyncer(base.DbTestCase):
self._find_created_modified_unmodified_ids(
before_action_plans, after_action_plans))
- dummy_1_spec = [
- {'description': 'Dummy indicator', 'name': 'dummy',
- 'schema': jsonutils.dumps({'minimum': 0, 'type': 'integer'}),
- 'unit': '%'}]
- dummy_2_spec = []
+ dummy_1_spec = jsonutils.loads(
+ self.goal1_spec.serialize_indicators_specs())
+ dummy_2_spec = jsonutils.loads(
+ self.goal2_spec.serialize_indicators_specs())
self.assertEqual(
[dummy_1_spec, dummy_2_spec],
[g.efficacy_specification for g in after_goals])
|
Remove pre-1.10 warnings
The intro paragraph is outdated since the official release of
HDF5 1.10.0 more than one year ago. Remove the outdated
paragraphs and warnings. | @@ -5,21 +5,6 @@ Single Writer Multiple Reader (SWMR)
Starting with version 2.5.0, h5py includes support for the HDF5 SWMR features.
-The SWMR feature is not available in the current release (1.8 series) of HDF5
-library. It is planned to be released for production use in version 1.10. Until
-then it is available as an experimental prototype form from development snapshot
-version 1.9.178 on the
-`HDF Group ftp server <ftp://ftp.hdfgroup.uiuc.edu/pub/outgoing/SWMR/>`_ or the
-`HDF Group svn repository <http://svn.hdfgroup.uiuc.edu/hdf5/branches/revise_chunks>`_.
-
-.. Warning:: The SWMR feature is currently in prototype form and available for
- experimenting and testing. Please do not consider this a production
- quality feature until the HDF5 library is released as 1.10.
-
-.. Warning:: FILES PRODUCED BY THE HDF5 1.9.X DEVELOPMENT SNAPSHOTS MAY NOT BE
- READABLE BY OTHER VERSIONS OF HDF5, INCLUDING THE EXISTING 1.8
- SERIES AND ALSO 1.10 WHEN IT IS RELEASED.
-
What is SWMR?
-------------
|
fix: Enable set_open_count
perf: move set_open_count to after_refresh to avoid unnecessary calls | @@ -5,7 +5,6 @@ frappe.ui.form.Dashboard = class FormDashboard {
constructor(opts) {
$.extend(this, opts);
this.setup_dashboard_sections();
- this.set_open_count = frappe.utils.throttle(this.set_open_count, 500);
}
setup_dashboard_sections() {
@@ -179,7 +178,6 @@ frappe.ui.form.Dashboard = class FormDashboard {
return;
}
this.render_links();
- // this.set_open_count();
show = true;
}
@@ -206,6 +204,7 @@ frappe.ui.form.Dashboard = class FormDashboard {
$(this).removeClass('hidden');
}
});
+ this.set_open_count();
}
init_data() {
|
Fix typo in README
mange -> manage | @@ -75,7 +75,7 @@ You can find the addon manifests and/or scripts under `${SNAP}/actions/`, with `
- **storage**: Create a default storage class. This storage class makes use of the hostpath-provisioner pointing to a directory on the host. Persistent volumes are created under `${SNAP_COMMON}/default-storage`. Upon disabling this addon you will be asked if you want to delete the persistent volumes created.
- **ingress**: Create an ingress controller.
- **gpu**: Expose GPU(s) to microk8s by enabling the nvidia-docker runtime and nvidia-device-plugin-daemonset. Requires NVIDIA drivers to already be installed on the host system.
-- **istio**: Deploy the core [Istio](https://istio.io/) services. You can use the `microk8s.istioctl` command to mange your deployments.
+- **istio**: Deploy the core [Istio](https://istio.io/) services. You can use the `microk8s.istioctl` command to manage your deployments.
- **registry**: Deploy a docker private registry and expose it on `localhost:32000`. The storage addon will be enabled as part of this addon. To [use the registry](docs/registry.md) you can use the `microk8s.docker` command.
- **metrics-server**: Deploy the [Metrics Server](https://kubernetes.io/docs/tasks/debug-application-cluster/core-metrics-pipeline/#metrics-server).
|
Small namedtuple refactor
Plus order consistency, because why not. | @@ -197,11 +197,11 @@ class DigestAuth(Auth):
try:
realm = header_dict["realm"].encode()
nonce = header_dict["nonce"].encode()
- qop = header_dict["qop"].encode() if "qop" in header_dict else None
- opaque = header_dict["opaque"].encode() if "opaque" in header_dict else None
algorithm = header_dict.get("algorithm", "MD5")
+ opaque = header_dict["opaque"].encode() if "opaque" in header_dict else None
+ qop = header_dict["qop"].encode() if "qop" in header_dict else None
return _DigestAuthChallenge(
- realm=realm, nonce=nonce, qop=qop, opaque=opaque, algorithm=algorithm
+ realm=realm, nonce=nonce, algorithm=algorithm, opaque=opaque, qop=qop
)
except KeyError as exc:
message = "Malformed Digest WWW-Authenticate header"
@@ -296,17 +296,9 @@ class DigestAuth(Auth):
raise ProtocolError(message, request=request)
-class _DigestAuthChallenge:
- def __init__(
- self,
- realm: bytes,
- nonce: bytes,
- algorithm: str,
- opaque: typing.Optional[bytes] = None,
- qop: typing.Optional[bytes] = None,
- ) -> None:
- self.realm = realm
- self.nonce = nonce
- self.algorithm = algorithm
- self.opaque = opaque
- self.qop = qop
+class _DigestAuthChallenge(typing.NamedTuple):
+ realm: bytes
+ nonce: bytes
+ algorithm: str
+ opaque: typing.Optional[bytes]
+ qop: typing.Optional[bytes]
|
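For reference, a minimal illustration of the `typing.NamedTuple` style the diff above converts to: fields become read-only attributes, and the class gets tuple behaviour, equality, and a readable repr for free.

```python
import typing


class Challenge(typing.NamedTuple):
    realm: bytes
    nonce: bytes
    algorithm: str
    opaque: typing.Optional[bytes]
    qop: typing.Optional[bytes]


c = Challenge(realm=b"x", nonce=b"y", algorithm="MD5", opaque=None, qop=None)
print(c.algorithm)                                 # 'MD5'
print(c._replace(algorithm="SHA-256").algorithm)   # fields are immutable; _replace returns a copy
```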
Link to baremetal API reference from patch_node
Add a link in the patch_node docstring to the corresponding baremetal
API operation, Update Node (PATCH /v1/nodes/{node_ident}). | @@ -280,6 +280,10 @@ class Proxy(proxy.Proxy):
being locked. However, when setting ``instance_id``, this is
a normal code and should not be retried.
+ See `Update Node
+ <https://developer.openstack.org/api-ref/baremetal/?expanded=update-node-detail#update-node>`_
+ for details.
+
:returns: The updated node.
:rtype: :class:`~openstack.baremetal.v1.node.Node`
"""
|
Fixed test for `fastanifile_parsed()`
This test was failing due to a rounding/representation error | @@ -12,6 +12,7 @@ from typing import List, NamedTuple, Tuple
import pandas as pd
import pytest
+import unittest
from pandas.util.testing import assert_frame_equal
@@ -50,7 +51,7 @@ def fastanifile_parsed(dir_fastani_in): # works
"""Example parsed fastANI file."""
return fastANIParsed(
dir_fastani_in / "ecoli_vs_shiga.fastani",
- ComparisonResult("ecoli.fna", "shiga.fna", 97.664, 1322, 1547),
+ ComparisonResult("ecoli.fna", "shiga.fna", 0.9766400000000001, 1322, 1547),
)
@@ -80,10 +81,13 @@ def fastani_cmds_four(path_file_four): # works
)
+assertions = unittest.TestCase("__init__")
+
+
def test_fastanifile_parsing(fastanifile_parsed): # works
"""Check parsing of test fastANI .fastani file."""
- result = fastani.parse_fastani_file(fastanifile_parsed.filename)[0]
- assert result == fastanifile_parsed.data
+ result = fastani.parse_fastani_file(fastanifile_parsed.filename)
+ assertions.assertEqual(result, fastanifile_parsed.data)
# Test fastANI command generation
|
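The failure fixed above is the usual floating-point representation issue (0.97664 stored as 0.9766400000000001). Where exact equality is not required, a common alternative (shown here only as a sketch, assuming pytest is installed) is to compare with a tolerance:

```python
import math

import pytest

# Both comparisons tolerate the tiny representation error in the last digits.
assert 0.9766400000000001 == pytest.approx(0.97664)
assert math.isclose(0.9766400000000001, 0.97664, rel_tol=1e-9)
```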
Make the root generated package Pure
TN: | ## vim: filetype=makoada
-package ${ada_lib_name} is
+package ${ada_lib_name} with Pure is
## It is up to each Langkit user to update these constants to whatever
## appropriate.
|
CHANGES for 0.9.19 release
Test Plan: - | # Changelog
+## 0.9.19
+
+**New**
+
+- Improved error handling when the intermediate storage stores and retrieves objects.
+- New URL scheme in Dagit, with repository details included on all paths for pipelines, solids, and schedules
+- Relaxed constraints for the AssetKey constructor, to enable arbitrary strings as part of the key path.
+- When executing a subset of a pipeline, configuration that does not apply to the current subset but would be valid in the original pipeline is now allowed and ignored.
+- GCSComputeLogManager was added, allowing for compute logs to be persisted to Google cloud storage
+- The step-partition matrix in Dagit now auto-reloads runs
+
+**Bugfixes**
+
+- Dagit bugfixes and improvements
+- When specifying a namespace during helm install, the same namespace will now be used by the K8sScheduler or K8sRunLauncher, unless overridden.
+- `@pipeline` decorated functions with -> None typing no longer cause unexpected problems.
+- Fixed an issue where compute logs might not always be complete on Windows.
+
## 0.9.18
**Breaking Changes**
|
Update Pennsylvania.md
Added another incident under Pittsburgh. | @@ -76,3 +76,13 @@ A woman in East Liberty gets onto her knees and puts her hands in the air, while
**Links**
* https://www.youtube.com/watch?v=TxHxU6nhzzQ
+
+### Police fire tear gas and rubber bullets on peaceful assembly | June 1st
+
+Police declare a peaceful protest an unlawful assembly. They then escalate the situation by firing tear gas and rubber bullets on the crowd.
+
+**Links**
+
+* https://www.reddit.com/r/pittsburgh/comments/guzshz/police_declaring_an_unlawful_assembly_against_a/
+* https://www.facebook.com/BenjaminKFerris/posts/3091613010894973
+* https://twitter.com/gautamyadav818/status/1267606317893550080
|
[commands] Fix grammar
Either implies that there will be two things; there is only one. | @@ -560,7 +560,7 @@
Error Handling
----------------
-When our commands fail to either parse we will, by default, receive a noisy error in ``stderr`` of our console that tells us
+When our commands fail to parse we will, by default, receive a noisy error in ``stderr`` of our console that tells us
that an error has happened and has been silently ignored.
In order to handle our errors, we must use something called an error handler. There is a global error handler, called
|
Typo fix of controlled SWAP gate documentation
* Update for typo fix of controlled swap gate
In response to Issue
* To keep consistency. | @@ -104,7 +104,7 @@ class SwapGate(Gate):
class CSwapGate(ControlledGate):
- r"""Controlled-X gate.
+ r"""Controlled-SWAP gate, also known as the Fredkin gate.
**Circuit symbol:**
|
Fix usage of _hail_package in eval_expr
Fix usage of _hail_package so that eval_expr doesn't crash if
used without `hl.init` first. | @@ -193,7 +193,7 @@ def eval_expr_typed(expression):
if expression._indices.source is None:
return (expression.dtype._from_json(
- java.Env._hail_package.expr.ir.Interpret.interpretPyIR(str(expression._ir))),
+ java.Env.hail().expr.ir.Interpret.interpretPyIR(str(expression._ir))),
expression.dtype)
else:
return expression.collect()[0], expression.dtype
|
Change default CSV encoding to BOM
Since Python's 'utf-8-sig' encoding works with and without a BOM, we can
support files with a BOM by changing the default encoding. | @@ -163,7 +163,7 @@ class CSVKitUtility(object):
self.argparser.add_argument('-z', '--maxfieldsize', dest='field_size_limit', type=int,
help='Maximum length of a single field in the input CSV file.')
if 'e' not in self.override_flags:
- self.argparser.add_argument('-e', '--encoding', dest='encoding', default='utf-8',
+ self.argparser.add_argument('-e', '--encoding', dest='encoding', default='utf-8-sig',
help='Specify the encoding of the input CSV file.')
if 'L' not in self.override_flags:
self.argparser.add_argument('-L', '--locale', dest='locale', default='en_US',
|
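A quick demonstration of why `utf-8-sig` is a safe default: it decodes input both with and without a UTF-8 byte-order mark, silently stripping the BOM when it is present.

```python
import io

with_bom = io.BytesIO(b'\xef\xbb\xbfid,name\n1,alice\n')
without_bom = io.BytesIO(b'id,name\n1,alice\n')

for raw in (with_bom, without_bom):
    first_line = io.TextIOWrapper(raw, encoding='utf-8-sig').read().splitlines()[0]
    print(repr(first_line))  # 'id,name' in both cases -- no stray '\ufeff'
```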
lookup_case filters by case type
So this check is redundant | @@ -86,10 +86,7 @@ class _Importer(object):
)
self.log_case_lookup()
- if case:
- if case.type != self.config.case_type:
- return # TODO Add error message about skipped row
- elif error == LookupErrors.NotFound:
+ if error == LookupErrors.NotFound:
if not self.config.create_new_cases:
return
elif error == LookupErrors.MultipleResults:
|
Update third-party-packages.md
Docs: Add link to ChannelBox in Third party packages | @@ -39,6 +39,13 @@ Serverless ASGI adapter for AWS Lambda & API Gateway.
Manage and send messages to groups of channels using websockets.
Checkout <a href="https://github.com/taoufik07/nejma-chat" target="_blank">nejma-chat</a>, a simple chat application built using `nejma` and `starlette`.
+### ChannelBox
+
+<a href="https://github.com/Sobolev5/channel-box" target="_blank">GitHub</a>
+
+Another solution for websocket broadcast. Send messages to channel groups from any part of your code.
+Checkout <a href="http://backend.starlette-vue.site/chat/chat1/" target="_blank">channel-box-chat</a>, a simple chat application built using `channel-box` and `starlette`.
+
### Scout APM
<a href="https://github.com/scoutapp/scout_apm_python" target="_blank">GitHub</a>
|
Truncate long source file name and prevent negative width field
Fixes | @@ -7222,7 +7222,7 @@ class ContextCommand(GenericCommand):
trail_len = len(m) + 6
title = ""
title += Color.colorify("{:{padd}<{width}} ".format("",
- width=self.tty_columns - trail_len,
+ width=max(self.tty_columns - trail_len, 0),
padd=HORIZONTAL_LINE),
line_color)
title += Color.colorify(m, msg_color)
@@ -7525,7 +7525,10 @@ class ContextCommand(GenericCommand):
return
nb_line = self.get_setting("nb_lines_code")
- title = "source:{0:s}+{1:d}".format(symtab.filename, line_num + 1)
+ fn = symtab.filename
+ if len(fn) > 20:
+ fn = "{}[...]{}".format(fn[:15], os.path.splitext(fn)[1])
+ title = "source:{}+{}".format(fn, line_num + 1)
cur_line_color = get_gef_setting("theme.source_current_line")
self.context_title(title)
|
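A standalone sketch of the two fixes above: clamping a computed field width at zero so `str.format` never receives a negative width, and shortening an overly long file name while keeping its extension. The function names are illustrative, not GEF's:

```python
import os


def title_line(columns, trailer_len, fill='-'):
    # A negative width raises ValueError in str.format, so clamp it at 0.
    return '{:{fill}<{width}}'.format('', fill=fill, width=max(columns - trailer_len, 0))


def shorten(fn, limit=20):
    if len(fn) > limit:
        fn = '{}[...]{}'.format(fn[:15], os.path.splitext(fn)[1])
    return fn


print(title_line(30, 10))    # 20 fill characters
print(title_line(5, 10))     # empty string instead of an exception
print(shorten('/very/long/path/to/source_file.c'))
```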
Update python-package-conda.yml
Update workflow after rename of ubuntu conda file. | @@ -17,7 +17,7 @@ jobs:
ENV_FILE: install/envs/mac.yml
- os: ubuntu-latest
- ENV_FILE: install/envs/pc.yml
+ ENV_FILE: install/envs/ubuntu.yml
fail-fast: false
defaults:
run:
|
Disable DYNAMIC_COMPLETIONS flag for builds older than 4075
Workaround:
See:
See: | @@ -286,7 +286,8 @@ class CompletionHandler(LSPViewEventListener):
flags |= sublime.INHIBIT_REORDER
if isinstance(response, dict):
response_items = response["items"] or []
- if response.get("isIncomplete", False):
+ # TODO: Remove this version check when everyone is past 4074.
+ if response.get("isIncomplete", False) and int(sublime.version()) >= 4075:
flags |= sublime.DYNAMIC_COMPLETIONS
elif isinstance(response, list):
response_items = response
|
Simplifying InvalidCnpjCpfClassifier implementation
The InvalidCnpjCpfClassifier was making an unnecessary copy of
the whole dataset and also converting one of the dataframe columns
for no apparent reason. This change should improve running time for
the classifier as well as its memory footprint. | @@ -14,9 +14,7 @@ class InvalidCnpjCpfClassifier(TransformerMixin):
return self
def predict(self, X):
- self._X = X.copy()
- self._X['cnpj_cpf'] = self._X['cnpj_cpf'].astype(np.str)
- return np.r_[self._X.apply(self.__is_invalid, axis=1)]
+ return np.r_[X.apply(self.__is_invalid, axis=1)]
def __is_invalid(self, row):
- return (row['document_type'] in [0, 1]) & (not cpfcnpj.validate(row['cnpj_cpf']))
+ return (row['document_type'] in [0, 1]) & (not cpfcnpj.validate(str(row['cnpj_cpf'])))
|
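The performance point above, in isolation: skip the defensive `X.copy()` and convert only the single value you need instead of a whole column. The toy `is_invalid` predicate below is illustrative, not the project's real CPF/CNPJ validation:

```python
import numpy as np
import pandas as pd

X = pd.DataFrame({'document_type': [0, 1, 2],
                  'cnpj_cpf': [12345678901, 98765432100, 11111111111]})


def is_invalid(row):
    # Cast just this row's value to str, instead of converting the whole column up front.
    return (row['document_type'] in [0, 1]) and len(str(row['cnpj_cpf'])) != 11


# No X.copy(): apply() reads the original frame without mutating it.
print(np.r_[X.apply(is_invalid, axis=1)])
```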
Mark entity as changed if fuzzy gets rejected
And only call the expensive calculate_stats() if necessary - when
translation changes status. | @@ -553,8 +553,12 @@ def reject_translation(request):
)
# Check if translation was approved. We must do this before unapproving it.
- if translation.approved:
+ if translation.approved or translation.fuzzy:
translation.entity.mark_changed(locale)
+ TranslatedResource.objects.get(
+ resource=translation.entity.resource,
+ locale=locale
+ ).calculate_stats()
translation.rejected = True
translation.rejected_user = request.user
@@ -572,10 +576,6 @@ def reject_translation(request):
project = translation.entity.resource.project
TranslationMemoryEntry.objects.filter(translation=translation).delete()
- TranslatedResource.objects.get(
- resource=translation.entity.resource,
- locale=locale
- ).calculate_stats()
return JsonResponse({
'translation': latest_translation,
|
Remove interactive docstring
This is related to an old issue and | @@ -1401,8 +1401,9 @@ class JobFunctionWrappingJob(FunctionWrappingJob):
- memory
- disk
- cores
- For example to wrap a function into a job we would call
- >>> Job.wrapJobFn(myJob, memory='100k', disk='1M', cores=0.1)
+ For example to wrap a function into a job we would call:
+
+ ``Job.wrapJobFn(myJob, memory='100k', disk='1M', cores=0.1)``
"""
@property
|
use env for all commands
Add opengl plug | name: maestral
-base: core18 # the base snap is the execution environment for this snap
+base: core18
license: MIT
adopt-info: maestral
icon: maestral/resources/maestral.png
@@ -21,21 +21,23 @@ apps:
- home
- network
- unity7
+ - opengl
maestral-qt:
command: maestral gui
desktop: share/applications/maestral.desktop
- environment:
- # Coerce XDG_CURRENT_DESKTOP to Unity so that App Indicators
- # are used and do not fall back to Notification Area applets
- # or disappear completely.
- XDG_CURRENT_DESKTOP: Unity:Unity7
-
extensions:
- kde-neon
plugs:
- home
- network
- unity7
+ - opengl
+
+environment:
+ # Coerce XDG_CURRENT_DESKTOP to Unity so that App Indicators
+ # are used and do not fall back to Notification Area applets
+ # or disappear completely.
+ XDG_CURRENT_DESKTOP: Unity:Unity7
parts:
maestral:
|
Use gcc-7 for testing yask on Travis
Needed in case a Skylake platform is assigned | @@ -21,7 +21,7 @@ matrix:
env: DEVITO_ARCH=gcc-4.9 DEVITO_OPENMP=1 OMP_NUM_THREADS=2
- os: linux
python: "3.6"
- env: DEVITO_ARCH=gcc-4.9 DEVITO_OPENMP=0 DEVITO_BACKEND=yask
+ env: DEVITO_ARCH=gcc-7 DEVITO_OPENMP=0 DEVITO_BACKEND=yask
allow_failures:
- os: linux
python: "2.7"
@@ -33,12 +33,14 @@ matrix:
addons:
apt:
sources:
- - ubuntu-toolchain-r-test # For gcc 4.9 and 5
+ - ubuntu-toolchain-r-test # For gcc 4.9, 5 and 7
packages:
- gcc-4.9
- g++-4.9
- gcc-5
- g++-5
+ - gcc-7
+ - g++-7
before_install:
# Setup anaconda
|
Actions: May have to run "apt-get update" for Ubuntu
* It seems the runners might become out of sync with the repos
otherwise. | @@ -36,6 +36,7 @@ jobs:
- name: Install Nuitka dependencies
run: |
+ sudo apt-get update
sudo apt-get install chrpath gdb ccache
pip install -r requirements-devel.txt
|
fw/entrypoint: Add check for system default encoding
Check what the default encoding for the system is set to. If this is not
configured to use 'UTF-8', log a warning to the user as this is known
to cause issues when attempting to parse non-ASCII files during operation. | import sys
import argparse
+import locale
import logging
import os
import warnings
@@ -76,6 +77,18 @@ def check_devlib_version():
raise HostError(msg.format(format_version(required_devlib_version), devlib.__version__))
+# If the default encoding is not UTF-8 warn the user as this may cause compatibility issues
+# when parsing files.
+def check_system_encoding():
+ system_encoding = locale.getpreferredencoding()
+ msg = 'System Encoding: {}'.format(system_encoding)
+ if 'UTF-8' not in system_encoding:
+ logger.warning(msg)
+ logger.warning('To prevent encoding issues please use a locale setting which supports UTF-8')
+ else:
+ logger.debug(msg)
+
+
def main():
if not os.path.exists(settings.user_directory):
init_user_directory()
@@ -115,6 +128,7 @@ def main():
logger.debug('devlib version: {}'.format(devlib.__full_version__))
logger.debug('Command Line: {}'.format(' '.join(sys.argv)))
check_devlib_version()
+ check_system_encoding()
# each command will add its own subparser
subparsers = parser.add_subparsers(dest='command')
|
[FIX] replaceCategoryInPlace: Allow LRM and RLM at the end of the old_cat title
Left-to-right and right-to-left marks commonly occur at the end of the
category name due to copying and pasting. | @@ -1394,10 +1394,12 @@ def replaceCategoryInPlace(oldtext, oldcat, newcat, site=None,
title = '[%s%s]' % (title[0].upper(), title[0].lower()) + title[1:]
# spaces and underscores in page titles are interchangeable and collapsible
title = title.replace(r'\ ', '[ _]+').replace(r'\_', '[ _]+')
- categoryR = re.compile(r'\[\[\s*(%s)\s*:\s*%s\s*((?:\|[^]]+)?\]\])'
+ categoryR = re.compile(r'\[\[\s*(%s)\s*:\s*%s[\s\u200e\u200f]*'
+ r'((?:\|[^]]+)?\]\])'
% (catNamespace, title), re.I)
categoryRN = re.compile(
- r'^[^\S\n]*\[\[\s*(%s)\s*:\s*%s\s*((?:\|[^]]+)?\]\])[^\S\n]*\n'
+ r'^[^\S\n]*\[\[\s*(%s)\s*:\s*%s[\s\u200e\u200f]*'
+ r'((?:\|[^]]+)?\]\])[^\S\n]*\n'
% (catNamespace, title), re.I | re.M)
exceptions = ['nowiki', 'comment', 'math', 'pre', 'source']
if newcat is None:
|
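A small standalone illustration of the fix above: tolerating invisible left-to-right (U+200E) and right-to-left (U+200F) marks, along with whitespace, before the closing brackets of a matched category link.

```python
import re

# Whitespace plus the LRM/RLM characters that sneak in via copy and paste.
pattern = re.compile(r'\[\[Category:Foo[\s\u200e\u200f]*\]\]', re.I)

print(bool(pattern.search('[[Category:Foo]]')))        # True
print(bool(pattern.search('[[Category:Foo\u200e]]')))  # True -- trailing LRM is tolerated
print(bool(pattern.search('[[Category:Foobar]]')))     # False -- other trailing text is not
```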
Update database.md
Updated the description of the pg_trgm extension. | @@ -23,7 +23,7 @@ The Prefect Orion database persists data used by many features of Prefect to per
Currently Prefect Orion supports the following databases:
- SQLite: The default in Prefect Orion, and our recommendation for lightweight, single-server deployments. SQLite requires essentially no setup.
-- PostgreSQL: Best for connecting to external databases, but does require additional setup (such as Docker). Prefect Orion uses the [`pg_trgm`](https://www.postgresql.org/docs/current/pgtrgm.html) extension, so it must be installed and added to a schema visible to the Postgres user in your connection string.
+- PostgreSQL: Best for connecting to external databases, but does require additional setup (such as Docker). Prefect Orion uses the [`pg_trgm`](https://www.postgresql.org/docs/current/pgtrgm.html) extension, so it must be installed and enabled.
## Using the database
|
Fallback to installed tomli when vendor is removed
In Fedora, we bootstrap tomli differently, so we remove the vendored
version. This makes it so we don't also have to patch flit_core.config
ourselves. | @@ -10,7 +10,12 @@ import re
try:
import tomllib
except ImportError:
+ try:
from .vendor import tomli as tomllib
+ # Some downstream distributors remove the vendored tomli.
+ # When that is removed, import tomli from the regular location.
+ except ImportError:
+ import tomli as tomllib
from .versionno import normalise_version
|
Fix typo in installing.md
Typo | @@ -13,7 +13,7 @@ Ansible-lint does not currently support installation on Windows systems.
```
For a container image, we recommend using [creator-ee](https://github.com/ansible/creator-ee/), which includes Ansible-lint.
-If you have a use case that the `creator-ee` container does satisfy, please contact the team through the [discussions](https://github.com/ansible/ansible-lint/discussions) forum.
+If you have a use case that the `creator-ee` container doesn't satisfy, please contact the team through the [discussions](https://github.com/ansible/ansible-lint/discussions) forum.
You can also run Ansible-lint on your source code with the [Ansible-lint GitHub action](https://github.com/marketplace/actions/ansible-lint) instead of installing it directly.
|
client: use exact window URL for websocket connections
This prevents interference with http pass through views | @@ -1137,7 +1137,8 @@ function Lona(settings) {
protocol = 'wss://';
}
- this._ws = new WebSocket(protocol + window.location.host);
+ this._ws = new WebSocket(
+ protocol + window.location.host + window.location.pathname);
this._ws.lona = this;
this._ws.onmessage = this._handle_raw_websocket_message;
|
ArnoldAttributesUI : Remove redundant metadata
This is for an attribute that was removed. | @@ -375,16 +375,6 @@ Gaffer.Metadata.registerNode(
],
- "attributes.subdivType.value" : [
-
- "preset:None", "none",
- "preset:Linear", "linear",
- "preset:Catclark", "catclark",
-
- "plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
-
- ],
-
"attributes.subdivIterations" : [
"description",
|
pkgutil: returns a List[str] usually
Yes, technically it returns whatever its first argument is if it isn't
a list.
Doing this because | import sys
from _typeshed import SupportsRead
-from typing import IO, Any, Callable, Iterable, Iterator, NamedTuple, Optional, Tuple, Union
+from typing import IO, Any, Callable, Iterable, Iterator, List, NamedTuple, Optional, Tuple, Union
if sys.version_info >= (3,):
from importlib.abc import Loader, MetaPathFinder, PathEntryFinder
@@ -18,7 +18,7 @@ if sys.version_info >= (3, 6):
else:
_ModuleInfoLike = Tuple[Union[MetaPathFinder, PathEntryFinder], str, bool]
-def extend_path(path: Iterable[str], name: str) -> Iterable[str]: ...
+def extend_path(path: List[str], name: str) -> List[str]: ...
class ImpImporter:
def __init__(self, path: Optional[str] = ...) -> None: ...
|
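For context, the documented use of `pkgutil.extend_path` (whose stub is corrected above) is inside a package's `__init__.py`, where it takes the package's `__path__` list and returns a list of directories, hence the `List[str]` annotation. A minimal sketch of that usage, meant to live in an `__init__.py` rather than run as a standalone script:

```python
# __init__.py of a package that may be spread across several sys.path entries.
from pkgutil import extend_path

# __path__ is a list of directories; extend_path returns a List[str] that also
# includes same-named package directories found elsewhere on sys.path.
__path__ = extend_path(__path__, __name__)
```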
Python API: leave non exposed fields out of the AST node dumper
TN: | @@ -364,6 +364,8 @@ class AnalysisUnit(object):
class LexicalEnv(object):
${py_doc('langkit.lexical_env_type', 4)}
+ _exposed = False
+
def __init__(self, c_value):
self._c_value = c_value
@@ -421,12 +423,14 @@ class BasePointerBinding(object):
class LogicVar(BasePointerBinding):
${py_doc('langkit.logic_var_type', 4)}
- pass
+
+ _exposed = False
class Equation(BasePointerBinding):
${py_doc('langkit.equation_type', 4)}
- pass
+
+ _exposed = False
% endif
@@ -778,7 +782,9 @@ class ${root_astnode_name}(object):
else:
for name, value in self.iter_fields(with_properties=False):
# Remove the f_ prefix to have the same behavior as the Ada
- # dumper.
+ # dumper. Also filter out non-exposed types to keep the same
+ # output with debug builds.
+ if getattr(value, '_exposed', True):
print_node(name[2:], value)
def findall(self, ast_type_or_pred, **kwargs):
|
progress bar
configure tqdm for colab | @@ -194,9 +194,7 @@ def train_model(args):
total = 0
logits = []
labels = []
- for input_ids, batch_labels in tqdm.tqdm(
- eval_dataloader, desc="Evaluating accuracy"
- ):
+ for input_ids, batch_labels in eval_dataloader:
if isinstance(input_ids, dict):
## HACK: dataloader collates dict backwards. This is a temporary
# workaround to get ids in the right shape
@@ -265,8 +263,8 @@ def train_model(args):
loss.backward()
return loss
- for epoch in tqdm.trange(int(args.num_train_epochs), desc="Epoch"):
- prog_bar = tqdm.tqdm(train_dataloader, desc="Iteration")
+ for epoch in tqdm.trange(int(args.num_train_epochs), desc="Epoch", position=0, leave=True):
+ prog_bar = tqdm.tqdm(train_dataloader, desc="Iteration", position=0, leave=False)
for step, batch in enumerate(prog_bar):
input_ids, labels = batch
labels = labels.to(device)
|
Error in docs for configuring dvr router
According to the docs on this link:
"Configure the Open vSwitch agent. Add the following to
/etc/neutron/plugins/ml2/ml2_conf.ini:"
It should be openvswitch_agent.ini rather than ml2_conf.ini. | @@ -113,7 +113,7 @@ Network nodes
-------------
#. Configure the Open vSwitch agent. Add the following to
- ``/etc/neutron/plugins/ml2/ml2_conf.ini``:
+ ``/etc/neutron/plugins/ml2/openvswitch_agent.ini``:
.. code-block:: ini
@@ -148,7 +148,7 @@ Compute nodes
-------------
#. Configure the Open vSwitch agent. Add the following to
- ``/etc/neutron/plugins/ml2/ml2_conf.ini``:
+ ``/etc/neutron/plugins/ml2/openvswitch_agent.ini``:
.. code-block:: ini
|
docs: qtile is now an official package on Arch Linux
Closes | Installing on Arch Linux
========================
-Qtile is available on the `AUR`_ as:
-
-======================= =======================
-Package Name Description
-======================= =======================
-`qtile`_ stable branch (release)
-`qtile-python3-git`_ development branch
-======================= =======================
-
-Using an AUR Helper
-===================
-
-The preferred way to install Qtile is with an `AUR helper`_. For example,
-if you use `yaourt`_:
-
-.. code-block:: bash
-
- yaourt -S <package-name>
-
-Using makepkg
-=============
-
-The latest version of either package can be obtained by downloading a snapshot
-or cloning its repository:
-
-.. code-block:: bash
-
- # snapshot
- curl -s https://aur.archlinux.org/cgit/aur.git/snapshot/<package-name>.tar.gz | tar -xvzf -
- # or repository
- git clone https://aur.archlinux.org/<package-name>.git
-
-Next makepkg has to be called in the directory where the files were saved. It
-installs missing dependencies using pacman, builds the package, installs it
-and removes obsolete build-time dependencies afterwards:
+Stable versions of Qtile are currently packaged for Arch Linux. To install this package, run:
.. code-block:: bash
- cd <package-name>
- makepkg -sri
+ pacman -S qtile
-Please see the ArchWiki for more information on `installing packages from the AUR`_.
+Please see the ArchWiki for more information on `Qtile`_.
-.. _AUR: https://wiki.archlinux.org/index.php/AUR
-.. _AUR Helper: https://wiki.archlinux.org/index.php/AUR_Helpers
-.. _installing packages from the AUR: https://wiki.archlinux.org/index.php/Arch_User_Repository#Installing_packages
-.. _qtile: https://aur.archlinux.org/packages/qtile/
-.. _qtile-python3-git: https://aur.archlinux.org/packages/qtile-python3-git/
-.. _yaourt: https://archlinux.fr/yaourt-en
+.. _Qtlie: https://wiki.archlinux.org/index.php/Qtile
|
adds ssh_resource changes to CHANGES.md
Summary: update CHANGES.md
Test Plan: BK
Reviewers: prha | - Fixes bug in `launch_scheduled_execution` that would mask configuration errors.
- Fixes bug in dagit where schedule related errors were not shown.
+**New**
+
+- _dagster-ssh_
+ - adds SFTP get and put functions to `SSHResource`, replacing sftp_solid.
+
## 0.8.1
**Bugfix**
|
Added GDB online editor
This editor is far more complete, and it's the only one that ran everything properly. | @@ -18,4 +18,5 @@ Welcome to Sample Programs in Swift!
- [Swift Wiki](https://en.wikipedia.org/wiki/Swift_(programming_language))
- [Swift Docs](https://swift.org/)
- [Swift GitHub](https://github.com/apple/swift)
-- [Swift Online Editor](https://iswift.org/playground)
+- [Swift Online Editor (iswift)](https://iswift.org/playground)
+- [Swift Online Editor (GDB)](https://www.onlinegdb.com/online_swift_compiler)
|
Update README
Correct table name for checking results | @@ -17,7 +17,7 @@ python3 user_last_login.py --cluster <cluster dns end point> --dbPort <port> --d
The results are stored in a table on the cluster. The schema and table are created if they do not already exist, or otherwise are truncated and repopulated on each execution of the script. View the results of the run by executing the following SQL query on the cluster:
```
-Select * from history.stg_user_last_login;
+Select * from history.user_last_login;
```
## Working Details and Limitations
|
Time nose
* update travis to use xenial
> 3.7 not available on default Ubuntu/travis builds, so using
instructions here: to
update us to 3.7 (while keeping 3.6) and maybe later pre-release dev
builds
* add time report to nose | @@ -26,13 +26,14 @@ before_script:
- pip install numpy
- pip install scipy
- pip install scikit-learn
+- pip install nose-timer
script:
# Notes on nose:
# Travis CI pre-installs `nose`
# https://github.com/coagulant/coveralls-python#nosetests
# http://nose.readthedocs.org/en/latest/plugins/skip.html
-- nosetests --no-skip --with-coverage --cover-package=cltk --with-doctest
+- nosetests --no-skip --with-coverage --cover-package=cltk --with-doctest --with-timer
- ( cd docs && make doctest; )
after_success:
|
Add `default_extension` setting hint.
This commit adds the setting hint for `default_extension` | // Added in 405x.
"context_menu": null,
+ // The default extension to be showed as the file save type in the OS
+ // file save manager, for new untitled files.
+ "default_extension": "",
+
// UI scaling factor (deprecated)
// Removed in 3181.
"dpi_scale": 1.0,
|
Clarify top plate orientation to roll cage
This was not originally clear and I ended up attaching it upside down during my build. | @@ -142,6 +142,8 @@ Once you have slid the nut in, you can attach the bottom plate. Once again, thi

+When attaching the roll cage to the top plate, ensure that the nubs on the top plate face the roll-cage. This will ensure the equipment you mount to the top plate fits easily.
+
### Step 4: Connect Servo Shield to Raspberry Pi.
You could do this after attaching the Raspberry Pi to the bottom plate, I just think it is easier to see the parts when they are laying on the workbench. Connect the parts as you see below:
|
Update instructions.append.md
Change the message in the docs' raised exception to better reflect what the tests are expecting | @@ -10,5 +10,5 @@ To raise a `ValueError` with a message, write the message as an argument to the
```python
# example when argument is zero or a negative integer
-raise ValueError("Only positive numbers are allowed")
+raise ValueError("Only positive integers are allowed")
```
|
temporary bug fix
create parent directories if necessary | @@ -821,8 +821,6 @@ class MaestralClient(object):
all_files.sort(key=lambda x: x.path_display)
all_deleted.sort(key=lambda x: x.path_display)
- print(all_folders)
-
# apply created folders (not in parallel!)
for folder in all_folders:
success = self._create_local_entry(folder)
@@ -919,7 +917,7 @@ class MaestralClient(object):
if not osp.isdir(dst_path):
try:
- os.mkdir(dst_path)
+ os.makedirs(dst_path)
except FileExistsError:
pass
|
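The behaviour being relied on above, in isolation: `os.mkdir` creates only the leaf directory and fails when parents are missing, while `os.makedirs` creates the intermediate directories as well; passing `exist_ok=True` also avoids the try/except around an already-existing path.

```python
import os
import tempfile

base = tempfile.mkdtemp()
nested = os.path.join(base, 'a', 'b', 'c')

# os.mkdir(nested) would raise FileNotFoundError because 'a' and 'b' do not exist yet.
os.makedirs(nested, exist_ok=True)   # creates a/, a/b/ and a/b/c/
os.makedirs(nested, exist_ok=True)   # second call is a no-op, no FileExistsError
print(os.path.isdir(nested))         # True
```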
Make 2 optional config parameters recognizable
Fix | @@ -172,6 +172,8 @@ def read_and_validate_experiment_config(config_filename: str) -> Dict:
Requirement(not local_experiment, str, False, ''),
'experiment':
Requirement(False, str, False, ''),
+ 'cloud_sql_instance_connection_name':
+ Requirement(False, str, True, ''),
'snapshot_period':
Requirement(False, int, False, ''),
'local_experiment':
@@ -180,6 +182,8 @@ def read_and_validate_experiment_config(config_filename: str) -> Dict:
Requirement(False, bool, False, ''),
'merge_with_nonprivate':
Requirement(False, bool, False, ''),
+ 'preemptible_runners':
+ Requirement(False, bool, False, ''),
}
all_params_valid = _validate_config_parameters(config, config_requirements)
|
Fix Point __str__ formatting
.2G is two significant figures. .2F is two digits after the decimal point. This is a huge difference. | @@ -2032,12 +2032,12 @@ class Point:
def __str__(self):
try:
- x_str = "%.2G" % self.x
+ x_str = "%.2F" % self.x
except TypeError:
return self.__repr__()
if "." in x_str:
x_str = x_str.rstrip("0").rstrip(".")
- y_str = "%.2G" % self.y
+ y_str = "%.2F" % self.y
if "." in y_str:
y_str = y_str.rstrip("0").rstrip(".")
return "%s,%s" % (x_str, y_str)
|
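The formatting difference from the commit message, demonstrated directly: `%.2G` keeps two significant figures (switching to exponent notation for large values), while `%.2F` keeps two digits after the decimal point.

```python
x = 1234.5678
print("%.2G" % x)   # '1.2E+03' -- two significant figures, precision lost
print("%.2F" % x)   # '1234.57' -- two digits after the decimal point

y = 0.004219
print("%.2G" % y)   # '0.0042'
print("%.2F" % y)   # '0.00'    -- very small values lose detail instead
```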
fw/job: only finalize if initialized
Only run finalize() for a job if initialize has succeeded. finalize()
should be able to assume that initialize() has succeeded, without needing
to check that files have been created, variables set, etc. | @@ -28,6 +28,10 @@ class Job(object):
def status(self):
return self._status
+ @property
+ def has_been_initialized(self):
+ return self._has_been_initialized
+
@status.setter
def status(self, value):
self._status = value
@@ -43,6 +47,7 @@ class Job(object):
self.output = None
self.run_time = None
self.retries = 0
+ self._has_been_initialized = False
self._status = Status.NEW
def load(self, target, loader=pluginloader):
@@ -67,6 +72,7 @@ class Job(object):
self.workload.logger.context = context
self.workload.initialize(context)
self.set_status(Status.PENDING)
+ self._has_been_initialized = True
context.update_job_state(self)
def configure_augmentations(self, context, pm):
@@ -135,6 +141,8 @@ class Job(object):
self.workload.teardown(context)
def finalize(self, context):
+ if not self._has_been_initialized:
+ return
if not context.tm.is_responsive:
self.logger.info('Target unresponsive; not finalizing.')
return
|
Fix a tiny typo
We should only log this message if we're checking the
notification-specific limit. | @@ -86,7 +86,7 @@ def check_service_over_daily_message_limit(service, key_type, notification_type,
)
# TODO: Remove this - only temporarily logging so it's easy to check/make sure no-one is hitting this
# rate limit while we roll it out
- if notification_type is not None:
+ if notification_type_ is not None:
current_app.logger.warning(
"notification-specific rate limit hit: {} at {} of {}".format(
notification_type, service_stats, limit_value
|
Update article.md
Small typos | @@ -34,7 +34,7 @@ Best viewed and edited with [Typora](http://typora.io).
### Rays through optical elements
-For completeness, we start with a very compact introduction to the ray matrix formalism to avoid. The ABCD matrix formalism (or ray matrices) allows a ray (column vector) to be transformed from one reference plane to another through different optical elements (represented by matrices). A ray is defined as :
+For completeness, we start with a very compact introduction to the ray matrix formalism. The ABCD matrix formalism (or ray matrices) allows a ray (column vector) to be transformed from one reference plane to another through different optical elements (represented by matrices). A ray is defined as :
$$
\mathbf{r} \equiv \Biggl[ \begin{matrix}
y \\
@@ -104,7 +104,7 @@ From this, we can already extract important properties for any optical systems:
3. **Principal planes:** Focal distances are measured from principal planes, which are planes of unity magnification in any systems where all the focusing power is concentrated. They are located at $L_\mathrm{PP_i} = \frac{{{n_1}/{n_2} - D}}{C}$ and $L_\mathrm{PP_o} = \frac{{1 - A}}{C}$. *[Explain and discuss signs, show a figure*]
4. **Optical invariant:** Finally, it can be shown that the product $n ( y_1 \theta_2 - y_2 \theta_1)$ for any two rays at a given point is a constant throughout the system. Therefore if a component cannot "support" a certain product, then it becomes clear the rays will be blocked.
-### Use of formalism c examples
+### Use of formalism in examples
We can easily recover the position of an image with respect to the position of an object with a thin lens matrix:
$$
|
improves CUDA testing stability
In our current infra we have memory issues. This is an optional test. | @@ -9,6 +9,7 @@ shapes = [(512, 3, 256, 256), (256, 1, 64, 64)]
PSs = [224, 32]
[email protected](reason='May cause memory issues.')
def test_performance_speed(device, dtype):
if device.type != 'cuda' or not torch.cuda.is_available():
pytest.skip("Cuda not available in system,")
|
2.5.1
Automatically generated by python-semantic-release | @@ -9,7 +9,7 @@ https://community.home-assistant.io/t/echo-devices-alexa-as-media-player-testers
"""
from datetime import timedelta
-__version__ = "2.5.0"
+__version__ = "2.5.1"
PROJECT_URL = "https://github.com/custom-components/alexa_media_player/"
ISSUE_URL = "{}issues".format(PROJECT_URL)
|
Allow for panning in the plane of the camera
Accessed via <CTRL>-[left click] | @@ -318,8 +318,22 @@ class GLViewWidget(QtOpenGL.QGLWidget):
self.mousePos = ev.pos()
if ev.buttons() == QtCore.Qt.LeftButton:
+ if (ev.modifiers() & QtCore.Qt.ControlModifier):
+ # pan in plane of camera
+ elev = np.radians(self.opts['elevation'])
+ azim = np.radians(self.opts['azimuth'])
+ fov = np.radians(self.opts['fov'])
+ dist = (self.opts['center'] - self.cameraPosition()).length()
+ fov_factor = np.tan(fov / 2) * 2
+ scale_factor = dist * fov_factor / self.width()
+ dx = diff.x()
+ dy = diff.y()
+ z = scale_factor * np.cos(elev) * dy
+ x = scale_factor * (np.sin(azim) * dx - np.sin(elev) * np.cos(azim) * dy)
+ y = scale_factor * (np.cos(azim) * dx + np.sin(elev) * np.sin(azim) * dy)
+ self.pan(x, -y, z, relative=False)
+ else:
self.orbit(-diff.x(), diff.y())
- #print self.opts['azimuth'], self.opts['elevation']
elif ev.buttons() == QtCore.Qt.MidButton:
if (ev.modifiers() & QtCore.Qt.ControlModifier):
self.pan(diff.x(), 0, diff.y(), relative=True)
|
Suppress warnings in tests
Summary: Pull Request resolved: | @@ -1222,6 +1222,7 @@ graph(%Ra, %Rb):
return grad_output
x = torch.tensor([0.], requires_grad=True)
+ with warnings.catch_warnings(record=True):
with self.assertRaisesRegex(RuntimeError, "MyLegacyFn"):
torch.jit.get_trace_graph(lambda x: MyLegacyFn()(x), (x,))
@@ -4526,6 +4527,7 @@ a")
return foo(x)
f = io.BytesIO()
+ with warnings.catch_warnings(record=True):
torch.onnx.export(MyDrop(), (eg,), f, verbose=False)
@unittest.skip("RuntimeError: VariableType::ID() not implemented")
@@ -6984,9 +6986,9 @@ a")
m.sub = nn.Linear(5, 5)
def test_script_inline_trace_multiple_args(self):
- class M(torch.jit.ScriptModule):
+ class M(torch.nn.Module):
def __init__(self):
- super(M, self).__init__(False)
+ super(M, self).__init__()
def forward(self, input, input2):
return input + input2
|
Fix checkFormat failure for null input
When an optional input is null we don't need to call
the checkFormat function for that input. | @@ -551,7 +551,7 @@ class Process(object):
if self.formatgraph:
for i in self.tool["inputs"]:
d = shortname(i["id"])
- if d in builder.job and i.get("format"):
+ if d in builder.job and i.get("format") and builder.job[d]:
checkFormat(builder.job[d], builder.do_eval(i["format"]), self.formatgraph)
builder.bindings.extend(builder.bind_input(self.inputs_record_schema, builder.job))
|
N->O Upgrade, make sure all nova placement parameters are properly set.
The restart of openstack-nova-compute takes place before crudini sets
the password, user_domain and project_name.
Closes-Bug: | @@ -65,18 +65,21 @@ resources:
- " crudini --set /etc/nova/nova.conf placement project_domain_name Default\n\n"
- " crudini --set /etc/nova/nova.conf placement user_domain_name Default\n\n"
- " crudini --set /etc/nova/nova.conf placement project_name service\n\n"
- - " systemctl restart openstack-nova-compute\n\n"
- - "fi\n\n"
- str_replace:
template: |
crudini --set /etc/nova/nova.conf placement password 'SERVICE_PASSWORD'
crudini --set /etc/nova/nova.conf placement region_name 'REGION_NAME'
crudini --set /etc/nova/nova.conf placement auth_url 'AUTH_URL'
- ROLE='ROLE_NAME'
params:
SERVICE_PASSWORD: { get_param: NovaPassword }
REGION_NAME: { get_param: KeystoneRegion }
AUTH_URL: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ - " systemctl restart openstack-nova-compute\n\n"
+ - "fi\n\n"
+ - str_replace:
+ template: |
+ ROLE='ROLE_NAME'
+ params:
ROLE_NAME: {{role.name}}
- get_file: ../extraconfig/tasks/pacemaker_common_functions.sh
- get_file: ../extraconfig/tasks/run_puppet.sh
|
Update panels.py
Panel heading typo fix: "Rendering setings" -> "Render settings" | @@ -81,7 +81,7 @@ flat_menu_content_panels = (
menu_settings_panels = (
MultiFieldPanel(
- heading=_('Rendering setings'),
+ heading=_('Render settings'),
children=(
FieldPanel('max_levels'),
FieldPanel('use_specific')
|
QRadar: Fix pagination
* Fix pagination
* Update integration-QRadar.yml
* Update integration-QRadar.yml
We're counting from zero
* Update integration-QRadar.yml | @@ -235,13 +235,13 @@ script:
var totalOffenses = res.length;
var lastCallOffenses = res.length;
while (startTime && lastCallOffenses >= offensesPerCall) {
- var min = res.length;
- var max = min + offensesPerCall;
+ var from = totalOffenses; // we're counting from zero
+ var to = from + offensesPerCall - 1;
var lastIncidentTime = res[res.length-1].start_time;
- res = res.concat(getOffenses('start_time>' + startTime + ' AND start_time<' + lastIncidentTime + (query ? (' AND (' + query +')') : ''), undefined, min + '-' + max));
- lastCallOffenses = res.length - totalOffenses;
- totalOffenses = res.length;
+ res = res.concat(getOffenses('start_time>' + startTime + ' AND start_time<' + lastIncidentTime + (query ? (' AND (' + query +')') : ''), undefined, from + '-' + to));
+ lastCallOffenses = res.length;
+ totalOffenses = totalOffenses + res.length;
}
var incidents = [];
@@ -534,4 +534,4 @@ script:
description: Retrieve a note for an offense
isfetch: true
runonce: false
-releaseNotes: "-"
+releaseNotes: "Fix bug where pagination missed some incidents"
|
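A language-neutral sketch (written in Python rather than the integration's JavaScript) of the zero-based, inclusive `from-to` pagination the fix above implements:

```python
def page_ranges(total, per_page):
    """Yield (from, to) index pairs, counting from zero and inclusive on both ends."""
    start = 0
    while start < total:
        yield start, min(start + per_page - 1, total - 1)
        start += per_page


print(list(page_ranges(10, 4)))  # [(0, 3), (4, 7), (8, 9)]
```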
Simplify extension discovery using pkgutil
The cog now keeps a set of fully qualified names of all extensions. | import logging
import os
from enum import Enum
+from pkgutil import iter_modules
from discord import Colour, Embed
from discord.ext.commands import Bot, Cog, Context, group
-from bot.constants import (
- Emojis, MODERATION_ROLES, Roles, URLs
-)
+from bot.constants import Emojis, MODERATION_ROLES, Roles, URLs
from bot.decorators import with_role
from bot.pagination import LinePaginator
@@ -29,19 +28,10 @@ class Extensions(Cog):
def __init__(self, bot: Bot):
self.bot = bot
- self.cogs = {}
-
- # Load up the cog names
- log.info("Initializing cog names...")
- for filename in os.listdir("bot/cogs"):
- if filename.endswith(".py") and "_" not in filename:
- if os.path.isfile(f"bot/cogs/{filename}"):
- cog = filename[:-3]
-
- self.cogs[cog] = f"bot.cogs.{cog}"
- # Allow reverse lookups by reversing the pairs
- self.cogs.update({v: k for k, v in self.cogs.items()})
+ log.info("Initialising extension names...")
+ modules = iter_modules(("bot/cogs", "bot.cogs"))
+ self.cogs = set(ext for ext in modules if ext.name[-1] != "_")
@group(name='extensions', aliases=('c', 'ext', 'exts'), invoke_without_command=True)
@with_role(*MODERATION_ROLES, Roles.core_developer)
|
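A standalone illustration of the discovery idiom above: `pkgutil.iter_modules` scans the given directories and yields `ModuleInfo` entries whose names can be filtered into a set. The `bot/cogs` path and `bot.cogs.` prefix are assumptions carried over from the diff, used here purely for the example:

```python
from pkgutil import iter_modules

# Scan a package directory and build fully qualified extension names,
# skipping "private" modules whose names end with an underscore.
modules = iter_modules(path=["bot/cogs"], prefix="bot.cogs.")
extensions = {module.name for module in modules if not module.name.endswith("_")}
print(sorted(extensions))  # e.g. ['bot.cogs.extensions', ...] when run inside the repo
```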
Update gromacs_check.py
Added missing `+` in the pattern match | @@ -18,9 +18,9 @@ class GromacsBaseCheck(RunOnlyRegressionTest):
self.keep_files = [output_file]
energy = sn.extractsingle(r'\s+Potential\s+Kinetic En\.\s+Total Energy'
- r'\sConserved En\.\s+Temperature\n'
+ r'\s+Conserved En\.\s+Temperature\n'
r'(\s+\S+){2}\s+(?P<energy>\S+)(\s+\S+){2}\n'
- r'\sPressure \(bar\)\s+Constr\. rmsd',
+ r'\s+Pressure \(bar\)\s+Constr\. rmsd',
output_file, 'energy', float, item=-1)
energy_reference = -3270799.9
energy_diff = sn.abs(energy - energy_reference)
|
Fix Concat Dimension Bug
Summary:
Pull Request resolved:
This diff is similar to a previous one. We need to handle the edge case when add_axis=1. | @@ -196,7 +196,9 @@ OpSchema::Cost CostInferenceForConcat(
: GetDimFromOrderString(
helper.GetSingleArgument<string>("order", "NCHW"));
bool add_axis = helper.GetSingleArgument<int>("add_axis", 0) != 0;
- const int canonical_axis = canonical_axis_index_(axis, in[0].dims_size());
+ int adj_size = in[0].dims_size() + (add_axis ? 1 : 0);
+ const int canonical_axis = canonical_axis_index_(axis, adj_size);
+ CAFFE_ENFORCE_LT(canonical_axis, adj_size, "Axis not in input ndim range.");
CAFFE_ENFORCE_GT(in.size(), 0);
vector<int> out_shape(in[0].dims().begin(), in[0].dims().end());
if (add_axis) {
|
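To make the edge case concrete, here is a small Python sketch of the intended shape logic (an illustration under assumed semantics, not the Caffe2 implementation): with `add_axis=1` the output gains a dimension, so the axis must be canonicalised against `ndim + 1`.

```python
def concat_output_shape(shapes, axis, add_axis=False):
    """Output shape of concat; with add_axis the inputs are stacked along a new axis."""
    ndim = len(shapes[0]) + (1 if add_axis else 0)
    axis = axis % ndim                 # canonicalise against the adjusted rank
    out = list(shapes[0])
    if add_axis:
        out.insert(axis, len(shapes))  # the new dimension counts the inputs
    else:
        out[axis] = sum(s[axis] for s in shapes)
    return tuple(out)

assert concat_output_shape([(2, 3), (2, 3)], axis=2, add_axis=True) == (2, 3, 2)
assert concat_output_shape([(2, 3), (2, 3)], axis=0) == (4, 3)
```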
Add update_reservation to dummy plugin
update_reservation is now an abstract method. It needs to be added to
all plugins. | @@ -25,6 +25,9 @@ class DummyVMPlugin(base.BasePlugin):
def reserve_resource(self, reservation_id, values):
return None
+ def update_reservation(self, reservation_id, values):
+ return None
+
def on_start(self, resource_id):
"""Dummy VM plugin does nothing."""
return 'VM %s should be waked up this moment.' % resource_id
|
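Because `update_reservation` is now abstract, a plugin class that omits it cannot even be instantiated. A minimal sketch of that contract (the class names here are assumptions, not the project's real base class):

```python
import abc

class BasePlugin(abc.ABC):
    @abc.abstractmethod
    def update_reservation(self, reservation_id, values):
        """Update an existing reservation."""

class DummyVMPlugin(BasePlugin):
    def update_reservation(self, reservation_id, values):
        return None

DummyVMPlugin()  # would raise TypeError if update_reservation were missing
```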
remove path for deprecated case properties
Deprecated properties will be ignored when sending FHIR data, so path is
redundant | @@ -158,7 +158,7 @@ def save_case_property(name, case_type, domain=None, data_type=None,
def _update_fhir_resource_property(case_property, fhir_resource_type, fhir_resource_prop_path, remove_path=False):
from corehq.motech.fhir.models import FHIRResourceProperty
- if remove_path:
+ if case_property.deprecated or remove_path:
try:
FHIRResourceProperty.objects.get(case_property=case_property,
resource_type=fhir_resource_type,
|
[FIX] Run example from Github command
GitHub uses https by default. I got an error with the git@ command. | @@ -205,7 +205,7 @@ Now that you have your training code, you can package it so that other data scie
specified in ``conda.yaml``.
If the repository has an ``MLproject`` file in the root you can also run a project directly from GitHub. This tutorial is duplicated in the https://github.com/mlflow/mlflow-example repository
-    which you can run with ``mlflow run git@github.com:mlflow/mlflow-example.git -P alpha=0.42``.
+ which you can run with ``mlflow run https://github.com/mlflow/mlflow-example.git -P alpha=5``.
.. container:: R
|
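The same GitHub-hosted project can also be launched through the Python API; a hedged sketch (the parameter value here is arbitrary):

```python
import mlflow

# Fetch the example project over https and run it with one hyperparameter.
mlflow.projects.run(
    "https://github.com/mlflow/mlflow-example.git",
    parameters={"alpha": 0.5},
)
```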
Use `"coma"` for `,` to work around conformer b-series issue
Fixes | @@ -132,6 +132,8 @@ punctuation_words = {
"back tick": "`",
"grave": "`",
"comma": ",",
+ # Workaround for issue with conformer b-series; see #946
+ "coma": ",",
"period": ".",
"full stop": ".",
"semicolon": ";",
|
Cleanup init_process_group
Summary:
Pull Request resolved:
torch.distributed.init_process_group() has had many parameters added, but the contract isn't clear. Adding documentation, asserts, and explicit args should make this clearer to callers and more strictly enforced. | @@ -306,12 +306,23 @@ def get_backend(group=group.WORLD):
def init_process_group(backend,
- init_method="env://",
+ init_method=None,
timeout=_default_pg_timeout,
- **kwargs):
+ world_size=-1,
+ rank=-1,
+ store=None,
+ group_name=''):
"""
Initializes the default distributed process group, and this will also
- initialize the distributed package
+ initialize the distributed package.
+
+ There are 2 main ways to initialize a process group:
+ 1. Specify ``store``, ``rank``, and ``world_size`` explicitly.
+ 2. Specify ``init_method`` (a URL string) which indicates where/how
+ to discover peers. Optionally specify ``rank`` and ``world_size``,
+ or encode all required parameters in the URL and omit them.
+ If neither is specified, ``init_method`` is assumed to be "env://".
+
Arguments:
backend (str or Backend): The backend to use. Depending on
@@ -323,12 +334,16 @@ def init_process_group(backend,
must have exclusive access to every GPU it uses, as sharing GPUs
between processes can result in deadlocks.
init_method (str, optional): URL specifying how to initialize the
- process group.
+ process group. Default is "env://" if no
+ ``init_method`` or ``store`` is specified.
+ Mutually exclusive with ``store``.
world_size (int, optional): Number of processes participating in
- the job.
+ the job. Required if ``store`` is specified.
rank (int, optional): Rank of the current process.
- store(Store, optional): Rendevous key/value store as an alternative
- to other init methods.
+ Required if ``store`` is specified.
+ store(Store, optional): Key/value store accessible to all workers, used
+ to exchange connection/address information.
+ Mutually exclusive with ``init_method``.
timeout (timedelta, optional): Timeout for operations executed against
the process group. Default value equals 30 minutes.
This is only applicable for the ``gloo`` backend.
@@ -351,15 +366,14 @@ def init_process_group(backend,
raise RuntimeError("trying to initialize the default process group "
"twice!")
- world_size = kwargs.pop('world_size', -1)
- group_name = kwargs.pop('group_name', '')
- rank = kwargs.pop('rank', -1)
- store = kwargs.pop('store', None)
+ assert (store is None) or (init_method is None), \
+ "Cannot specify both init_method and store."
+
if store is not None:
- assert world_size > 0, 'world_size needs to be positive'
- assert rank >= 0, 'rank needs to be non-negative'
- assert len(kwargs) == 0, \
- "got unexpected keyword arguments: %s" % ",".join(kwargs.keys())
+ assert world_size > 0, 'world_size must be positive if using store'
+ assert rank >= 0, 'rank must be non-negative if using store'
+ elif init_method is None:
+ init_method = "env://"
backend = Backend(backend)
@@ -374,6 +388,7 @@ def init_process_group(backend,
timeout=timeout)
else:
# backward compatible API
+ if store is None:
url = init_method
if world_size != -1 and rank != -1:
url += "?rank={}&world_size={}".format(rank, world_size)
@@ -382,7 +397,6 @@ def init_process_group(backend,
elif world_size != -1:
url += "?world_size={}".format(world_size)
- if store is None:
store, rank, world_size = next(rendezvous(url))
store.set_timeout(timeout)
|
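As a usage-level illustration of the two initialization styles documented above (the address, port, rank, and world size are placeholder assumptions for a single-process run):

```python
import torch.distributed as dist

# 1) URL-based rendezvous: peers discover each other via init_method.
dist.init_process_group(
    backend="gloo",
    init_method="tcp://127.0.0.1:23456",
    rank=0,
    world_size=1,
)
dist.destroy_process_group()

# 2) Explicit store: rank and world_size are then required.
store = dist.TCPStore("127.0.0.1", 23457, 1, True)  # host, port, world_size, is_master
dist.init_process_group(backend="gloo", store=store, rank=0, world_size=1)
dist.destroy_process_group()
```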
Use absolute path for Tmp on Windows CI
We have write permission to the root of the mounted volume so take
advantage of that to create an absolute path to tmp. | @@ -185,13 +185,13 @@ jobs:
git config --global user.name unused
git config --global user.email unused@localhost
git config --global init.defaultBranch main
- mkdir ..\Tmp -Force
+ mkdir \Tmp -Force
- name: Run built-in tests
if: ${{ !inputs.skip_tests }}
env:
GUILD_START_THRESHOLD: 1.0
- TMPDIR: ..\Tmp
+ TMPDIR: \Tmp
run: |
test-env\scripts\guild check -nT
|
[.travis.yml] Setup Travis-CI environment with development dependencies
This conforms to the appveyor environment, which install the development dependencies. | @@ -36,7 +36,7 @@ before_install:
- export PATH=$PATH:/opt/snap/bin
install:
- - pip install -r requirements.txt
+ - pip install -r requirements-dev.txt
- pip install coveralls coverage
- python setup.py install
|
Update dynamic_domain.txt
See also: and ```capturatela.txt``` for ```ddns.com.br``` trail. | @@ -1388,3 +1388,19 @@ b3ta.org
# Reference: https://www.virustotal.com/#/domain/ygto.com
ygto.com
+
+# Reference: https://www.virustotal.com/#/domain/ddns.com.br
+
+ddns.com.br
+
+# Reference: https://www.virustotal.com/#/domain/winconnection.net
+
+winconnection.net
+
+# Reference: https://www.virustotal.com/#/domain/minhaempresa.tv
+
+minhaempresa.tv
+
+# Reference: https://www.virustotal.com/#/domain/minhacasa.tv
+
+minhacasa.tv
|
Address review
Fix fetchers.py | @@ -384,7 +384,7 @@ class ArgoDataFetcher:
index_loader.profile(self._AccessPoint_data['wmo'], self._AccessPoint_data['cyc']).load()
if self._AccessPoint == 'region':
# Convert data box to index box (remove depth info):
- index_box = self._AccessPoint_data['box']
+ index_box = self._AccessPoint_data['box'].copy()
del index_box[4:6]
index_loader.region(index_box).load()
df = index_loader.index
@@ -457,8 +457,12 @@ class ArgoDataFetcher:
"""
self.load()
if ptype in ["dac", "institution"]:
+ if "institution" not in self.index:
+ self.to_index(full=True)
return bar_plot(self.index, by="institution", **kwargs)
elif ptype == "profiler":
+ if "profiler" not in self.index:
+ self.to_index(full=True)
return bar_plot(self.index, by="profiler", **kwargs)
elif ptype == "trajectory":
return plot_trajectory(self.index, **kwargs)
@@ -637,7 +641,7 @@ class ArgoIndexFetcher:
Longitude and latitude bounds are required, while the two bounding dates are optional.
If bounding dates are not specified, the entire time series is fetched.
- Eg: [-60, -55, 40., 45., 0., 10., '2007-08-01', '2007-09-01']
+ Eg: [-60, -55, 40., 45., '2007-08-01', '2007-09-01']
Returns
-------
|
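The `.copy()` in the fix above avoids the usual list-aliasing pitfall: deleting the depth bounds in place would otherwise mutate the caller's data box. A tiny standalone illustration:

```python
box = [-60, -55, 40.0, 45.0, 0.0, 10.0, "2007-08-01", "2007-09-01"]

index_box = box.copy()   # work on a copy, not an alias of the data box
del index_box[4:6]       # drop the two depth bounds for the index fetcher

assert len(box) == 8     # the original data box is untouched
assert index_box == [-60, -55, 40.0, 45.0, "2007-08-01", "2007-09-01"]
```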
Add a note in rank that all data will be moved into single node
Add a note in rank that all data will be moved into single node | @@ -2181,6 +2181,11 @@ class Series(_Frame, IndexOpsMixin, Generic[T]):
By default, equal values are assigned a rank that is the minimum of the
ranks of those values.
+ .. note:: the current implementation of rank uses Spark's Window without
+ specifying partition specification. This leads to move all data into
+ single partition in single machine and could cause serious
+ performance degradation. Avoid this method against very large dataset.
+
Parameters
----------
method : {'min'}, default 'min'
|
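The note refers to the underlying Spark behaviour: a window specification with an ORDER BY but no PARTITION BY forces every row into a single partition before ranking. A small PySpark sketch of that pattern (column names and data are assumptions):

```python
from pyspark.sql import SparkSession, Window
from pyspark.sql import functions as F

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1,), (2,), (2,), (3,)], ["value"])

# No partitionBy(): the whole dataset is shuffled into one partition.
unpartitioned = Window.orderBy("value")
df.withColumn("rank", F.rank().over(unpartitioned)).show()
```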
[fuchsia] Enable an example Rust fuzzer
Modifies the allowlist to let one of the example fuzzers written in Rust
run. This is so we can ensure that Rust crashes are handled properly. | @@ -75,7 +75,8 @@ class Fuzzer(object):
production). """
# Strip any sanitizer extensions
tgt = os.path.splitext(tgt)[0]
- return ((pkg == 'example-fuzzers' and tgt != 'out_of_memory_fuzzer') or
+ return ((pkg == 'example-fuzzers' and
+ tgt not in ('out_of_memory_fuzzer', 'toy_example_arbitrary')) or
(pkg == 'zircon_fuzzers' and tgt == 'noop-fuzzer'))
@classmethod
|
Remove acronyms from venue names
A few venues list their acronym in brackets as part of the name field, e.g. "International Conference on Computational Linguistics (COLING)". However, this information is already configured in the acronym field. On the venue's page this results in a duplication: "International Conference on Computational Linguistics (COLING) (COLING)" | @@ -190,7 +190,7 @@ cogalex:
coling:
acronym: COLING
is_toplevel: true
- name: International Conference on Computational Linguistics (COLING)
+ name: International Conference on Computational Linguistics
oldstyle_letter: C
comacoma:
acronym: ComAComA
@@ -284,7 +284,7 @@ ethnlp:
name: Workshop on Ethics in Natural Language Processing
eval4nlp:
acronym: Eval4NLP
- name: The Workshop on Evaluation and Comparison of NLP Systems (Eval4NLP)
+ name: The Workshop on Evaluation and Comparison of NLP Systems
evalnlgeval:
acronym: EvalNLGEval
name: Workshop on Evaluating NLG Evaluation
@@ -372,7 +372,7 @@ hlt:
oldstyle_letter: H
humeval:
acronym: HumEval
- name: The Workshop on Human Evaluation of NLP Systems (HumEval)
+ name: The Workshop on Human Evaluation of NLP Systems
url: https://humeval.github.io/
hytra:
acronym: HyTra
@@ -588,7 +588,7 @@ ngt:
nl4xai:
acronym: NL4XAI
name: Workshop on Interactive Natural Language Technology for Explainable Artificial
- Intelligence (NL4XAI)
+ Intelligence
nli:
acronym: NLI
is_acl: true
@@ -740,7 +740,7 @@ sadaatl:
name: Workshop on Synchronic and Diachronic Approaches to Analyzing Technical Language
scai:
acronym: scai
- name: International Workshop on Search-Oriented Conversational AI (SCAI)
+ name: International Workshop on Search-Oriented Conversational AI
scil:
acronym: SCiL
name: Society for Computation in Linguistics
@@ -750,7 +750,7 @@ sclem:
name: Workshop on Subword and Character LEvel Models in NLP
sdp:
acronym: sdp
- name: Workshop on Scholarly Document Processing (SDP 2020)
+ name: Workshop on Scholarly Document Processing
sedmt:
acronym: SedMT
is_acl: true
|
Make the Spacer widget accept a background color
Fixes | @@ -43,8 +43,11 @@ class Spacer(base._Widget):
DEPRECATED, same as ``length``.
"""
orientations = base.ORIENTATION_BOTH
+ defaults = [
+ ("background", None, "Widget background color")
+ ]
- def __init__(self, length=bar.STRETCH, width=None):
+ def __init__(self, length=bar.STRETCH, width=None, **config):
"""
"""
# 'width' was replaced by 'length' since the widget can be installed in
@@ -54,10 +57,11 @@ class Spacer(base._Widget):
'deprecated. Please use length.')
length = width
- base._Widget.__init__(self, length)
+ base._Widget.__init__(self, length, **config)
+ self.add_defaults(Spacer.defaults)
def draw(self):
- self.drawer.clear(self.bar.background)
+ self.drawer.clear(self.background or self.bar.background)
if self.bar.horizontal:
self.drawer.draw(offsetx=self.offset, width=self.length)
else:
|
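A possible configuration snippet using the new option (the colours and bar size are arbitrary assumptions): since `Spacer.__init__` now forwards `**config` to the base widget, `background` is honoured like on any other widget.

```python
from libqtile import bar, widget

my_bar = bar.Bar(
    [
        widget.Clock(),
        widget.Spacer(length=bar.STRETCH, background="#222244"),  # tinted stretch gap
        widget.Systray(),
    ],
    24,  # bar height in pixels
)
```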
Update README.md
Add a link to a WIP review of related work | @@ -96,6 +96,9 @@ the [gym_minigrid](https://github.com/maximecb/gym-minigrid) repository.
You can find here a presentation of the project: [Baby AI Summary](https://docs.google.com/document/d/1WXY0HLHizxuZl0GMGY0j3FEqLaK1oX-66v-4PyZIvdU)
+A work-in-progress review of related work can be found [here]
+(https://www.overleaf.com/13480997qqsxybgstxhg#/52042269/)
+
The Baby AI Game is a game in which an agent existing in a simulated world
will be trained to complete task through reinforcement learning as well
as interactions from one or more human teachers. These interactions will take
|
tell people to run fab from the commcarehq-ansible dir
rather than making an alias | @@ -78,33 +78,9 @@ if 'y' == input('Do you want instructions for how to migrate? [y/N]'):
Run a fab command!
==================
- Enter the fab directory of commcarehq-ansible
-
- cd commcarehq-ansible/fab
-
- Run your fab command
-
fab production deploy
-
- Bonus: Run fab from any directory
- =================================
-
- You will always need to enter the ansible virtualenv to run fab from now on,
- but if you use the following alias, you can run it from anywhere.
-
- alias fab='fab -f ~/.commcare-cloud/repo/fab/fabfile.py'
-
- to make alias always available, add it to the profile file you use for your aliases,
- possibly one of the following:
-
- echo "alias fab='fab -f ~/.commcare-cloud/repo/fab/fabfile.py'" >> ~/.profile
- echo "alias fab='fab -f ~/.commcare-cloud/repo/fab/fabfile.py'" >> ~/.bash_profile
- echo "alias fab='fab -f ~/.commcare-cloud/repo/fab/fabfile.py'" >> ~/.bashrc
-
- Now from anywhere
-
- workon ansible
- fab production deploy
+ Remember that in steady state, you will need to workon the ansible virtualenv
+ and enter the commcarehq-ansible directory before you will be able to run a fab command.
""")
exit(1)
|
docs(config): update configuration doc to show all options
The current version only shows the LSTM options, and fails to show the
other parameters for LogisticRegression, SparseLSTM, and
SparseLogisticRegression. | @@ -47,5 +47,18 @@ The default ``.fonduer-config.yaml`` configuration file is shown below::
bidirectional: True
host_device: "CPU"
max_sentence_length: 100
+ LogisticRegression:
+ bias: False
+ SparseLSTM:
+ emb_dim: 100
+ hidden_dim: 100
+ attention: True
+ dropout: 0.1
+ bidirectional: True
+ host-device: "CPU"
+ max_sentence_length: 100
+ bias: False
+ SparseLogisticRegression:
+ bias: False
.. _Fonduer: https://github.com/HazyResearch/fonduer
|
Adds `jvm_jdk` field to protobufs when the Scala backend is enabled
Registers the `jvm_jdk` field on protobuf sources for the scala version
Closes | @@ -11,6 +11,8 @@ from pants.backend.codegen.protobuf.scala.subsystem import PluginArtifactSpec, S
from pants.backend.codegen.protobuf.target_types import (
ProtobufDependenciesField,
ProtobufSourceField,
+ ProtobufSourcesGeneratorTarget,
+ ProtobufSourceTarget,
)
from pants.backend.scala.target_types import ScalaSourceField
from pants.core.goals.generate_lockfiles import GenerateToolLockfileSentinel
@@ -49,6 +51,7 @@ from pants.jvm.jdk_rules import InternalJdk, JvmProcess
from pants.jvm.resolve.common import ArtifactRequirements, Coordinate, GatherJvmCoordinatesRequest
from pants.jvm.resolve.coursier_fetch import ToolClasspath, ToolClasspathRequest
from pants.jvm.resolve.jvm_tool import GenerateJvmLockfileFromTool
+from pants.jvm.target_types import JvmJdkField
from pants.source.source_root import SourceRoot, SourceRootRequest
from pants.util.logging import LogLevel
from pants.util.ordered_set import FrozenOrderedSet
@@ -333,10 +336,16 @@ def generate_scalapbc_lockfile_request(
return GenerateJvmLockfileFromTool.create(tool)
+class PrefixedJvmJdkField(JvmJdkField):
+ alias = "jvm_jdk"
+
+
def rules():
return [
*collect_rules(),
*lockfile.rules(),
UnionRule(GenerateSourcesRequest, GenerateScalaFromProtobufRequest),
UnionRule(GenerateToolLockfileSentinel, ScalapbcToolLockfileSentinel),
+ ProtobufSourceTarget.register_plugin_field(PrefixedJvmJdkField),
+ ProtobufSourcesGeneratorTarget.register_plugin_field(PrefixedJvmJdkField),
]
|
Update rocketpy/Flight.py
Suggestion accepted | @@ -2910,7 +2910,7 @@ class Flight:
return None
- def finFlutterAnalysis(self, finThickness, shearModulus):
+ def calculateFinFlutterAnalysis(self, finThickness, shearModulus):
""" Calculate, create and plot the Fin Flutter velocity, based on the
pressure profile provided by Atmosferic model selected. It considers the
Flutter Boundary Equation that is based on a calculation published in
@@ -3529,4 +3529,3 @@ class Flight:
+ str(len(self.parachutes))
+ "}"
)
-
|
[TIR] Add test to cover specific case of reducer match buffer checking
Adding in a test that covers another case that can happen when running a check in reducer.cc [here](https://github.com/apache/tvm/blob/main/src/tir/schedule/analysis/reducer.cc#L590). | @@ -548,6 +548,56 @@ def single_reduction_loop_with_tensorize(
)
[email protected]_func
+def nested_reduction_loop_with_inner_match_buffers(
+ in0: T.Buffer[(4, 16), "int8"],
+ in1: T.Buffer[(4, 16), "int8"],
+ out: T.Buffer[(4, 4), "int32"],
+) -> None:
+ # body
+ # with T.block("root")
+ for y in T.serial(4):
+ with T.block("C"):
+ yi = T.axis.spatial(4, y)
+ T.reads(in0[yi, 0:16], in1[yi, 0:16])
+ T.writes(out[yi, 0:4])
+ for x in T.serial(4):
+ with T.block("C"):
+ xr = T.axis.reduce(4, x)
+ with T.init():
+ for i in T.serial(4):
+ with T.block("C_init"):
+ ii = T.axis.spatial(4, i)
+ T.reads()
+ T.writes(out[yi, ii])
+ out[yi, ii] = 0
+ with T.block("C"):
+ T.reads(
+ out[yi, xr],
+ in0[yi, yi * 4 + xr : yi * 4 + xr + 4],
+ in1[yi, yi * 4 + xr : yi * 4 + xr + 4],
+ )
+ T.writes(out[yi, xr])
+ A = T.match_buffer(
+ in0[yi, yi * 4 + xr : yi * 4 + xr + 4],
+ [4],
+ dtype="int8",
+ offset_factor=1,
+ )
+ B = T.match_buffer(
+ in1[yi, yi * 4 + xr : yi * 4 + xr + 4],
+ [4],
+ dtype="int8",
+ offset_factor=1,
+ )
+ C = T.match_buffer(out[yi, xr], [1], dtype="int32", offset_factor=1)
+ A_i8x4: T.int8x4 = A[0:4]
+ A_i32: T.int32 = T.reinterpret(A_i8x4, dtype="int32")
+ B_i8x4: T.int8x4 = B[0:4]
+ B_i32: T.int32 = T.reinterpret(B_i8x4, dtype="int32")
+ C[0] = A_i32 + B_i32 + C[0]
+
+
@T.prim_func
def reducer_max(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
@@ -1247,6 +1297,13 @@ def test_single_reduction_loop_with_tensorize():
)
+def test_nested_reduction_loop_with_inner_match_buffers():
+ _check(
+ nested_reduction_loop_with_inner_match_buffers,
+ nested_reduction_loop_with_inner_match_buffers,
+ )
+
+
def test_reducer_max():
_check(reducer_max, lowered_reducer_max)
|
Restyle checkboxes to be less visually abrasive.
This restyles the checkboxes to be more subtle in their rest and
unchecked state so they don't create too much visual noise on the
page. | position: relative;
top: -2px;
- padding: 1px;
+ padding: 2px;
margin: 0px 5px 0px 0px;
- height: 12px;
- width: 12px;
+ height: 10px;
+ width: 10px;
font-weight: 300;
line-height: 0.8;
font-size: 1.3rem;
text-align: center;
- border: 2px solid hsl(0, 0%, 80%);
+ border: 1px solid hsl(0, 0%, 75%);
color: hsl(0, 0%, 80%);
border-radius: 4px;
- -webkit-filter: grayscale(1) brightness(0.7);
+ -webkit-filter: grayscale(1);
cursor: pointer;
}
|
Updated Dev Documentation
Added line for installing developer tools for local development
Fixes: | @@ -88,6 +88,7 @@ See the previous section for instructions.
mkvirtualenv cirq-py3 --python=/usr/bin/python3
python -m pip install --upgrade pip
python -m pip install -e .[dev_env]
+ python -m pip install -r dev_tools/conf/pip-list-dev-tools.txt
```
(When you later open another terminal, you can activate the virtualenv with `workon cirq-py3`.)
|
Fix the new jinja_test failures
Refs
The max function returns an int, not a float, as does the min function.
This updates the assertions to compare against ints. | @@ -946,13 +946,13 @@ class TestCustomExtensions(TestCase):
'''Test the `min` Jinja filter.'''
rendered = render_jinja_tmpl("{{ [1, 2, 3] | min }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
- self.assertEqual(rendered, u'1.0')
+ self.assertEqual(rendered, u'1')
def test_max(self):
'''Test the `max` Jinja filter.'''
rendered = render_jinja_tmpl("{{ [1, 2, 3] | max }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
- self.assertEqual(rendered, u'3.0')
+ self.assertEqual(rendered, u'3')
def test_avg(self):
'''Test the `avg` Jinja filter.'''
|
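The assertion change follows from plain Python semantics: `min` and `max` over a list of ints return an int, so the template renders without a decimal point. A quick standalone check (not the Salt test harness):

```python
values = [1, 2, 3]

assert max(values) == 3 and isinstance(max(values), int)
assert min(values) == 1 and isinstance(min(values), int)
assert "{}".format(max(values)) == "3"   # renders as '3', not '3.0'
```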
PEP Improvisations: Move errors sending from PEP command to `get_pep_embed`
Before this, all error embeds were returned from `get_pep_embed`, but now it sends them itself and returns only the correct embed, making the check in the command easier. | @@ -4,7 +4,7 @@ import re
import unicodedata
from email.parser import HeaderParser
from io import StringIO
-from typing import Dict, Tuple, Union
+from typing import Dict, Optional, Tuple, Union
from discord import Colour, Embed
from discord.ext.commands import BadArgument, Cog, Context, command
@@ -220,12 +220,11 @@ class Utils(Cog):
# Handle PEP 0 directly because it's not in .rst or .txt so it can't be accessed like other PEPs.
if pep_number == 0:
pep_embed = self.get_pep_zero_embed()
- success = True
else:
- pep_embed, success = await self.get_pep_embed(pep_number)
- await ctx.send(embed=pep_embed)
+ pep_embed = await self.get_pep_embed(pep_number, ctx)
- if success:
+ if pep_embed:
+ await ctx.send(embed=pep_embed)
log.trace(f"PEP {pep_number} getting and sending finished successfully. Increasing stat.")
self.bot.stats.incr(f"pep_fetches.{pep_number}")
@@ -244,12 +243,15 @@ class Utils(Cog):
return pep_embed
@async_cache(arg_offset=1)
- async def get_pep_embed(self, pep_nr: int) -> Tuple[Embed, bool]:
+ async def get_pep_embed(self, pep_nr: int, ctx: Context) -> Optional[Embed]:
"""Fetch, generate and return PEP embed."""
if pep_nr not in self.peps:
log.trace(f"PEP {pep_nr} was not found")
not_found = f"PEP {pep_nr} does not exist."
- return Embed(title="PEP not found", description=not_found, colour=Colour.red()), False
+ await ctx.send(
+ embed=Embed(title="PEP not found", description=not_found, colour=Colour.red())
+ )
+ return
response = await self.bot.http_session.get(self.peps[pep_nr])
if response.status == 200:
@@ -274,7 +276,7 @@ class Utils(Cog):
# embed field values can't contain an empty string
if pep_header.get(field, ""):
pep_embed.add_field(name=field, value=pep_header[field])
- return pep_embed, True
+ return pep_embed
else:
log.trace(
f"The user requested PEP {pep_nr}, but the response had an unexpected status code: "
@@ -282,7 +284,10 @@ class Utils(Cog):
)
error_message = "Unexpected HTTP error during PEP search. Please let us know."
- return Embed(title="Unexpected error", description=error_message, colour=Colour.red()), False
+ await ctx.send(
+ embed=Embed(title="Unexpected error", description=error_message, colour=Colour.red())
+ )
+ return
def setup(bot: Bot) -> None:
|
build: don't force-push git branches needed for historical builds
closes | @@ -167,7 +167,7 @@ RUN cd /opt \
&& cd buildozer \
&& git remote add sombernight https://github.com/SomberNight/buildozer \
&& git fetch --all \
- # commit: from branch sombernight/electrum_20210421
+ # commit: from branch sombernight/electrum_20210421 (note: careful with force-pushing! see #8162)
&& git checkout "6f03256e8312f8d1e5a6da3a2a1bf06e2738325e^{commit}" \
&& python3 -m pip install --no-build-isolation --no-dependencies --user -e .
@@ -178,7 +178,7 @@ RUN cd /opt \
&& git remote add sombernight https://github.com/SomberNight/python-for-android \
&& git remote add accumulator https://github.com/accumulator/python-for-android \
&& git fetch --all \
- # commit: from branch accumulator/electrum_20210421d
+ # commit: from branch accumulator/electrum_20210421d (note: careful with force-pushing! see #8162)
&& git checkout "d33e07ba4c7931da46122a32f3807709a73cb7f6^{commit}" \
&& python3 -m pip install --no-build-isolation --no-dependencies --user -e .
|
Address
Address
correct merge error, restore import of eye_zoom_mouse
comment cleanup | import os
from talon import Module, actions, app, clip, cron, ctrl, imgui, noise, ui
+from talon_plugins import eye_zoom_mouse
key = actions.key
self = actions.self
@@ -110,7 +111,7 @@ class Actions:
def mouse_wake():
"""Enable control mouse, zoom mouse, and disables cursor"""
actions.tracking.control_zoom_toggle(True)
- # eye_mouse.control_mouse.enable()
+
if setting_mouse_wake_hides_cursor.get() >= 1:
show_cursor_helper(False)
@@ -249,10 +250,7 @@ def show_cursor_helper(show):
def on_pop(active):
if setting_mouse_enable_pop_stops_scroll.get() >= 1 and (gaze_job or scroll_job):
stop_scroll()
- elif (
- not eye_zoom_mouse.zoom_mouse.enabled
- and eye_mouse.mouse.attached_tracker is not None
- ):
+ elif not actions.tracking.control_zoom_enabled():
if setting_mouse_enable_pop_click.get() >= 1:
ctrl.mouse_click(button=0, hold=16000)
@@ -276,17 +274,13 @@ def mouse_scroll(amount):
def scroll_continuous_helper():
global scroll_amount
# print("scroll_continuous_helper")
- if scroll_amount and (
- eye_zoom_mouse.zoom_mouse.state == eye_zoom_mouse.STATE_IDLE
- ): # or eye_zoom_mouse.zoom_mouse.state == eye_zoom_mouse.STATE_SLEEP):
+ if scroll_amount and (eye_zoom_mouse.zoom_mouse.state == eye_zoom_mouse.STATE_IDLE):
actions.mouse_scroll(by_lines=False, y=int(scroll_amount / 10))
def start_scroll():
global scroll_job
scroll_job = cron.interval("60ms", scroll_continuous_helper)
- # if eye_zoom_mouse.zoom_mouse.enabled and eye_mouse.mouse.attached_tracker is not None:
- # eye_zoom_mouse.zoom_mouse.sleep(True)
def gaze_scroll():
@@ -340,13 +334,8 @@ def stop_scroll():
continuous_scoll_mode = ""
- # if eye_zoom_mouse.zoom_mouse.enabled and eye_mouse.mouse.attached_tracker is not None:
- # eye_zoom_mouse.zoom_mouse.sleep(False)
-
def start_cursor_scrolling():
global scroll_job, gaze_job
stop_scroll()
gaze_job = cron.interval("60ms", gaze_scroll)
- # if eye_zoom_mouse.zoom_mouse.enabled and eye_mouse.mouse.attached_tracker is not None:
- # eye_zoom_mouse.zoom_mouse.sleep(True)
|
Clarify purpose of all service sms test
The claim in the comment was once correct [^1] but no longer [^2],
so this is really just a base case test of what the function returns.
[^1]:
[^2]: | @@ -649,8 +649,7 @@ def test_fetch_sms_free_allowance_remainder_until_date_with_two_services(notify_
assert service_2_result[0] == (service_2.id, 20, 22, 0)
-def test_fetch_usage_for_all_services_sms_for_first_quarter(notify_db_session):
- # This test is useful because the inner query resultset is empty.
+def test_fetch_usage_for_all_services_sms(notify_db_session):
service = create_service(service_name='a - has free allowance')
template = create_template(service=service)
org = create_organisation(name="Org for {}".format(service.name))
|
Update mouse.py
remove debug spam | @@ -32,7 +32,7 @@ def mouse_scroll(amount):
def scroll_continuous_helper():
global scroll_amount
- print("scroll_continuous_helper")
+ #print("scroll_continuous_helper")
if scroll_amount:
actions.mouse_scroll(by_lines=False, y=int(scroll_amount / 10))
|
Expand expected assumerole exceptions
This adds 'AccessDenied' to the list of expected assumerole exceptions.
The api will *usually* return 'InvalidClientTokenId', but not always. | @@ -225,7 +225,7 @@ class TestAssumeRoleCredentials(BaseEnvVar):
return result['Credentials']
except ClientError as e:
code = e.response.get('Error', {}).get('Code')
- if code == "InvalidClientTokenId":
+ if code in ["InvalidClientTokenId", "AccessDenied"]:
time.sleep(delay)
else:
raise
|
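The retry loop above generalises to any set of expected propagation errors; a hedged sketch of the same pattern outside the test suite (function and constant names are assumptions):

```python
import time
from botocore.exceptions import ClientError

EXPECTED_CODES = {"InvalidClientTokenId", "AccessDenied"}

def assume_role_with_retry(sts_client, retries=5, delay=2, **assume_role_kwargs):
    """Retry sts.assume_role while IAM propagation returns an expected error code."""
    for _ in range(retries):
        try:
            return sts_client.assume_role(**assume_role_kwargs)["Credentials"]
        except ClientError as e:
            if e.response.get("Error", {}).get("Code") in EXPECTED_CODES:
                time.sleep(delay)   # credentials not propagated yet; wait and retry
            else:
                raise
    raise RuntimeError("assume_role did not succeed after retries")
```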
Langkit_Support.Bump_Ptr.Vectors: minor reformatting
(no-tn-check) | @@ -20,8 +20,7 @@ package Langkit_Support.Bump_Ptr.Vectors is
subtype Index_Type is Positive;
type Vector is private
- with Iterable =>
- (First => First,
+ with Iterable => (First => First,
Next => Next,
Has_Element => Has_Element,
Element => Get);
@@ -59,8 +58,7 @@ package Langkit_Support.Bump_Ptr.Vectors is
-- Get the element at Index
function Get_At_Index (Self : Vector; I : Index_Type) return Element_Type
- with
- Inline,
+ with Inline,
Pre => I <= Last_Index (Self);
-- Get the element at Index
|