message | diff
---|---
Run ceph-ansible using tripleo-ansible-inventory
Remove the generate inventory task and instead use the
same inventory as the calling playbook, which should now
contain the inventory groups ceph-ansible expects as per
the depends-on patch.
Depends-On: | @@ -414,31 +414,14 @@ outputs:
- "{{playbook_dir}}/ceph-ansible/group_vars"
- "{{playbook_dir}}/ceph-ansible/host_vars"
- "{{playbook_dir}}/ceph-ansible/fetch_dir"
- - name: generate inventory
- copy:
+ - name: symbolic link to tripleo inventory from ceph-ansible work directory
+ # If we call ceph-ansible with the same inventory as the calling
+ # playbook, then config-download/groups_vars will be used instead
+ # of config-download/ceph-ansible/group_vars.
+ file:
+ src: "{{inventory_file}}"
dest: "{{playbook_dir}}/ceph-ansible/inventory.yml"
- content: |
- {%- set ceph_groups = ['mgr', 'mon', 'osd', 'mds', 'rgw', 'nfs', 'rbdmirror', 'client'] -%}
- {%- for ceph_group in ceph_groups -%}
- {%- if 'ceph_' ~ ceph_group in groups -%}
- {%- set ceph_group_hosts = groups['ceph_' ~ ceph_group] | difference(blacklisted_hostnames) -%}
- {%- if ceph_group_hosts|length > 0 %}
-
- {{ ceph_group ~ 's:' }}
- hosts:
- {% for host in ceph_group_hosts -%}
- {{ host }}:
- ansible_user: {{ hostvars.raw_get(host)['ansible_ssh_user'] | default('root') }}
- ansible_host: {{ hostvars.raw_get(host)['ansible_host'] | default(host) }}
- {% if hostvars.raw_get(host)['ansible_connection'] | default('') == 'local' -%}
- ansible_connection: local
- {% endif -%}
- ansible_become: true
- {% endfor -%}
-
- {%- endif -%}
- {%- endif -%}
- {%- endfor %}
+ state: link
- name: set ceph-ansible group vars all
set_fact:
ceph_ansible_group_vars_all: {get_attr: [CephBaseAnsibleVars, value, vars]}
|
Use setdefault instead of get with a default
Use a dict for constructing the force:org:create command | @@ -932,7 +932,7 @@ class ScratchOrgConfig(OrgConfig):
@property
def days(self):
- return self.config.get('days', 7)
+ return self.config.setdefault('days', 7)
@property
def expired(self):
@@ -957,27 +957,18 @@ class ScratchOrgConfig(OrgConfig):
if not self.scratch_org_type:
self.config['scratch_org_type'] = 'workspace'
- devhub = ''
- if self.devhub:
- devhub = ' --targetdevhubusername {}'.format(self.devhub)
-
- namespaced = ''
- if not self.namespaced:
- namespaced = ' -n'
-
- days = ''
- if self.days:
- days = ' --durationdays {}'.format(self.days)
-
- alias = ''
- if self.sfdx_alias:
- alias = ' -a {}'.format(self.sfdx_alias)
+ options = {
+ 'config_file': self.config_file,
+ 'devhub': ' --targetdevhubusername {}'.format(self.devhub) if self.devhub else '',
+ 'namespaced': ' -n' if not self.namespaced else '',
+ 'days': ' --durationdays {}'.format(self.days) if self.days else '',
+ 'alias': ' -a {}'.format(self.sfdx_alias) if self.sfdx_alias else '',
+ 'extraargs': os.environ.get('SFDX_ORG_CREATE_ARGS', ''),
+ }
# This feels a little dirty, but the use cases for extra args would mostly
# work best with env vars
- extraargs = os.environ.get('SFDX_ORG_CREATE_ARGS', '')
- command = 'sfdx force:org:create -f {}{}{}{}{} {}'.format(
- self.config_file, devhub, namespaced, alias, days, extraargs)
+ command = 'sfdx force:org:create -f {config_file}{devhub}{namespaced}{days}{alias} {extraargs}'.format(**options)
self.logger.info(
'Creating scratch org with command {}'.format(command))
p = sarge.Command(command, stdout=sarge.Capture(buffer_size=-1))
|
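As an aside on the first hunk: `dict.get` with a default never mutates the mapping, while `dict.setdefault` stores the default on first access so later reads return the same value. A minimal sketch (not the project's code):

```python
config = {}

# get() returns the fallback but leaves the dict untouched
assert config.get('days', 7) == 7
assert 'days' not in config

# setdefault() returns the fallback and stores it
assert config.setdefault('days', 7) == 7
assert config == {'days': 7}
```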
CV GUI: set IR defaults based on command line options
(they seem to be auto-applied: callback called on start) | @@ -481,9 +481,9 @@ class Demo:
irDrivers = self._device.getIrDrivers()
if irDrivers:
print('IR drivers detected on OAK-D Pro:', [f'{d[0]} on bus {d[1]}' for d in irDrivers])
- Trackbars.createTrackbar('IR Laser Dot Projector [mA]', queueName, 0, 1200, 0,
+ Trackbars.createTrackbar('IR Laser Dot Projector [mA]', queueName, 0, 1200, self._conf.args.irDotBrightness,
lambda value: self._device.setIrLaserDotProjectorBrightness(value))
- Trackbars.createTrackbar('IR Flood Illuminator [mA]', queueName, 0, 1500, 0,
+ Trackbars.createTrackbar('IR Flood Illuminator [mA]', queueName, 0, 1500, self._conf.args.irFloodBrightness,
lambda value: self._device.setIrFloodLightBrightness(value))
def _updateCameraConfigs(self):
|
Improve tests
Tests didn't abide by one of the validation conditions for shadow forms.
That is, they didn't include every action in the source form. | @@ -33,6 +33,14 @@ class ShadowFormSuiteTest(SimpleTestCase, TestXmlMixin):
self.shadow_form = self.factory.new_shadow_form(self.advanced_module)
self.shadow_form.shadow_parent_form_id = self.form0.unique_id
+ # Shadow form load_update_case actions should contain all case tags from the parent
+ self.shadow_form.extra_actions.load_update_cases = [
+ LoadUpdateAction(
+ case_type="patient",
+ case_tag="load_0",
+ details_module=self.advanced_module.unique_id,
+ )
+ ]
self.basic_module = self.factory.new_basic_module("basic_module", "doctor", with_form=False)
@@ -73,8 +81,14 @@ class ShadowFormSuiteTest(SimpleTestCase, TestXmlMixin):
def test_shadow_form_action_additions(self):
# Confirm that shadow form action additions are reflected in the suite file
+ original_actions = self.shadow_form.extra_actions.load_update_cases
try:
self.shadow_form.extra_actions.load_update_cases = [
+ LoadUpdateAction(
+ case_type="patient",
+ case_tag="load_0",
+ details_module=self.advanced_module.unique_id,
+ ),
LoadUpdateAction(
case_tag="load_1",
case_type="doctor",
@@ -84,7 +98,7 @@ class ShadowFormSuiteTest(SimpleTestCase, TestXmlMixin):
suite = self.factory.app.create_suite()
finally:
# reset the actions
- self.shadow_form.extra_actions = AdvancedFormActions()
+ self.shadow_form.extra_actions.load_update_cases = original_actions
# Confirm that the source session has not changed:
expected_source_session = """
@@ -129,6 +143,7 @@ class ShadowFormSuiteTest(SimpleTestCase, TestXmlMixin):
def test_shadow_form_action_modifications(self):
# Confirm that shadow form action modifications are reflected in the suite file
+ original_actions = self.shadow_form.extra_actions.load_update_cases
try:
self.shadow_form.extra_actions.load_update_cases = [
LoadUpdateAction(
@@ -140,7 +155,7 @@ class ShadowFormSuiteTest(SimpleTestCase, TestXmlMixin):
suite = self.factory.app.create_suite()
finally:
# reset the actions
- self.shadow_form.extra_actions = AdvancedFormActions()
+ self.shadow_form.extra_actions.load_update_cases = original_actions
# Confirm that the source session has not changed:
expected_source_session = """
|
feat: separate name scope added to optimizer. Optimizer variables
excluded from save/load methods | @@ -52,14 +52,19 @@ class TFModel(NNModel, metaclass=TfModelMeta):
# Check presence of the model files
if tf.train.checkpoint_exists(path):
print('[loading model from {}]'.format(path), file=sys.stderr)
- saver = tf.train.Saver()
+ # Exclude optimizer variables from saved variables
+ var_list = [var for var in tf.trainable_variables()
+ if not var.name.startswith('Optimizer')]
+ saver = tf.train.Saver(var_list)
saver.restore(self.sess, path)
def save(self):
"""Save model parameters to self.save_path"""
path = str(self.save_path.resolve())
print('[saving model to {}]'.format(path), file=sys.stderr)
- saver = tf.train.Saver()
+ var_list = [var for var in tf.trainable_variables()
+ if not var.name.startswith('Optimizer')]
+ saver = tf.train.Saver(var_list)
saver.save(self.sess, path)
@abstractmethod
@@ -87,7 +92,7 @@ class TFModel(NNModel, metaclass=TfModelMeta):
Returns:
train_op
"""
-
+ with tf.variable_scope('Optimizer'):
if learnable_scopes is None:
variables_to_train = tf.trainable_variables()
else:
|
Metadata API: add exception tests
Add missing tests testing raising documented
exceptions for "Metadata.sign()",
"Metadata.to_file()" and "Metadata.from_file()". | @@ -13,6 +13,7 @@ import shutil
import sys
import tempfile
import unittest
+from copy import copy
from datetime import datetime, timedelta
from typing import Any, ClassVar, Dict
@@ -126,6 +127,16 @@ class TestMetadata(unittest.TestCase):
os.remove(bad_metadata_path)
+ def test_md_read_write_file_exceptions(self) -> None:
+ # Test writing to a file with bad filename
+ with self.assertRaises(exceptions.StorageError):
+ Metadata.from_file("bad-metadata.json")
+
+ # Test serializing to a file with bad filename
+ with self.assertRaises(exceptions.StorageError):
+ md = Metadata.from_file(f"{self.repo_dir}/metadata/root.json")
+ md.to_file("")
+
def test_compact_json(self) -> None:
path = os.path.join(self.repo_dir, "metadata", "targets.json")
md_obj = Metadata.from_file(path)
@@ -212,6 +223,17 @@ class TestMetadata(unittest.TestCase):
with self.assertRaises(exceptions.UnsignedMetadataError):
targets_key.verify_signature(md_obj)
+ def test_sign_failures(self) -> None:
+ # Test throwing UnsignedMetadataError because of signing problems
+ # related to bad information in the signer.
+ md = Metadata.from_file(f"{self.repo_dir}/metadata/snapshot.json")
+ key_dict = copy(self.keystore[Snapshot.type])
+ key_dict["keytype"] = "rsa"
+ key_dict["scheme"] = "bad_scheme"
+ sslib_signer = SSlibSigner(key_dict)
+ with self.assertRaises(exceptions.UnsignedMetadataError):
+ md.sign(sslib_signer)
+
def test_verify_failures(self) -> None:
root_path = os.path.join(self.repo_dir, "metadata", "root.json")
root = Metadata[Root].from_file(root_path).signed
|
STY: fixed import order
Fixed the order of imports. | @@ -9,9 +9,9 @@ for the pysat data directory structure.
from pysat.utils._core import available_instruments
from pysat.utils._core import display_available_instruments
from pysat.utils._core import display_instrument_stats
+from pysat.utils._core import get_mapped_value
from pysat.utils._core import generate_instrument_list
from pysat.utils._core import listify
-from pysat.utils._core import get_mapped_value
from pysat.utils._core import load_netcdf4
from pysat.utils._core import NetworkLock
from pysat.utils._core import scale_units
|
Also fix Hero shelves/modules cta_urls that start with EXTERNAL_SITE_URL
* Also fix Hero shelves/modules cta_urls that start with EXTERNAL_SITE_URL
In the admin, SITE_URL is different from EXTERNAL_SITE_URL, let's fix
both.
* Fix tests | @@ -244,7 +244,9 @@ class CTACheckMixin:
# Avoid locale & app prefixes in URLs for SecondaryHero/Module for our
# own URLs: addons-frontend will automatically add the right ones
# according to current context when displaying them.
- if self.cta_url.startswith(('/', settings.SITE_URL)):
+ if self.cta_url.startswith(
+ ('/', settings.SITE_URL, settings.EXTERNAL_SITE_URL)
+ ):
parsed = urlparse(self.cta_url)
try:
match = resolve_with_trailing_slash(parsed.path)
|
Expose transform method in preparation for http endpoint
Move the version check as the transform method can be called independently. | @@ -22,16 +22,17 @@ class SpecFactory(object):
fiaas_version = app_config.get(u"version", 1)
self._fiaas_counter.labels(fiaas_version).inc()
LOG.info("Attempting to create app_spec for %s from fiaas.yml version %s", name, fiaas_version)
- if fiaas_version not in self._supported_versions:
- raise InvalidConfiguration("Requested version %s, but the only supported versions are: %r" %
- (fiaas_version, self._supported_versions))
- app_config = self._transform(app_config)
+ app_config = self.transform(app_config)
app_spec = self._factory(name, image, teams, tags, app_config, deployment_id, namespace)
self._validate(app_spec)
return app_spec
- def _transform(self, app_config):
- current_version = app_config.get(u"version", 1)
+ def transform(self, app_config):
+ fiaas_version = app_config.get(u"version", 1)
+ if fiaas_version not in self._supported_versions:
+ raise InvalidConfiguration("Requested version %s, but the only supported versions are: %r" %
+ (fiaas_version, self._supported_versions))
+ current_version = fiaas_version
while current_version < self._factory.version:
app_config = self._transformers[current_version](app_config)
current_version = app_config.get(u"version", 1)
|
PR updated per review comments [skip appveyor] [skip travis]
Moved section on Python requirement to top;
changed description of "side effects" in -c mode. | @@ -109,6 +109,15 @@ and a database of information about previous builds so
details do not have to be recalculated each run.
</para>
+<para>&scons; requires Python 3.5 or later to run;
+there should be no other dependencies or requirements.
+<emphasis>
+Support for Python 3.5 is deprecated since
+&SCons; 4.2 and will be dropped in a future release.
+The CPython project has retired 3.5:
+<ulink url="https://www.python.org/dev/peps/pep-0478"/>.
+</emphasis></para>
+
<para>You set up an &SCons;
build system by writing a script
that describes things to build (<firstterm>targets</firstterm>), and,
@@ -389,18 +398,6 @@ and the Intel compiler tools.
These default values may be overridden
by appropriate setting of &consvars;.</para>
-<para>&scons;
-requires Python 3.5 or higher.
-There should be no other dependencies or requirements to run &scons;.
-</para>
-
-<para><emphasis>
-Support for Python 3.5 is deprecated since
-&SCons; 4.2 and will be dropped in a future release.
-The CPython project has retired 3.5:
-<ulink url="https://www.python.org/dev/peps/pep-0478"/>.
-</emphasis></para>
-
<refsect2 id='target_selection'>
<title>Target Selection</title>
@@ -600,10 +597,10 @@ Will not remove any targets which are marked for
preservation through calls to the &f-link-NoClean; function.
</para>
<para>
-Files created directly by Python code in SConscript files,
-as opposed to work scheduled to builder actions during the build phase,
-are not affected by clean mode. If it is important to clean up
-some other work in clean mode, steps need to be added to handle that.
+While clean mode removes targets rather than building them,
+work which is done directly in Python code in SConscript files
+will still be carried out. If it is important to avoid some
+such work from taking place in clean mode, it should be protected.
An SConscript file can determine which mode
is active by querying &f-link-GetOption;, as in the call
<code>if GetOption("clean"):</code>
|
Add ec pool support
Modified tests/rbd_system.py | @@ -22,7 +22,11 @@ def run(**kw):
config = kw.get('config')
script_name = config.get('test_name')
timeout = config.get('timeout', 1800)
- command = 'sudo python ~/' + test_folder + '/ceph-qe-scripts/rbd/system/' + script_name
+ if config.get('ec-pool-k-m', None):
+ ec_pool_arg = ' --ec-pool-k-m ' + config.get('ec-pool-k-m')
+ else:
+ ec_pool_arg = ''
+ command = 'sudo python ~/' + test_folder + '/ceph-qe-scripts/rbd/system/' + script_name + ec_pool_arg
stdout, stderr = client_node.exec_command(cmd=command, timeout=timeout, check_ec=False)
output = stdout.read()
if output:
|
Bugfix multipart form parsing field storage usage
The previous version would add FieldStorage objects, rather than the
value of each FieldStorage object, to the form data. It also corrects
the unnecessary and hacky-looking value extraction. | @@ -177,8 +177,8 @@ class Request(BaseRequestWebsocket, JSONMixin):
for key in field_storage: # type: ignore
field_storage_key = field_storage[key]
if isinstance(field_storage_key, list):
- for value in field_storage_key:
- self._form.add(key, value)
+ for item in field_storage_key:
+ self._form.add(key, item.value)
elif (
isinstance(field_storage_key, FieldStorage) and
field_storage_key.filename is not None
@@ -188,7 +188,7 @@ class Request(BaseRequestWebsocket, JSONMixin):
field_storage_key.name, field_storage_key.type, field_storage_key.headers, # type: ignore # noqa: E501
)
else:
- self._form.add(key, str(field_storage_key.file.read()))
+ self._form.add(key, field_storage_key.value)
@property
def content_encoding(self) -> Optional[str]:
|
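The list branch is where the old code went wrong: when a field name repeats, `cgi.FieldStorage` returns a list of storage objects, and the submitted text lives in each item's `.value`. A rough sketch using the (since-deprecated) stdlib `cgi` module, with a query string instead of a multipart body for brevity:

```python
import cgi

environ = {"REQUEST_METHOD": "GET", "QUERY_STRING": "tag=red&tag=blue"}
fs = cgi.FieldStorage(environ=environ)

items = fs["tag"]  # a list, because the key repeats
assert [item.value for item in items] == ["red", "blue"]
# the items themselves are storage objects, not the submitted strings
assert all(not isinstance(item, str) for item in items)
```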
osclient: Pass endpoint_type in kw_args to client
Heatclient expects a value for endpoint_type from kw_args or it
uses None. Rally.osclient does not pass endpoint_type to the
heatclient. To rectify this, the endpoint_type variable is
explicitly placed in kw_args, and kw_args is passed to
the Heat client constructor. | @@ -388,12 +388,17 @@ class Heat(OSClient):
"""Return heat client."""
from heatclient import client as heat
+ kw_args = {}
+ if self.credential.endpoint_type:
+ kw_args["endpoint_type"] = self.credential.endpoint_type
+
client = heat.Client(
self.choose_version(version),
session=self.keystone.get_session()[0],
# Remove endpoint once requirement is python-heatclient>=1.6
endpoint=self._get_endpoint(service_type),
- endpoint_override=self._get_endpoint(service_type))
+ endpoint_override=self._get_endpoint(service_type),
+ **kw_args)
return client
|
Ignore facter error when fetching repo
Even though a repo is set with skip_if_unavailable=True,
Facter logs "Error:..." and then "Ignoring repositories...".
We need to add this regexp to the ignored list.
Note: if the repo should not be skipped when unavailable, dnf
would have raised an error before the Facter run. | @@ -43,7 +43,10 @@ re_ignore = re.compile(
'yum.*?install swift-plugin-s3|'
# facter gives a weird NM error when it's disabled, due to
# https://tickets.puppetlabs.com/browse/FACT-697
- 'NetworkManager is not running'
+ 'NetworkManager is not running|'
+ # facter logs Error even though the repository is set to be skipped
+ # if unavailable
+ 'Failed to download metadata for repo'
)
re_notice = re.compile(r'notice: .*Notify\[packstack_info\]'
r'\/message: defined \'message\' as '
|
Add description to policies in migrate_server.py
blueprint policy-docs | # License for the specific language governing permissions and limitations
# under the License.
-from oslo_policy import policy
-
from nova.policies import base
@@ -22,12 +20,26 @@ POLICY_ROOT = 'os_compute_api:os-migrate-server:%s'
migrate_server_policies = [
- policy.RuleDefault(
- name=POLICY_ROOT % 'migrate',
- check_str=base.RULE_ADMIN_API),
- policy.RuleDefault(
- name=POLICY_ROOT % 'migrate_live',
- check_str=base.RULE_ADMIN_API),
+ base.create_rule_default(
+ POLICY_ROOT % 'migrate',
+ base.RULE_ADMIN_API,
+ "Cold migrate a server to a host",
+ [
+ {
+ 'method': 'POST',
+ 'path': '/servers/{server_id}/action (migrate)'
+ }
+ ]),
+ base.create_rule_default(
+ POLICY_ROOT % 'migrate_live',
+ base.RULE_ADMIN_API,
+ "Live migrate a server to a new host without a reboot",
+ [
+ {
+ 'method': 'POST',
+ 'path': '/servers/{server_id}/action (os-migrateLive)'
+ }
+ ]),
]
|
Limit test to single domain
to avoid breaking on tests that don't clean up properly | @@ -469,11 +469,14 @@ class TestAggregations(ElasticTestMixin, SimpleTestCase):
@es_test
class TestDateHistogram(SimpleTestCase):
+ domain = str(uuid.uuid4())
+
@classmethod
def setUpClass(cls):
super().setUpClass()
forms = [{
'_id': str(uuid.uuid4()),
+ 'domain': cls.domain,
'received_on': datetime.fromisoformat(d),
} for d in [
'2021-12-09',
@@ -501,34 +504,33 @@ class TestDateHistogram(SimpleTestCase):
ensure_index_deleted(XFORM_INDEX_INFO.index)
super().tearDownClass()
- def test_year_histogram(self):
- res = (FormES()
+ def _run_aggregation(self, aggregation):
+ return (FormES()
.remove_default_filters()
- .aggregation(DateHistogram('submissions', 'received_on', DateHistogram.Interval.YEAR))
+ .domain(self.domain)
+ .aggregation(aggregation)
.run())
+
+ def test_year_histogram(self):
+ res = self._run_aggregation(DateHistogram(
+ 'submissions', 'received_on', DateHistogram.Interval.YEAR))
counts = res.aggregations.submissions.counts_by_bucket()
self.assertEqual(16, counts['2022'])
def test_month_histogram(self):
- res = (FormES()
- .remove_default_filters()
- .aggregation(DateHistogram('submissions', 'received_on', DateHistogram.Interval.MONTH))
- .run())
+ res = self._run_aggregation(DateHistogram(
+ 'submissions', 'received_on', DateHistogram.Interval.MONTH))
counts = res.aggregations.submissions.counts_by_bucket()
self.assertEqual(5, counts['2022-03'])
def test_day_histogram(self):
- res = (FormES()
- .remove_default_filters()
- .aggregation(DateHistogram('submissions', 'received_on', DateHistogram.Interval.DAY))
- .run())
+ res = self._run_aggregation(DateHistogram(
+ 'submissions', 'received_on', DateHistogram.Interval.DAY))
counts = res.aggregations.submissions.counts_by_bucket()
self.assertEqual(2, counts['2022-03-13'])
def test_only_nonzero_buckets_returned(self):
- res = (FormES()
- .remove_default_filters()
- .aggregation(DateHistogram('submissions', 'received_on', DateHistogram.Interval.DAY))
- .run())
+ res = self._run_aggregation(DateHistogram(
+ 'submissions', 'received_on', DateHistogram.Interval.DAY))
counts = res.aggregations.submissions.counts_by_bucket()
self.assertEqual(15, len(counts))
|
Make dials.apply_mask apply the mask ImageBool
...in addition to simply setting a reference to the mask filename.
This change will only come to have any meaning once dials.apply_mask is reformatted according to the DIALS functional interface boilerplate. | from __future__ import absolute_import, division, print_function
+import cPickle as pickle
+
+from dxtbx.format.image import ImageBool
from iotbx.phil import parse
help_message = """
@@ -97,7 +100,10 @@ def run(self):
for i, imageset in enumerate(imagesets):
# Set the lookup
+ with open(params.input.mask[i]) as f:
+ mask = pickle.load(f)
imageset.external_lookup.mask.filename = params.input.mask[i]
+ imageset.external_lookup.mask.data = ImageBool(mask)
# Dump the experiments
print("Writing experiments to %s" % params.output.experiments)
|
fix(config_flow): display claimspicker_message correctly
Closes | @@ -292,10 +292,12 @@ class AlexaMediaFlowHandler(config_entries.ConfigFlow):
"claimspicker_required" in login.status
and login.status["claimspicker_required"]
):
- message = "> {0}".format(
- login.status["error_message"] if "error_message" in login.status else ""
+ error_message = "> {0}".format(
+ login.status["error_message"]
+ if "error_message" in login.status else ""
)
_LOGGER.debug("Creating config_flow to select verification method")
+ claimspicker_message = login.status["claimspicker_message"]
return await self._show_form(
"claimspicker",
data_schema=vol.Schema(self.claimspicker_schema),
@@ -303,7 +305,8 @@ class AlexaMediaFlowHandler(config_entries.ConfigFlow):
placeholders={
"email": login.email,
"url": login.url,
- "message": message,
+ "message": "> {0}\n> {1}".format(claimspicker_message,
+ error_message),
},
)
elif (
|
Update 4.2 swift version
Uses the swift version that ships with Xcode 10 Beta 3. | @@ -68,9 +68,9 @@ supported_configs = {
},
'4.2': {
'version': 'Apple Swift version 4.2 '
- '(swiftlang-1000.0.16.9 clang-1000.10.25.3)\n'
+ '(swiftlang-1000.0.25.1 clang-1000.10.28.1)\n'
'Target: x86_64-apple-darwin17.7.0\n',
- 'description': 'Xcode 10 Beta 2 (contains Swift 4.2)',
+ 'description': 'Xcode 10 Beta 3 (contains Swift 4.2)',
'branch': 'swift-4.2-branch'
}
},
|
ops: Fix documentation of OpsAccess
States what OpsAccess is used for and fixes the parameter's types. | @@ -58,13 +58,14 @@ class OpsAccessible(basic.Symbol):
class OpsAccess(basic.Basic, sympy.Basic):
"""
- OPS access
+ A single OPS access. The stencil of a given base (generated by to_ops_stencil) is the
+ union of all its accesses.
Parameters
----------
base : OpsAccessible
Symbol to access
- indices: list of tuples of int
+ indices: list of sympy.Integer
Indices to access
"""
|
llvm, tests/predator-prey: Enable per-node compiled run
Controller is compiled as one node so this is almost as fast as LLVMRun. | @@ -133,7 +133,7 @@ def test_simplified_greedy_agent_random(benchmark, mode):
pytest.param([a / 10.0 for a in range(0, 101)]),
], ids=lambda x: len(x))
def test_predator_prey(benchmark, mode, samples):
- if len(samples) > 10 and mode not in {"LLVMRun", "Python-PTX"}:
+ if len(samples) > 10 and mode not in {"LLVM", "LLVMRun", "Python-PTX"}:
pytest.skip("This test takes too long")
# OCM default mode is Python
mode, ocm_mode = (mode + "-Python").split('-')[0:2]
|
Theme/Scheme: Add builtin color completions
This commit adds all the `--...ish`-like colors to the variable completions. The completions are implemented in color_scheme_dev.py to create consistent results with all the locally defined variables and to work around an issue in the sublime-completions file format, which prevents triggers beginning with non-word characters. | @@ -32,6 +32,20 @@ SCHEME_TEMPLATE = """\
],
}""".replace(" ", "\t")
+VARIABLES = [
+ ("--background\tbuiltin color", "--background"),
+ ("--foreground\tbuiltin color", "--foreground"),
+ ("--accent\tbuiltin color", "--accent"),
+ ("--bluish\tbuiltin color", "--bluish"),
+ ("--cyanish\tbuiltin color", "--cyanish"),
+ ("--greenish\tbuiltin color", "--greenish"),
+ ("--orangish\tbuiltin color", "--orangish"),
+ ("--pinkish\tbuiltin color", "--pinkish"),
+ ("--purplish\tbuiltin color", "--purplish"),
+ ("--redish\tbuiltin color", "--redish"),
+ ("--yellowish\tbuiltin color", "--yellowish"),
+]
+
l = logging.getLogger(__name__)
@@ -74,7 +88,7 @@ class ColorSchemeCompletionsListener(sublime_plugin.ViewEventListener):
"entity.name.variable.sublime-theme")
variables = set(self.view.substr(r) for r in variable_regions)
l.debug("Found %d variables to complete: %r", len(variables), sorted(variables))
- return sorted(("{}\tvariable".format(var), var) for var in variables)
+ return VARIABLES + sorted(("{}\tvariable".format(var), var) for var in variables)
def _scope_prefix(self, locations):
# Determine entire prefix
|
don't define api_version variable for whole api.py file
because reo/api.py now has Resources defined for versions 1 and 2 | @@ -50,7 +50,6 @@ from django.core.exceptions import ValidationError
from celery import group, chain
log = logging.getLogger(__name__)
-api_version = "version 1.0.0"
saveToDb = True
@@ -99,6 +98,7 @@ class Job(ModelResource):
return self.get_object_list(bundle.request)
def obj_create(self, bundle, **kwargs):
+ api_version = "version 1.0.0"
# to use the Job API from within the REopt API (see futurecosts/api.py)
if isinstance(bundle, dict):
@@ -266,6 +266,7 @@ class Job2(ModelResource):
return self.get_object_list(bundle.request)
def obj_create(self, bundle, **kwargs):
+ api_version = "version 2.0.0"
# to use the Job API from within the REopt API (see futurecosts/api.py)
if isinstance(bundle, dict):
@@ -273,7 +274,7 @@ class Job2(ModelResource):
run_uuid = str(uuid.uuid4())
data = dict()
- data["outputs"] = {"Scenario": {'run_uuid': run_uuid, 'api_version': "2",
+ data["outputs"] = {"Scenario": {'run_uuid': run_uuid, 'api_version': api_version,
'Profile': {'pre_setup_scenario_seconds': 0, 'setup_scenario_seconds': 0,
'reopt_seconds': 0, 'reopt_bau_seconds': 0,
'parse_run_outputs_seconds': 0},
@@ -362,7 +363,7 @@ class Job2(ModelResource):
content_type='application/json',
status=500)) # internal server error
setup = setup_scenario.s(run_uuid=run_uuid, data=data, api_version=2)
- call_back = process_results.s(data=data, meta={'run_uuid': run_uuid, 'api_version': "2"})
+ call_back = process_results.s(data=data, meta={'run_uuid': run_uuid, 'api_version': api_version})
# (use .si for immutable signature, if no outputs were passed from reopt_jobs)
rjm = run_jump_model.s(data=data)
rjm_bau = run_jump_model.s(data=data, bau=True)
|
Optimization: Enable "void" C type
* This avoids storing values before releasing them, as it forces the optimization
to handle unused values perfectly. | @@ -22,8 +22,6 @@ only statement.
"""
-from nuitka import Options
-
from .CodeHelpers import generateExpressionCode
from .ErrorCodes import getReleaseCode
@@ -37,20 +35,11 @@ def generateExpressionOnlyCode(statement, emit, context):
def getStatementOnlyCode(value, emit, context):
- # TODO: Introduce "void" as a C type, which discards all assignments
- # as a no-op.
- if Options.isExperimental("enable_void_ctype"):
tmp_name = context.allocateTempName(
base_name = "unused",
type_name = "void",
unique = True
)
- else:
- tmp_name = context.allocateTempName(
- base_name = "unused",
- type_name = "NUITKA_MAY_BE_UNUSED PyObject *",
- unique = True
- )
generateExpressionCode(
expression = value,
|
issue Revert "ci: update to Ansible 2.8.3"
This reverts commit | @@ -36,25 +36,25 @@ matrix:
include:
# Debops tests.
- # 2.8.3; 3.6 -> 2.7
+ # 2.8.0; 3.6 -> 2.7
- python: "3.6"
- env: MODE=debops_common VER=2.8.3
+ env: MODE=debops_common VER=2.8.0
# 2.4.6.0; 2.7 -> 2.7
- python: "2.7"
env: MODE=debops_common VER=2.4.6.0
# Sanity check against vanilla Ansible. One job suffices.
- python: "2.7"
- env: MODE=ansible VER=2.8.3 DISTROS=debian STRATEGY=linear
+ env: MODE=ansible VER=2.8.0 DISTROS=debian STRATEGY=linear
# ansible_mitogen tests.
- # 2.8.3 -> {debian, centos6, centos7}
+ # 2.8.0 -> {debian, centos6, centos7}
- python: "3.6"
- env: MODE=ansible VER=2.8.3
- # 2.8.3 -> {debian, centos6, centos7}
+ env: MODE=ansible VER=2.8.0
+ # 2.8.0 -> {debian, centos6, centos7}
- python: "2.7"
- env: MODE=ansible VER=2.8.3
+ env: MODE=ansible VER=2.8.0
# 2.4.6.0 -> {debian, centos6, centos7}
- python: "3.6"
|
Fixed client-side errors, like data type validation, not displaying.
Also removed server-error-message class, which wasn't doing anything,
and removed the bolding from the new UI. | <ul data-bind="foreach: erroredQuestions">
<li>
<a href="#" data-bind="click: navigateTo, html: caption_markdown() || caption()"></a>
- <span class="error-message server-error-message" data-bind="
- visible: serverError,
- text: serverError
- "></span>
- <span class="error-message server-error-message" data-bind="
- ifnot: serverError
- ">
+ <span data-bind="visible: serverError, text: serverError"></span>
+ <span data-bind="if: error">
+ <!-- ko text: error --><!-- /ko -->
+ </span>
+ <span data-bind="visible: !serverError() && !error()">
{% trans "An answer is required." %}
</span>
</li>
- </li>
</ul>
</div>
</div>
visible: error,
text: error
"></div>
- <div class="text-danger error-message server-error-message" data-bind="
+ <div class="text-danger error-message" data-bind="
visible: serverError,
text: serverError
"></div>
|
py3/tools: Remove a tuple function parameter
PEP 3113 removed this feature. | @@ -255,12 +255,13 @@ class Reindenter:
return line
# Line-eater for tokenize.
- def tokeneater(self, type, token, (sline, scol), end, line,
+ def tokeneater(self, type, token, sline_and_scol, end, line,
INDENT=tokenize.INDENT,
DEDENT=tokenize.DEDENT,
NEWLINE=tokenize.NEWLINE,
COMMENT=tokenize.COMMENT,
NL=tokenize.NL):
+ (sline, scol) = sline_and_scol
if type == NEWLINE:
# A program statement, or ENDMARKER, will eventually follow,
|
(fix) Changes OMS connector missing order behaviour on cancellation
The connector now logs the order as not found with the order tracker if this message is received on a cancellation request. | @@ -205,9 +205,7 @@ class OMSExchange(ExchangePyBase):
cancel_success = False
if cancel_result.get(CONSTANTS.ERROR_CODE_FIELD):
if cancel_result[CONSTANTS.ERROR_CODE_FIELD] == CONSTANTS.RESOURCE_NOT_FOUND_ERR_CODE:
- self._order_not_found_on_cancel_record[order_id] += 1
- if self._order_not_found_on_cancel_record[order_id] >= CONSTANTS.MAX_ORDER_NOT_FOUND_ON_CANCEL:
- cancel_success = True
+ await self._order_tracker.process_order_not_found(order_id)
else:
raise IOError(cancel_result[CONSTANTS.ERROR_MSG_FIELD])
cancel_success = cancel_success or cancel_result[CONSTANTS.RESULT_FIELD]
|
Optimization: Faster exception dropping
* Avoid getting the thread state twice; on Python 3 this can be slower.
* Also avoid reading the current exception type again, as we already know it's
set. | @@ -556,12 +556,24 @@ NUITKA_MAY_BE_UNUSED static inline void ADD_EXCEPTION_CONTEXT(PyObject **excepti
*/
NUITKA_MAY_BE_UNUSED static bool CHECK_AND_CLEAR_STOP_ITERATION_OCCURRED(void) {
- PyObject *error = GET_ERROR_OCCURRED();
+ PyThreadState *tstate = PyThreadState_GET();
- if (error == NULL) {
+ if (tstate->curexc_type == NULL) {
return true;
- } else if (EXCEPTION_MATCH_BOOL_SINGLE(error, PyExc_StopIteration)) {
- CLEAR_ERROR_OCCURRED();
+ } else if (EXCEPTION_MATCH_BOOL_SINGLE(tstate->curexc_type, PyExc_StopIteration)) {
+ // Clear the exception first, we know it doesn't have side effects.
+ Py_DECREF(tstate->curexc_type);
+ tstate->curexc_type = NULL;
+
+ PyObject *old_value = tstate->curexc_value;
+ PyObject *old_tb = tstate->curexc_traceback;
+
+ tstate->curexc_value = NULL;
+ tstate->curexc_traceback = NULL;
+
+ Py_XDECREF(old_value);
+ Py_XDECREF(old_tb);
+
return true;
} else {
return false;
@@ -575,12 +587,24 @@ NUITKA_MAY_BE_UNUSED static bool CHECK_AND_CLEAR_STOP_ITERATION_OCCURRED(void) {
*/
NUITKA_MAY_BE_UNUSED static bool CHECK_AND_CLEAR_KEY_ERROR_OCCURRED(void) {
- PyObject *error = GET_ERROR_OCCURRED();
+ PyThreadState *tstate = PyThreadState_GET();
- if (error == NULL) {
+ if (tstate->curexc_type == NULL) {
return true;
- } else if (EXCEPTION_MATCH_BOOL_SINGLE(error, PyExc_KeyError)) {
- CLEAR_ERROR_OCCURRED();
+ } else if (EXCEPTION_MATCH_BOOL_SINGLE(tstate->curexc_type, PyExc_KeyError)) {
+ // Clear the exception first, we know it doesn't have side effects.
+ Py_DECREF(tstate->curexc_type);
+ tstate->curexc_type = NULL;
+
+ PyObject *old_value = tstate->curexc_value;
+ PyObject *old_tb = tstate->curexc_traceback;
+
+ tstate->curexc_value = NULL;
+ tstate->curexc_traceback = NULL;
+
+ Py_XDECREF(old_value);
+ Py_XDECREF(old_tb);
+
return true;
} else {
return false;
|
library.Field class rename to FilterField
update library.LibrarySection.filterFields() usage | @@ -479,7 +479,7 @@ class LibrarySection(PlexObject):
for meta in data.iter('Meta'):
for metaType in meta.iter('Type'):
if not mediaType or metaType.attrib.get('type') == mediaType:
- fields = self.findItems(metaType, Field)
+ fields = self.findItems(metaType, FilterField)
for field in fields:
field._initpath = metaType.attrib.get('key')
fieldType = [_ for _ in self.findItems(meta, FieldType) if _.type == field.type]
@@ -1157,8 +1157,8 @@ class Sort(PlexObject):
self.firstCharacterKey = data.attrib.get('firstCharacterKey')
-class Field(PlexObject):
- """ Represents a
+class FilterField(PlexObject):
+ """ Represents a Filters Field element found in library.
"""
TAG = 'Field'
|
util: testing: consoletest: commands: run_command(): Kill process group
Caused issues with InnerSource swportal nodejs http-server
which liked to hang around and keep unittest alive | @@ -14,6 +14,7 @@ import asyncio
import pathlib
import inspect
import tempfile
+import platform
import functools
import contextlib
import subprocess
@@ -361,7 +362,9 @@ def pipes(cmd):
async def stop_daemon(proc):
- # Send ctrl-c to daemon if running
+ if platform.system() != "Windows":
+ # Kill the whole process group (for problematic processes)
+ os.killpg(proc.pid, signal.SIGINT)
proc.send_signal(signal.SIGINT)
proc.wait()
|
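Worth noting alongside the hunk above: `os.killpg(proc.pid, ...)` only reaches the whole tree when the child is its own process-group leader, which is typically arranged when the process is started. A hedged, POSIX-only sketch (not the project's helper):

```python
import os
import signal
import subprocess

# start_new_session=True makes the child a session (and process-group) leader,
# so proc.pid doubles as the process-group id.
proc = subprocess.Popen(["sleep", "60"], start_new_session=True)

# Interrupt the child and anything it spawned.
os.killpg(proc.pid, signal.SIGINT)
proc.wait()
```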
configuration/plugin_cache: Add target names as allowed plugin configs
Allow setting of device configuration by specifying target name. | @@ -80,6 +80,7 @@ class PluginCache(object):
raise RuntimeError(msg.format(source))
if (not self.loader.has_plugin(plugin_name) and
+ plugin_name not in self.targets and
plugin_name not in GENERIC_CONFIGS):
msg = 'configuration provided for unknown plugin "{}"'
raise ConfigError(msg.format(plugin_name))
|
Update generic.txt
No ```POST``` to identify malware explicitly, hence -- ```generic.txt``` | @@ -3642,3 +3642,12 @@ http://176.10.118.191
# Reference: https://www.virustotal.com/gui/domain/yourdocument.biz/relations
yourdocument.biz
+
+# Reference: https://twitter.com/takerk734/status/1135955547310632960
+
+http://95.213.217.139
+http://54.36.218.96
+maidcafeyoyo.fun
+simbaooshi.space
+summerch.xyz
+wagenstead.xyz
|
Add PacemakerNetwork definition
This gives us the 'pacemaker_node_ips' hiera key on all nodes, which
will be needed because pcs 0.10 needs to specify the ip addresses of the
cluster when setting up a pcmk2.0 cluster based on knet-corosync. | @@ -99,6 +99,7 @@ parameters:
MistralApiNetwork: {{ _service_nets.get('internal_api', 'ctlplane') }}
ZaqarApiNetwork: {{ _service_nets.get('internal_api', 'ctlplane') }}
DockerRegistryNetwork: ctlplane
+ PacemakerNetwork: {{ _service_nets.get('internal_api', 'ctlplane') }}
PacemakerRemoteNetwork: {{ _service_nets.get('internal_api', 'ctlplane') }}
TripleoUINetwork: {{ _service_nets.get('internal_api', 'ctlplane') }}
DesignateApiNetwork: {{ _service_nets.get('internal_api', 'ctlplane') }}
|
Swap two lines in a build file
Swap the order of two lines in a build file to silence the warning when transforming OSS code into the Google-internal codebase. | @@ -57,8 +57,8 @@ tfx_py_proto_library(
srcs = ["local_deployment_config.proto"],
deps = [
":executable_spec_py_pb2",
- ":platform_config_py_pb2",
":metadata_py_pb2",
+ ":platform_config_py_pb2",
"@com_github_google_ml_metadata//ml_metadata/proto:metadata_store_py_pb2",
],
)
|
Fix two mistakes in a method description
Fix two mistakes in a method description in processor.py | @@ -22,8 +22,8 @@ def main():
service.prepare_service()
# NOTE(mc): This import is done here to ensure that the prepare_service()
- # fonction is called before any cfg option. By importing the orchestrator
- # file, the utils one is imported too, and then some cfg option are read
+ # function is called before any cfg option. By importing the orchestrator
+ # file, the utils one is imported too, and then some cfg options are read
# before the prepare_service(), making cfg.CONF returning default values
# systematically.
from cloudkitty import orchestrator
|
purge-container: get *all* OSD ids
Adding `--all` to the `systemctl list-units` command in order to get
*all* OSD ids on the node (including stopped OSDs). Otherwise, it will
purge the cluster but there will be leftovers after that.
Closes: | - name: get all the running osds
shell: |
- systemctl list-units | grep 'loaded[[:space:]]\+active' | grep -oE "ceph-osd@([0-9]+).service"
+ systemctl list-units --all | grep -oE "ceph-osd@([0-9]+).service"
register: osd_units
ignore_errors: true
|
Update ConfigurationForm.html to allow blank s3_unload_location
Currently, the form gives a validation failure if the optional System Tables S3 Unload Location form field isn't filled in. | @@ -61,6 +61,10 @@ $('#config-form').parsley().on('field:validated', function() {
if (config['kms_auth_context'] == "") {
delete config['kms_auth_context'];
}
+ if (config['s3_unload_location'] == "") {
+ delete config['s3_unload_location'];
+ }
+
config['comprows'] = -1;
config['ignore_errors'] = true;
@@ -131,7 +135,7 @@ $('#config-form').parsley().on('field:validated', function() {
<input type="text" class="form-control" name="configuration[schema_name]" required="true" data-parsley-trigger="change" value="public">
<label for="systableUnloadLoc">System Tables S3 Unload Location :</label>
- <input type="text" class="form-control" name="configuration[s3_unload_location]" required="false" data-parsley-trigger="change" value="">
+ <input type="text" class="form-control" name="configuration[s3_unload_location]" data-parsley-required="false" data-parsley-trigger="change" value="">
<label for="systableUnloadRoleArn">System Tables Unload Role ARN :</label>
<input type="text" class="form-control" name="configuration[s3_unload_role_arn]" required="false" data-parsley-trigger="change" value="arn:aws:iam::<your account number>:role/<role name>">
|
plot labels and vlines in every LogPlotter.apply_commands() call
fixes not plotting labels and vlines when show_legends was False | @@ -120,8 +120,8 @@ class LogPlotter(Struct):
yminor_locator = AutoLocator()
self.ax[ig].yaxis.set_minor_locator(yminor_locator)
- if self.show_legends:
for ig, ax in enumerate(self.ax):
+ if self.show_legends:
try:
ax.legend()
except:
|
Enhancement to Resize Augmentation
Added antialiasing option to Resize augmentation similar to Resize in geometry.transforms. | @@ -18,6 +18,7 @@ class Resize(GeometricAugmentationBase2D):
side: Which side to resize, if size is only of type int.
resample: Resampling mode.
align_corners: interpolation flag.
+ antialias: if True, then image will be filtered with Gaussian before downscaling. No effect for upscaling.
keepdim: whether to keep the output shape the same as input (True) or broadcast it
to the batch form (False).
"""
@@ -28,13 +29,20 @@ class Resize(GeometricAugmentationBase2D):
side: str = "short",
resample: Union[str, int, Resample] = Resample.BILINEAR.name,
align_corners: bool = True,
+ antialias: bool = False,
p: float = 1.0,
return_transform: Optional[bool] = None,
keepdim: bool = False,
) -> None:
super().__init__(p=1., return_transform=return_transform, same_on_batch=True, p_batch=p, keepdim=keepdim)
self._param_generator = cast(rg.ResizeGenerator, rg.ResizeGenerator(resize_to=size, side=side))
- self.flags = dict(size=size, side=side, resample=Resample.get(resample), align_corners=align_corners)
+ self.flags = dict(
+ size=size,
+ side=side,
+ resample=Resample.get(resample),
+ align_corners=align_corners,
+ antialias=antialias
+ )
def compute_transformation(self, input: Tensor, params: Dict[str, Tensor]) -> Tensor:
if params["output_size"] == input.shape[-2:]:
@@ -60,6 +68,7 @@ class Resize(GeometricAugmentationBase2D):
out_size,
interpolation=(self.flags["resample"].name).lower(),
align_corners=self.flags["align_corners"],
+ antialias=self.flags["antialias"]
)
return out
|
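Assuming this is the kornia-style `Resize` augmentation the hunk suggests, usage with the new flag would look roughly like this (import path and shapes are assumptions):

```python
import torch
from kornia.augmentation import Resize

aug = Resize((64, 64), antialias=True)  # Gaussian pre-filter before downscaling
images = torch.rand(4, 3, 256, 256)     # B, C, H, W
out = aug(images)
assert out.shape == (4, 3, 64, 64)
```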
Fixes formatting
Minor Markdown formatting fixes. | @@ -74,7 +74,7 @@ We've created this [tutorial](/tutorial) to build a basic Slack app in less than
---
-Slack provide a Web API that gives you the ability to build applications that interact with Slack in a variety of ways. This Development Kit is a module based wrapper that makes interaction with that API easier. We have a basic example here with some of the more common uses but a full list of the available methods are available [here][api-methods]. More detailed examples can be found in our [Basic Usage][https://slack.dev/python-slackclient/basic_usage.html] guide
+Slack provide a Web API that gives you the ability to build applications that interact with Slack in a variety of ways. This Development Kit is a module based wrapper that makes interaction with that API easier. We have a basic example here with some of the more common uses but a full list of the available methods are available [here][api-methods]. More detailed examples can be found in our [Basic Usage](https://slack.dev/python-slackclient/basic_usage.html) guide
#### Sending a message to Slack
|
Fix test repo schedules
Test Plan: buildkite
Reviewers: sashank | @@ -158,6 +158,8 @@ def long_running_pipeline_celery():
def define_demo_execution_repo():
+ from .schedules import define_schedules
+
return RepositoryDefinition(
name='demo_execution_repo',
pipeline_dict={
@@ -165,4 +167,5 @@ def define_demo_execution_repo():
'long_running_pipeline_celery': define_long_running_pipeline_celery,
},
pipeline_defs=[demo_pipeline, demo_pipeline_gcs, demo_error_pipeline, optional_outputs,],
+ schedule_defs=define_schedules(),
)
|
update esmya text
small tweaks | -Hello!
+Hello,
This is an important safety update about {{ org_name }} from OpenPrescribing.
Last night the MHRA issued a safety alert and advised that patients taking Esmya (ulipristal acetate) for uterine fibroids should STOP their treatment IMMEDIATELY.
@@ -6,7 +6,7 @@ We have identified that your organisation has issued prescriptions for Esmya (ul
You should:
* Use your clinical system to identify the patients taking Esmya (ulipristal acetate) and those who have recently stopped
-* Phone or text them to advising them to IMMEDIATELY STOP TREATMENT
+* Phone or text them advising them to IMMEDIATELY STOP TREATMENT
* Advise people who are stopping treatment and recent users to seek immediate medical attention if they develop signs and symptoms of liver injury (nausea, vomiting, malaise, right hypochondrial pain, anorexia, asthenia or jaundice)
* Arrange liver function tests for two to four weeks time
|
SetExpression Doc : prefer 'difference' over 'ANDNOT'
and also prefer British English over American English. | @@ -22,13 +22,13 @@ A B C B C D C D E E
The following operators are currently supported
```eval_rst
-=================== ====================================
-Operator Behavior
-=================== ====================================
-\| OR, unites two sets
-& AND, intersects two sets
-\- ANDNOT, removes elements from sets
-=================== ====================================
+=================== ======================================
+Operator Behaviour
+=================== ======================================
+\| Union, unites two sets
+& Intersection, intersects two sets
+\- Difference, removes elements from sets
+=================== ======================================
```
Simple Examples
@@ -72,7 +72,7 @@ set1 \- (B C) A
Operator Precedence
-------------------
-Operations in the expression are executed in the following order: ANDNOT before AND before OR. The following examples demonstrate this in action.
+Operations in the expression are executed in the following order: difference before intersection before union. The following examples demonstrate this in action.
```eval_rst
==================== ==============================
|
test: add Node.js 6 on Windows to Travis CI
Test the oldest supported Node version on Windows.
PR-URL: | @@ -10,7 +10,13 @@ matrix:
osx_image: xcode10.2
language: shell # 'language: python' is not yet supported on macOS
before_install: HOMEBREW_NO_AUTO_UPDATE=1 brew install npm
- - name: "Python 2.7 on Windows"
+ - name: "Node.js 6 & Python 2.7 on Windows"
+ os: windows
+ language: node_js
+ node_js: 6 # node
+ env: PATH=/c/Python27:/c/Python27/Scripts:$PATH
+ before_install: choco install python2
+ - name: "Node.js 12 & Python 2.7 on Windows"
os: windows
language: node_js
node_js: 12 # node
|
help docs: Fix a wrong link in create-a-stream doc
Discussion: | # Create a stream
By default, all users other than guests can create streams. Administrators can
-[restrict the ability to create a stream](/help/stream-permissions) to specific
+[restrict the ability to create a stream](/help/configure-who-can-create-streams) to specific
[roles](/help/roles-and-permissions).
If you are an administrator setting up streams for the first time, check out our
|
Fix default branch
Git now uses the branch that the cache was checked out to, rather than master, by default.
This then follows the selection of 'default branch' as understood by GitHub. | @@ -1278,12 +1278,23 @@ class Repo(object):
with self.cache_lock_held(url):
shutil.copytree(cache, path)
+ #
+ # If no revision was specified, use the branch associated with the cache. In the
+ # github case this will be the default branch (IOTBTOOL-279)
+ #
+ if not rev:
+ with cd(cache):
+ branch = scm.getbranch()
+ if branch:
+ rev = branch
+ else:
+ # Can't find the cache branch; use a sensible default
+ rev = scm.default_branch
+
with cd(path):
scm.seturl(formaturl(url, protocol))
scm.cleanup()
info("Update cached copy from remote repository")
- if not rev:
- rev = scm.default_branch
scm.update(rev, True, is_local=offline)
main = False
except (ProcessException, IOError):
|
Add debug flag to helm integration suite
Summary: So we can see the rendered templates when debugging errors
Test Plan: integration
Reviewers: max | @@ -191,6 +191,7 @@ def _helm_chart_helper(namespace, should_cleanup, helm_config, helm_install_name
"install",
"--namespace",
namespace,
+ "--debug",
"-f",
"-",
"dagster",
@@ -203,8 +204,8 @@ def _helm_chart_helper(namespace, should_cleanup, helm_config, helm_install_name
helm_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = p.communicate(helm_config_yaml.encode("utf-8"))
- print("Helm install completed with stdout: ", stdout)
- print("Helm install completed with stderr: ", stderr)
+ print("Helm install completed with stdout: ", stdout.decode("utf-8"))
+ print("Helm install completed with stderr: ", stderr.decode("utf-8"))
assert p.returncode == 0
# Wait for Dagit pod to be ready (won't actually stay up w/out js rebuild)
|
Forbid using `qubit` as the type of the loop variable
There currently are no defined semantics for "assigning" to a qubit
outside of subroutines, and making this change in the context of the
for-loop is too much. If this is to be allowed, it should be part of a
larger discussion. | @@ -210,9 +210,9 @@ iterations of the loop ``body``. ``values`` can be:
and ``stop`` is an ``int[16]``, the values to be assigned will all be of type
``int[16]``.
-- a value of type ``qubit[n]`` or ``bit[n]``, or the target of a
- ``let`` statement. The corresponding scalar type of the loop variable is
- ``qubit`` or ``bit``, as appropriate.
+- a value of type ``bit[n]``, or the target of a ``let`` statement that creates
+ an alias to classical bits. The corresponding scalar type of the loop
+ variable is ``bit``, as appropriate.
- a value of type ``array[<scalar>, n]``, _i.e._ a one-dimensional
array. Values of type ``scalar`` must be able to be implicitly promoted to
@@ -253,11 +253,11 @@ accessible after the loop.
// do something with 'f'
}
- // Loop over a register of qubits.
- qubit[5] register;
- for q in register {}
+ // Loop over a register of bits.
+ bit[5] register;
+ for b in register {}
let alias = register[1:3];
- for q in alias {}
+ for b in alias {}
While loops
|
Contributing steps updated
Minor phrasing updates to match GitHub format changes. | @@ -20,10 +20,10 @@ To generate the HTML files from markdown in the `/source` directory:
1. Sign the Contributor License Agreement (see instructions in the next section).
3. On the Mattermost Documentation page that you want to edit, click the GitHub icon on the upper right corner that says "Edit".
-4. Click "Edit this file" (pencil icon).
-5. After making changes, check the "Create a new branch for this commit and start a pull request".
-6. Make sure that the Pull Request has a descriptive title. Add comments to briefly tell others what you have worked on (optional).
-7. Click "Create a Pull Request".
+4. Click "Edit the file in your fork of this project" (pencil icon) on the upper right corner.
+5. After making changes, check the "Propose file change" button.
+6. Compare changes with the original document.
+7. Click "Create a Pull Request". Make sure that the Pull Request has a descriptive title. Add comments to briefly tell others what you have worked on (optional).
**Signing CLA:**
|
Workshop tags: add reading & parsing split "latlng" into ("lat", "lng")
This fixes | @@ -52,7 +52,8 @@ NUM_TRIES = 100
ALLOWED_METADATA_NAMES = [
'slug', 'startdate', 'enddate', 'country', 'venue', 'address',
- 'latlng', 'language', 'eventbrite', 'instructor', 'helper', 'contact',
+ 'latlng', 'lat', 'lng', 'language', 'eventbrite', 'instructor', 'helper',
+ 'contact',
]
@@ -651,7 +652,7 @@ def find_metadata_on_event_homepage(content):
filtered_metadata = {key: value for key, value in metadata.items()
if key in ALLOWED_METADATA_NAMES}
for key, value in filtered_metadata.items():
- if isinstance(value, int):
+ if isinstance(value, int) or isinstance(value, float):
filtered_metadata[key] = str(value)
elif isinstance(value, datetime.date):
filtered_metadata[key] = '{:%Y-%m-%d}'.format(value)
@@ -686,15 +687,23 @@ def parse_metadata_from_event_website(metadata):
if len(language) < 2:
language = ''
+ # read either ('lat', 'lng') pair or (old) 'latlng' comma-separated value
+ if 'lat' in metadata and 'lng' in metadata:
+ latitude = metadata.get('lat', '')
+ longitude = metadata.get('lng', '')
+ else:
+ try:
+ latitude, longitude = metadata.get('latlng', '').split(',')
+ except (ValueError, AttributeError):
+ latitude, longitude = None, None
+
try:
- latitude, _ = metadata.get('latlng', '').split(',')
latitude = float(latitude.strip())
except (ValueError, AttributeError):
# value error: can't convert string to float
# attribute error: object doesn't have "split" or "strip" methods
latitude = None
try:
- _, longitude = metadata.get('latlng', '').split(',')
longitude = float(longitude.strip())
except (ValueError, AttributeError):
# value error: can't convert string to float
@@ -766,14 +775,27 @@ def validate_metadata_from_event_website(metadata):
Requirement('country', None, True, TWOCHAR_FMT),
Requirement('venue', None, True, None),
Requirement('address', None, True, None),
- Requirement('latlng', 'latitude / longitude', True,
- '^' + FRACTION_FMT + r',\s?' + FRACTION_FMT + '$'),
Requirement('instructor', None, True, None),
Requirement('helper', None, True, None),
Requirement('contact', None, True, None),
Requirement('eventbrite', 'Eventbrite event ID', False, r'^\d+$'),
]
+ # additional, separate check for latitude and longitude data
+ latlng_req = Requirement('latlng', 'latitude / longitude', True,
+ r'^{},\s?{}$'.format(FRACTION_FMT, FRACTION_FMT))
+ lat_req = Requirement('lat', 'latitude', True, '^' + FRACTION_FMT + '$')
+ lng_req = Requirement('lng', 'longitude', True, '^' + FRACTION_FMT + '$')
+
+ # separate 'lat' and 'lng' are supported since #1461,
+ # but here we're checking which requirement to add to the list of
+ # "required" requirements
+ if 'lat' in metadata or 'lng' in metadata:
+ requirements.append(lat_req)
+ requirements.append(lng_req)
+ else:
+ requirements.append(latlng_req)
+
for requirement in requirements:
d_ = requirement._asdict()
name_ = ('{display} ({name})'.format(**d_)
|
[cleanup] Instantiate GeneratorFactory for commons site
Part 4 detached from | @@ -200,7 +200,8 @@ def main(*args):
# Process global args and prepare generator args parser
local_args = pywikibot.handle_args(args)
- genFactory = pagegenerators.GeneratorFactory()
+ site = pywikibot.Site('commons', 'commons')
+ genFactory = pagegenerators.GeneratorFactory(site=site)
for arg in local_args:
if arg == '-onlyuncat':
@@ -210,10 +211,8 @@ def main(*args):
generator = genFactory.getCombinedGenerator()
if not generator:
- site = pywikibot.Site('commons', 'commons')
generator = pagegenerators.CategorizedPageGenerator(
- pywikibot.Category(site, 'Category:Media needing categories'),
- recurse=True)
+ pywikibot.Category(site, 'Media needing categories'), recurse=True)
initLists()
categorizeImages(generator, onlyUncat)
|
Fix a typo
Fix a typo in parsing boolean values | @@ -277,7 +277,7 @@ class BoolParamType(ParamType):
if isinstance(value, bool):
return bool(value)
value = value.lower()
- if value in ('true', 't,' '1', 'yes', 'y'):
+ if value in ('true', 't', '1', 'yes', 'y'):
return True
elif value in ('false', 'f', '0', 'no', 'n'):
return False
|
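The typo above is easy to miss because adjacent string literals concatenate implicitly, so the broken tuple silently has one element fewer. A quick demonstration:

```python
# 't,' '1' fuses into a single element 't,1'
broken = ('true', 't,' '1', 'yes', 'y')
assert broken == ('true', 't,1', 'yes', 'y')
assert 't' not in broken and '1' not in broken  # neither value was ever accepted

fixed = ('true', 't', '1', 'yes', 'y')
assert 't' in fixed and '1' in fixed
```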
Improve log messages
The origin is now precisely printed without the need to explicitly
specify it. | @@ -103,7 +103,11 @@ def log(message, level="INFO", origin=None, prefix=""):
# originname = origin.bl_idname
#else:
# originname = origin
- originname = inspect.stack()[1][1].split('addons/')[-1] + ' - ' + inspect.stack()[1][3]
+ #originname = inspect.stack()[1][1].split('addons/')[-1] + ' - ' + inspect.stack()[1][3]
+ callerframerecord = inspect.stack()[1]
+ frame = callerframerecord[0]
+ info = inspect.getframeinfo(frame)
+ originname = info.filename.split('addons/')[-1] + ' - ' + info.function + '(' + str(info.lineno) + ')'
# Display only messages up to preferred log level
prefs = bpy.context.user_preferences.addons["phobos"].preferences
|
Forbid implicit envs for all properties in EnvSpec
TN: | @@ -240,7 +240,7 @@ class EnvSpec(object):
expr, AbstractNodeData.PREFIX_INTERNAL,
name=names.Name('_{}_{}'.format(name,
next(self.PROPERTY_COUNT))),
- public=False, type=type, has_implicit_env=True
+ public=False, type=type
)
result.append(p)
return p
|
Remove redundant code in QosServiceDriverManager
I don't see notification_api [1] used anywhere, and it
duplicates push_api [2]. So this patch set removes it.
[1]https://github.com/openstack/neutron/blob/master/neutron/services/qos/drivers/manager.py#L41
[2]https://github.com/openstack/neutron/blob/master/neutron/services/qos/drivers/manager.py#L50
TrivialFix | @@ -38,7 +38,6 @@ class QosServiceDriverManager(object):
def __init__(self):
self._drivers = []
- self.notification_api = resources_rpc.ResourcesPushRpcApi()
self.rpc_notifications_required = False
rpc_registry.provide(self._get_qos_policy_cb, resources.QOS_POLICY)
# notify any registered QoS driver that we're ready, those will
|
Update writing_NXdata.rst
The interpretation attribute is duly documented by NeXus. | @@ -154,8 +154,7 @@ a *frame number*.
.. note::
- This additional attribute is not mentionned in the official NXdata
- specification.
+ This attribute is documented in the official NeXus `description <https://manual.nexusformat.org/nxdl_desc.html>`_
Writing NXdata with h5py
|
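For readers unfamiliar with the attribute being discussed, writing it with h5py might look roughly like this (a hedged sketch of the convention, not text from the document):

```python
import h5py
import numpy as np

with h5py.File("example.h5", "w") as f:
    data = f.create_group("entry/data")
    data.attrs["NX_class"] = "NXdata"
    data.attrs["signal"] = "frames"
    frames = data.create_dataset("frames", data=np.zeros((10, 32, 32)))
    # Hint to viewers that the last two axes of each frame form an image
    frames.attrs["interpretation"] = "image"
```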
Update arrayeditor.py to correct deprecated numpy operator
Changed the numpy boolean subtract operator '-' to np.logical_xor, as suggested in the numpy deprecation warning.
Tested locally with no error originally described in | @@ -266,7 +266,7 @@ def data(self, index, role=Qt.DisplayRole):
elif role == Qt.BackgroundColorRole and self.bgcolor_enabled \
and value is not np.ma.masked:
hue = self.hue0+\
- self.dhue*(self.vmax-self.color_func(value)) \
+ self.dhue*(np.logical_xor(self.vmax,self.color_func(value))) \
/(self.vmax-self.vmin)
hue = float(np.abs(hue))
color = QColor.fromHsvF(hue, self.sat, self.val, self.alp)
|
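For context on the change above: recent NumPy releases reject the '-' operator on boolean arrays and point to logical_xor (or the ^ operator) instead. A small sketch, independent of the Spyder code, that demonstrates the behaviour:

import numpy as np

a = np.array([True, True, False])
b = np.array([True, False, False])

try:
    a - b
except TypeError as err:
    # Modern NumPy refuses boolean subtraction outright.
    print('boolean subtract rejected:', err)

print(np.logical_xor(a, b))   # [False  True False]
print(a ^ b)                  # same result via the bitwise operator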
Add disk icon
Can't seem to set the icon of the .dmg itself, but this sets the icon
while it's mounted | @@ -37,7 +37,7 @@ class macos(app):
def install_icon(self):
shutil.copyfile(
"%s.icns" % self.icon,
- self.icon_install_path
+ os.path.join(self.resource_dir, '%s.icns' % self.distribution.get_name())
)
for tag, doctype in self.document_types.items():
@@ -74,6 +74,7 @@ class macos(app):
settings = {'files': [self.app_location],
'symlinks': {'Applications': '/Applications'},
'background': self.background_image,
+ 'icon': os.path.join(self.resource_dir, '%s.icns' % self.distribution.get_name()),
}
dmgbuild.build_dmg(filename=dmg_path,
volume_name=self.formal_name,
|
Fix split multi hts
fixes | @@ -2044,7 +2044,7 @@ def split_multi_hts(ds, keep_star=False, left_aligned=False):
ds.entry_schema, hl.hts_entry_schema
))
- sm = SplitMulti(ds)
+ sm = SplitMulti(ds, keep_star=keep_star, left_aligned=left_aligned)
pl = hl.or_missing(
hl.is_defined(ds.PL),
(hl.range(0, 3).map(lambda i: hl.min((hl.range(0, hl.triangle(ds.alleles.length()))
|
settings: Link organization settings users to user cards.
This makes it easier to browse details on users and bots when
interacting with them in the settings interface.
While the original issue was about just the bots panel, this is
clearly useful for all users.
Fixes: | <tr class="user_row{{#unless is_active}} deactivated_user{{/unless}}" data-user-id="{{user_id}}">
<td>
- <span class="user_name" >{{full_name}} {{#if is_current_user}}<span class="my_user_status">{{t '(you)' }}</span>{{/if}}</span>
+ <span class="user_name" >
+ <a data-user-id="{{user_id}}" class="view_user_profile" tabindex="0">{{full_name}}</a>
+ {{#if is_current_user}}<span class="my_user_status">{{t '(you)' }}</span>{{/if}}</span>
<i class="fa fa-ban deactivated-user-icon" title="{{t 'User is deactivated' }}" {{#if is_active}}style="display: none;"{{/if}}></i>
</td>
{{#if display_email}}
|
ICTCG1Controller zha update
Use same mapping definitions as z2m for ON/OFF functions
Use medium speed right turn for Light.ON to mimic native behaviour of directly bound controller | @@ -178,10 +178,10 @@ class ICTCG1Controller(LightController):
return {
"move_1_70": Light.HOLD_BRIGHTNESS_DOWN,
"move_1_195": Light.HOLD_BRIGHTNESS_DOWN,
- "move_to_level_with_on_off_0_1": Light.OFF,
+ "move_to_level_with_on_off_0_1": "rotate_left_quick",
"move_with_on_off_0_70": Light.HOLD_BRIGHTNESS_UP,
- "move_with_on_off_0_195": Light.HOLD_BRIGHTNESS_UP,
- "move_to_level_with_on_off_255_1": Light.ON_FULL_BRIGHTNESS,
+ "move_with_on_off_0_195": Light.ON,
+ "move_to_level_with_on_off_255_1": "rotate_right_quick",
"stop": Light.RELEASE,
}
|
Upgrade django-watchman to 0.15.0
Adds respect for the WATCHMAN_DISABLE_APM setting | @@ -468,9 +468,9 @@ django-allow-cidr==0.3.0 \
netaddr==0.7.19 \
--hash=sha256:56b3558bd71f3f6999e4c52e349f38660e54a7a8a9943335f73dfc96883e08ca \
--hash=sha256:38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd
-django-watchman==0.14.0 \
- --hash=sha256:0e953c27b8f4c07dcb96712ea4a304de085cf44e7829a33c6e12477cd60b8673 \
- --hash=sha256:d2094f09d1bdaa0f24e710da69d77433bd9011c18f74024acb332a2fcfcafe68
+django-watchman==0.15.0 \
+ --hash=sha256:1be3019ede05804414a67c116d28a2f1589befaf13aabe47bfc6882a00561db7 \
+ --hash=sha256:8faa4bd7cfc092721ffc4662c139313ded92c85c7d16396b8ccc2e31d40350e4
django-jsonview==1.1.0 \
--hash=sha256:b78cc4e3d75e119966d1ad2ae832c38f94ede967e847abee48df51059ddda040 \
--hash=sha256:9907d4958097db243419063477fa504ff63c7483687f852175452a1ff0d5582a
|
Downgrade pub/sub emulator version in CI.
For | @@ -25,7 +25,7 @@ RUN apt-get update && \
google-cloud-sdk-app-engine-python \
google-cloud-sdk-app-engine-python-extras \
google-cloud-sdk-datastore-emulator \
- google-cloud-sdk-pubsub-emulator \
+ google-cloud-sdk-pubsub-emulator=312.0.0-0 \
liblzma-dev \
nodejs \
openjdk-8-jdk
|
docs: Remove 'specify property field type' part from new feature tutorial.
This commit removes the part which mentions specifying the property field
type in the new feature tutorial as it is no longer required to specify the
type. | @@ -540,26 +540,6 @@ in. For example in this case of `mandatory_topics` it will lie in
better to discuss it in the [community](https://chat.zulip.org/)
before implementing it.*
-When defining the property, you'll also need to specify the property
-field type (i.e. whether it's a `bool`, `integer` or `text`).
-
-``` diff
-
-// static/js/settings_org.js
-var org_settings = {
- msg_editing: {
- // ...
- },
- msg_feed: {
- // ...
-+ mandatory_topics: {
-+ type: 'bool',
-+ },
- },
-};
-
-```
-
Note that some settings, like `realm_msg_edit_limit_setting`,
require special treatment, because they don't match the common
pattern. We can't extract the property name and compare the value of
|
Upgrade Travis operating system to Ubuntu 18.04
Ubuntu 18.04 "Bionic" doesn't support Py3.5 as a testing environment,
but comes with newer SQLite version, so we no longer have any issues
with the tests... | -dist: xenial
+dist: bionic
sudo: required
language: python
@@ -6,7 +6,8 @@ language: python
cache: pip
python:
- - 3.5
+ # After upgrading to Ubuntu 18.04 we lost ability to test against Py3.5
+ # - 3.5
- 3.6
- 3.7
|
Bugfix default to the map's strict slashes setting
Rather than each url rule specifying it must be treated as strict
slashes this instead defers to the map's setting. This in turn allows
a user to globally override the strict slashes setting by changing the
map's value. | @@ -533,7 +533,7 @@ class Quart(PackageStatic):
*,
provide_automatic_options: Optional[bool] = None,
is_websocket: bool = False,
- strict_slashes: bool = True,
+ strict_slashes: Optional[bool] = None,
merge_slashes: Optional[bool] = None,
) -> None:
"""Add a route/url rule to the application.
|
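The pattern behind the change above — a per-rule Optional[bool] that falls back to a map-wide default when left as None — can be sketched without Quart itself. The class and attribute names below are illustrative only, not Quart's or Werkzeug's actual API:

from typing import Optional


class UrlMap:
    def __init__(self, strict_slashes: bool = True) -> None:
        self.strict_slashes = strict_slashes


class Rule:
    def __init__(self, rule: str, strict_slashes: Optional[bool] = None) -> None:
        self.rule = rule
        self._strict_slashes = strict_slashes

    def strict_slashes(self, url_map: UrlMap) -> bool:
        # None means "defer to the map", so changing the map's value flips
        # every rule that did not opt out explicitly.
        if self._strict_slashes is None:
            return url_map.strict_slashes
        return self._strict_slashes


lenient = UrlMap(strict_slashes=False)
print(Rule('/users/').strict_slashes(lenient))                       # False (inherited)
print(Rule('/users/', strict_slashes=True).strict_slashes(lenient))  # True (explicit)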
Fix db bug in ExportInstructorLocationsView
For each airport that this view returns, the list of instructors should be ordered. SQLite3 sorts by id by default, but it's different on PostgreSQL, resulting in some tests failing on the latter db. | import datetime
from itertools import accumulate
-from django.db.models import Count, Sum, Case, F, When, Value, IntegerField, Min
+from django.db.models import (
+ Count,
+ Sum,
+ Case,
+ F,
+ When,
+ Value,
+ IntegerField,
+ Min,
+ Prefetch,
+)
from rest_framework import viewsets
from rest_framework.decorators import list_route
from rest_framework.filters import DjangoFilterBackend
@@ -150,8 +160,11 @@ class ExportInstructorLocationsView(ListAPIView):
permission_classes = (IsAuthenticatedOrReadOnly, )
paginator = None # disable pagination
- queryset = Airport.objects.exclude(person=None) \
- .prefetch_related('person_set')
+ queryset = Airport.objects.exclude(person=None).prefetch_related(
+ # Make sure that we sort instructors by id. This is default behaviour on
+ # SQLite, but not in PostgreSQL. This is necessary to pass
+ # workshops.test_export.TestExportingInstructors.test_serialization.
+ Prefetch('person_set', queryset=Person.objects.order_by('id')))
serializer_class = ExportInstructorLocationsSerializer
|
Update TheCatAPI link
Direct link to API documentation | @@ -65,7 +65,7 @@ Please note a passing build status indicates all listed APIs are available since
API | Description | Auth | HTTPS | CORS |
|---|---|---|---|---|
| [Cat Facts](https://alexwohlbruck.github.io/cat-facts/) | Daily cat facts | No | Yes | No |
-| [Cats](https://thecatapi.com/docs.html) | Pictures of cats from Tumblr | `apiKey` | Yes | Unknown |
+| [Cats](https://docs.thecatapi.com/) | Pictures of cats from Tumblr | `apiKey` | Yes | Unknown |
| [Dogs](https://dog.ceo/dog-api/) | Based on the Stanford Dogs Dataset | No | Yes | Yes |
| [HTTPCat](https://http.cat/) | Cat for every HTTP Status | No | Yes | Unknown |
| [IUCN](http://apiv3.iucnredlist.org/api/v3/docs) | IUCN Red List of Threatened Species | `apiKey` | No | Unknown |
|
Add SQL-native implementation for run step stats
Summary: Depends on D2399
Test Plan: BK
Reviewers: sashank | from abc import abstractmethod
+from collections import defaultdict
import six
import sqlalchemy as db
from dagster.core.errors import DagsterEventLogInvalidForRun
from dagster.core.events import DagsterEventType
from dagster.core.events.log import EventRecord
+from dagster.core.execution.stats import RunStepKeyStatsSnapshot
from dagster.serdes import deserialize_json_to_dagster_namedtuple, serialize_dagster_namedtuple
from dagster.utils import datetime_as_float, utc_datetime_from_timestamp
@@ -155,6 +157,63 @@ def get_stats_for_run(self, run_id):
except (seven.JSONDecodeError, check.CheckError) as err:
six.raise_from(DagsterEventLogInvalidForRun(run_id=run_id), err)
+ def get_step_stats_for_run(self, run_id):
+ check.str_param(run_id, 'run_id')
+
+ STEP_STATS_EVENT_TYPES = [
+ DagsterEventType.STEP_START.value,
+ DagsterEventType.STEP_SUCCESS.value,
+ DagsterEventType.STEP_SKIPPED.value,
+ DagsterEventType.STEP_FAILURE.value,
+ ]
+
+ query = (
+ db.select(
+ [
+ SqlEventLogStorageTable.c.step_key,
+ SqlEventLogStorageTable.c.dagster_event_type,
+ db.func.max(SqlEventLogStorageTable.c.timestamp).label('timestamp'),
+ ]
+ )
+ .where(SqlEventLogStorageTable.c.run_id == run_id)
+ .where(SqlEventLogStorageTable.c.step_key != None)
+ .where(SqlEventLogStorageTable.c.dagster_event_type.in_(STEP_STATS_EVENT_TYPES))
+ .group_by(
+ SqlEventLogStorageTable.c.step_key, SqlEventLogStorageTable.c.dagster_event_type,
+ )
+ )
+
+ with self.connect(run_id) as conn:
+ results = conn.execute(query).fetchall()
+
+ by_step_key = defaultdict(dict)
+ for result in results:
+ step_key = result.step_key
+ if result.dagster_event_type == DagsterEventType.STEP_START.value:
+ by_step_key[step_key]['start_time'] = (
+ datetime_as_float(result.timestamp) if result.timestamp else None
+ )
+ if result.dagster_event_type == DagsterEventType.STEP_FAILURE.value:
+ by_step_key[step_key]['end_time'] = (
+ datetime_as_float(result.timestamp) if result.timestamp else None
+ )
+ by_step_key[step_key]['status'] = DagsterEventType.STEP_FAILURE
+ if result.dagster_event_type == DagsterEventType.STEP_SUCCESS.value:
+ by_step_key[step_key]['end_time'] = (
+ datetime_as_float(result.timestamp) if result.timestamp else None
+ )
+ by_step_key[step_key]['status'] = DagsterEventType.STEP_SUCCESS
+ if result.dagster_event_type == DagsterEventType.STEP_SKIPPED.value:
+ by_step_key[step_key]['end_time'] = (
+ datetime_as_float(result.timestamp) if result.timestamp else None
+ )
+ by_step_key[step_key]['status'] = DagsterEventType.STEP_SKIPPED
+
+ return [
+ RunStepKeyStatsSnapshot(run_id=run_id, step_key=step_key, **value)
+ for step_key, value in by_step_key.items()
+ ]
+
def wipe(self):
'''Clears the event log storage.'''
# Should be overridden by SqliteEventLogStorage and other storages that shard based on
|
Update wq example to work with current libraries:
uproot4 -> uproot
awkward1 -> awkward
drop flatten and nano options
register behaviors within process method. | # Sample processor class given in the Coffea manual.
###############################################################
-import uproot4
+import uproot
from coffea.nanoevents import NanoEventsFactory, BaseSchema
# https://github.com/scikit-hep/uproot4/issues/122
-uproot4.open.defaults["xrootd_handler"] = uproot4.source.xrootd.MultithreadedXRootDSource
+uproot.open.defaults["xrootd_handler"] = uproot.source.xrootd.MultithreadedXRootDSource
-import awkward1 as ak
+import awkward as ak
from coffea import hist, processor
# register our candidate behaviors
@@ -49,6 +49,11 @@ class MyProcessor(processor.ProcessorABC):
return self._accumulator
def process(self, events):
+
+ # Note: This is required to ensure that behaviors are registered
+ # when running this code in a remote task.
+ ak.behavior.update(candidate.behavior)
+
output = self.accumulator.identity()
dataset = events.metadata['dataset']
@@ -117,9 +122,7 @@ fileset = {
work_queue_executor_args = {
# Options are common to all executors:
- 'flatten': True,
'compression': 1,
- 'nano' : False,
'schema' : BaseSchema,
'skipbadfiles': False, # Note that maxchunks only works if this is false.
|
Fixed bug in TFMultiStepMetric where all metrics were recorded with the same name.
The bug only occurs when plotting the TFMultiStepMetrics against an auxiliary step_metric, not when plotting against the train_step. | @@ -226,7 +226,17 @@ class TFMultiMetricStepMetric(TFStepMetric):
# Skip plotting the metrics against itself.
if self.name == step_metric.name:
continue
- step_tag = '{}vs_{}/{}'.format(prefix, step_metric.name, self.name)
+
+ # The default metric name is the `single_metric_name` followed by the
+ # index.
+ metric_name = single_metric_name + str(metric_index)
+ # In case there is a valid individual name for each metric, use it.
+ if (metric_index < len(self.metric_names) and
+ len(result_list) == len(self.metric_names) and
+ self.metric_names[metric_index] is not None):
+ metric_name = self.metric_names[metric_index]
+ step_tag = '{}vs_{}/{}/{}'.format(prefix, step_metric.name,
+ self.name, metric_name)
# Summaries expect the step value to be an int64.
step = tf.cast(step_metric.result(), tf.int64)
summaries.append(tf.compat.v2.summary.scalar(
|
Test TzinfoParser against full timezone database
Closes: crsmithdev/arrow#657 | @@ -6,14 +6,22 @@ import os
import time
from datetime import datetime
+import pytz
from chai import Chai
from dateutil import tz
+from dateutil.zoneinfo import get_zonefile_instance
from arrow import parser
from arrow.constants import MAX_TIMESTAMP_US
from arrow.parser import DateTimeParser, ParserError, ParserMatchError
+def make_full_tz_list():
+ dateutil_zones = set(get_zonefile_instance().zones)
+ pytz_zones = set(pytz.all_timezones)
+ return dateutil_zones.union(pytz_zones)
+
+
class DateTimeParserTests(Chai):
def setUp(self):
super(DateTimeParserTests, self).setUp()
@@ -332,23 +340,7 @@ class DateTimeParserParseTests(Chai):
)
def test_parse_tz_name_zzz(self):
- for tz_name in (
- # best solution would be to test on every available tz name from
- # the tz database but it is actually tricky to retrieve them from
- # dateutil so here is short list that should match all
- # naming patterns/conventions in used tz database
- "Africa/Tripoli",
- "America/Port_of_Spain",
- "Australia/LHI",
- "Etc/GMT-11",
- "Etc/GMT0",
- "Etc/UCT",
- "Etc/GMT+9",
- "GMT+0",
- "CST6CDT",
- "GMT-0",
- "W-SU",
- ):
+ for tz_name in make_full_tz_list():
self.expected = datetime(2013, 1, 1, tzinfo=tz.gettz(tz_name))
self.assertEqual(
self.parser.parse("2013-01-01 %s" % tz_name, "YYYY-MM-DD ZZZ"),
|
Arch Linux installation guide
Related | @@ -24,6 +24,8 @@ OR Install manim via the git repository with venv::
$ source bin/activate
$ pip3 install -r requirement.txt
+For Arch Linux users, install python-manimlib_:sup:`AUR` package.
+
To use manim in virtual environment you need to activate the environment with
the ``activate`` binary by doing ``source bin/activate``, to exit use the ``deactivate`` command.
@@ -39,3 +41,5 @@ the ``activate`` binary by doing ``source bin/activate``, to exit use the ``deac
texlive texlive-latex-extra texlive-fonts-extra
texlive-latex-recommended texlive-science texlive-fonts-extra tipa
+
+.. _python-manimlib: https://aur.archlinux.org/packages/python-manimlib/
|
svtplay: for some reason they presented the m3u8 file as mpd
fixes: | @@ -111,11 +111,11 @@ class Svtplay(Service, MetadataThumbMixin):
query = parse_qs(urlparse(i["url"]).query)
if "alt" in query and len(query["alt"]) > 0:
alt = self.http.get(query["alt"][0])
- if i["format"][:3] == "hls":
+ if i["url"].find(".m3u8") > 0:
streams = hlsparse(self.config, self.http.request("get", i["url"]), i["url"], output=self.output)
if alt:
alt_streams = hlsparse(self.config, self.http.request("get", alt.request.url), alt.request.url, output=self.output)
- elif i["format"][:4] == "dash":
+ elif i["url"].find(".mpd") > 0:
streams = dashparse(self.config, self.http.request("get", i["url"]), i["url"], output=self.output)
if alt:
alt_streams = dashparse(self.config, self.http.request("get", alt.request.url), alt.request.url, output=self.output)
|
fix github actions for forked PRs
Summary:
Pull Request resolved:
I was trying to be too clever with GITHUB_HEAD_REF...
Test Plan: Imported from OSS | @@ -24,9 +24,11 @@ jobs:
# We are on master, just set the SHA from our current location
echo ::set-output name=commit_sha::${GITHUB_SHA}
else
- # We are on a PR, we need to check out PR branch
- git checkout ${GITHUB_HEAD_REF}
- echo ::set-output name=commit_sha::$(git rev-parse ${GITHUB_HEAD_REF})
+ # We are on a PR, so actions/checkout leaves us on merge commit.
+ # Check out the actual tip of the branch.
+ PR_TIP=$(git rev-parse HEAD^2)
+ git checkout ${PR_TIP}
+ echo ::set-output name=commit_sha::${PR_TIP}
fi
id: get_pr_tip
- name: Run flake8
|
fire: Remove periods from config documentation
The period is already accounted for in the decorator | @@ -55,24 +55,24 @@ STANDARD_CONFIG_INFO_DICT = {
"validation": [True, False],
"definition": (
"Should lines starting with a templating placeholder"
- " such as `{{blah}}` have their indentation linted."
+ " such as `{{blah}}` have their indentation linted"
),
},
"select_clause_trailing_comma": {
"validation": ["forbid", "require"],
"definition": (
- "Should trailing commas within select clauses be required or forbidden."
+ "Should trailing commas within select clauses be required or forbidden"
),
},
"ignore_comment_lines": {
"validation": [True, False],
"definition": (
"Should lines that contain only whitespace and comments"
- " be ignored when linting line lengths."
+ " be ignored when linting line lengths"
),
},
"forbid_subquery_in": {
"validation": ["join", "from", "both"],
- "definition": "Which clauses should be linted for subqueries.",
+ "definition": "Which clauses should be linted for subqueries",
},
}
|
Update changelog
Fixed double colons in recent 3.0 entries | =========
Changelog
=========
+* :feature:`547` Add support for specifying ``--non-interactive`` as an
+ environment variable.
* :release:`3.0.0 <2019-11-18>`
-* :feature:`336`: When a client certificate is indicated, all password
+* :feature:`336` When a client certificate is indicated, all password
processing is disabled.
-* :feature:`524`: Twine now unconditionally requires the keyring library
+* :feature:`489` Add ``--non-interactive`` flag to abort upload rather than
+ interactively prompt if credentials are missing.
+* :feature:`524` Twine now unconditionally requires the keyring library
and no longer supports uninstalling ``keyring`` as a means to disable
that functionality. Instead, use ``keyring --disable`` keyring functionality
if necessary.
|
optimisations to threads
start connection thread during `Monitor.start`
perform initial download in download_thread | @@ -1570,6 +1570,11 @@ def download_worker(sync, syncing, running, connected, queue_downloading):
syncing.wait() # if not running, wait until resumed
try:
+
+ if not sync.last_cursor:
+ # run the initial Dropbox download
+ sync.get_remote_dropbox()
+ else:
# wait for remote changes (times out after 120 secs)
has_changes = sync.wait_for_remote_changes(sync.last_cursor, timeout=120)
@@ -1733,14 +1738,6 @@ class MaestralMonitor(object):
self.sync = UpDownSync(self.client, self.queue_to_upload)
- self.connection_thread = Thread(
- target=connection_helper,
- daemon=True,
- args=(self.client, self.syncing, self.connected),
- name="Maestral connection helper"
- )
- self.connection_thread.start()
-
def start(self, overload=None):
"""Creates observer threads and starts syncing."""
@@ -1755,6 +1752,13 @@ class MaestralMonitor(object):
self.file_handler, self.sync.dropbox_path, recursive=True
)
+ self.connection_thread = Thread(
+ target=connection_helper,
+ daemon=True,
+ args=(self.client, self.syncing, self.running, self.connected),
+ name="Maestral connection helper"
+ )
+
self.download_thread = Thread(
target=download_worker,
daemon=True,
@@ -1773,6 +1777,7 @@ class MaestralMonitor(object):
self.running.set()
+ self.connection_thread.start()
self.local_observer_thread.start()
self.download_thread.start()
self.upload_thread.start()
@@ -1883,10 +1888,6 @@ class MaestralMonitor(object):
if was_paused:
self.pause()
- def __del__(self):
- self.stop()
- self.connection_thread_running.clear()
-
# ========================================================================================
# Helper functions
|
Add send/recv multipart message test with polling
Summary:
I ended up writing this UT while debugging with pirateninja. Having more UTs
is better so sending out this diff. | @@ -342,6 +342,48 @@ TEST(ZmqEventLoopTest, scheduleTimeoutApi) {
EXPECT_FALSE(evl.isRunning());
}
+TEST(ZmqEventLoopTest, sendRecvMultipart) {
+ Context context;
+ ZmqEventLoop evl;
+ const SocketUrl socketUrl{"inproc://server_url"};
+
+ Socket<ZMQ_REP, ZMQ_SERVER> serverSock{context};
+ serverSock.bind(socketUrl).value();
+ evl.addSocket(RawZmqSocketPtr{*serverSock}, ZMQ_POLLIN, [&](int) noexcept {
+ LOG(INFO) << "Received request on server socket.";
+ Message msg1, msg2;
+ serverSock.recvMultiple(msg1, msg2).value();
+ LOG(INFO) << "Messages received .... "
+ << "\n\t " << msg1.read<std::string>().value()
+ << "\n\t " << msg2.read<std::string>().value();
+ EXPECT_EQ(std::string("hello world"), msg1.read<std::string>().value());
+ EXPECT_EQ(std::string("yolo"), msg2.read<std::string>().value());
+ serverSock.sendMultiple(msg1, msg2).value();
+ evl.stop();
+ });
+
+ std::thread evlThread([&]() noexcept {
+ LOG(INFO) << "Starting event loop";
+ evl.run();
+ LOG(INFO) << "Event loop stopped";
+ });
+ evl.waitUntilRunning();
+
+ Socket<ZMQ_REQ, ZMQ_CLIENT> clientSock{context};
+ clientSock.connect(socketUrl).value();
+
+ LOG(INFO) << "Sending messages.";
+ clientSock.sendMultiple(
+ Message::from(std::string("hello world")).value(),
+ Message::from(std::string("yolo")).value());
+ LOG(INFO) << "Receiving messages.";
+ auto msgs = clientSock.recvMultiple().value();
+ LOG(INFO) << "Received messages.";
+ EXPECT_EQ(2, msgs.size());
+
+ evlThread.join();
+}
+
} // namespace fbzmq
int
|
Fix handling of gin file in grid_search
Previously, for gin config, only an empty config file was written to the root directory and the jobs could not run.
# write the current conf file as
# ``<root_dir>/alf_config.py`` or ``<root_dir>/configured.gin``
+ conf_file = common.get_conf_file()
+ if conf_file.endswith('.gin'):
+ # for gin, we need to parse it first. Otherwise, configured.gin will be
+ # empty
+ common.parse_conf_file(conf_file)
common.write_config(root_dir)
# generate a snapshot of ALF repo as ``<root_dir>/alf``
|
Mark Adorable Avatars as HTTPS capable
While their main website returns the wrong certificate, the actual API does support HTTPS | @@ -118,7 +118,7 @@ For information on contributing to this project, please see the [contributing gu
| API | Description | Auth | HTTPS | Link |
|---|---|---|---|---|
-| Adorable Avatars | Generate random cartoon avatars | No | No | [Go!](http://avatars.adorable.io) |
+| Adorable Avatars | Generate random cartoon avatars | No | Yes | [Go!](http://avatars.adorable.io) |
| APIs.guru | Wikipedia for Web APIs, OpenAPI/Swagger specs for public APIs | No | Yes | [Go!](https://apis.guru/api-doc/) |
| CDNJS | Library info on CDNJS | No | Yes | [Go!](https://api.cdnjs.com/libraries/jquery) |
| Faceplusplus | A tool to detect face | `oAuth` | No | [Go!](http://www.faceplusplus.com/uc_home/) |
|
Database Table Creation Modification
Summary:
Modified:
fbcode/fbjava/fb-spark-applications/rl/dqn-preprocessing/src/main/scala/com/facebook/spark/rl/MultiStepTimeline.scala
To have a similar table creation process as:
fbcode/fbjava/fb-spark-applications/rl/dqn-preprocessing/src/main/scala/com/facebook/spark/rl/Timeline.scala | @@ -128,16 +128,15 @@ object MultiStepTimeline {
Helper.getDataTypes(sqlContext, config.inputTableName, List("action"))("action")
log.info("action column data type:" + s"${actionDataType}")
assert(Set("string", "map<bigint,double>").contains(actionDataType))
- val actionDiscrete = actionDataType == "string"
var sortActionMethod = "UDF_SORT_ID";
var sortPossibleActionMethod = "UDF_SORT_ARRAY_ID";
- if (!actionDiscrete) {
+ if (actionDataType != "string") {
sortActionMethod = "UDF_SORT_MAP";
sortPossibleActionMethod = "UDF_SORT_ARRAY_MAP";
}
- MultiStepTimeline.createTrainingTable(sqlContext, config.outputTableName, actionDiscrete)
+ MultiStepTimeline.createTrainingTable(sqlContext, config.outputTableName, actionDataType)
MultiStepTimeline.registerUDFs(sqlContext)
val sqlCommand = s"""
@@ -286,30 +285,23 @@ object MultiStepTimeline {
def createTrainingTable(
sqlContext: SQLContext,
tableName: String,
- actionDiscrete: Boolean
+ actionDataType: String
): Unit = {
- var actionType = "STRING";
- var possibleActionType = "ARRAY<STRING>";
- if (!actionDiscrete) {
- actionType = "MAP<BIGINT, DOUBLE>"
- possibleActionType = "ARRAY<MAP<BIGINT,DOUBLE>>"
- }
-
val sqlCommand = s"""
CREATE TABLE IF NOT EXISTS ${tableName} (
mdp_id STRING,
state_features MAP <BIGINT, DOUBLE>,
- action ${actionType},
+ action ${actionDataType},
action_probability DOUBLE,
reward ARRAY<DOUBLE>,
next_state_features ARRAY<MAP<BIGINT,DOUBLE>>,
- next_action ARRAY<${actionType}>,
+ next_action ARRAY<${actionDataType}>,
sequence_number BIGINT,
sequence_number_ordinal BIGINT,
time_diff ARRAY<BIGINT>,
time_since_first BIGINT,
- possible_actions ${possibleActionType},
- possible_next_actions ARRAY<${possibleActionType}>,
+ possible_actions Array<${actionDataType}>,
+ possible_next_actions ARRAY<ARRAY<${actionDataType}>>,
metrics ARRAY<MAP<STRING, DOUBLE>>
) PARTITIONED BY (ds STRING) TBLPROPERTIES ('RETENTION'='30')
""".stripMargin
|
Update DEV_SETUP.md
Updated some mac install information for python3.6 and virtualenvwrapper | @@ -61,6 +61,10 @@ Save those backups to somewhere you'll be able to access from the new environmen
$ sudo python get-pip.py
$ sudo pip install virtualenvwrapper --ignore-installed six
+- For downloading Python 3.6 consider:
+ 1. Using [pyenv](https://github.com/pyenv/pyenv-installer)
+ 2. Using homebrew with this [brew formula](https://gist.github.com/SamuelMarks/0ceaaf6d3de12b6408e3e67aae80ae3b)
+
- Additional requirements:
- [Homebrew](https://brew.sh)
- [libmagic](https://macappstore.org/libmagic) (available via homebrew)
@@ -95,7 +99,7 @@ Save those backups to somewhere you'll be able to access from the new environmen
script, say, ~/.bashrc, or ~/.zshrc. For example:
$ cat <<EOF >> ~/.bashrc
- export WORKON_HOME=\$HOME/venv
+ export WORKON_HOME=~/venv
export VIRTUALENVWRAPPER_PYTHON=/usr/bin/python3
source /usr/local/bin/virtualenvwrapper.sh
EOF
|
Conveyor: fix exception in submitter. Closes
TypeError: '<' not supported between instances of 'datetime.datetime' and 'NoneType' | @@ -1598,7 +1598,8 @@ def create_missing_replicas_and_requests(
creation_successful = False
existing_request = get_request_by_did(rws.scope, rws.name, rws.dest_rse.id, session=session)
- if datetime.datetime.utcnow() - CONCURRENT_SUBMISSION_TOLERATION_DELAY < existing_request['requested_at']:
+ if existing_request['requested_at'] and \
+ datetime.datetime.utcnow() - CONCURRENT_SUBMISSION_TOLERATION_DELAY < existing_request['requested_at']:
concurrent_submission_detected = True
break
|
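The crash fixed above is the usual Python 3 pitfall of comparing a datetime against None. A self-contained sketch of the guard; the five-minute delay is an arbitrary stand-in, not Rucio's actual constant:

import datetime

CONCURRENT_SUBMISSION_TOLERATION_DELAY = datetime.timedelta(minutes=5)  # illustrative value


def concurrent_submission_detected(requested_at):
    """Return True only when a recent request timestamp exists."""
    if requested_at is None:          # short-circuit before the '<' comparison
        return False
    return datetime.datetime.utcnow() - CONCURRENT_SUBMISSION_TOLERATION_DELAY < requested_at


print(concurrent_submission_detected(None))                        # False, no TypeError
print(concurrent_submission_detected(datetime.datetime.utcnow()))  # True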
[hail] Wrap AbstractRVDSpec.read failure to log the path
The exception message will now have the metadata path to more easily
assist in diagnosing issues. | @@ -42,10 +42,14 @@ object AbstractRVDSpec {
new ETypeSerializer
def read(fs: FS, path: String): AbstractRVDSpec = {
+ try {
val metadataFile = path + "/metadata.json.gz"
using(fs.open(metadataFile)) { in => JsonMethods.parse(in) }
.transformField { case ("orvdType", value) => ("rvdType", value) } // ugh
.extract[AbstractRVDSpec]
+ } catch {
+ case e: Exception => fatal(s"failed to read RVD spec $path", e)
+ }
}
def readLocal(
|
Change Locust website url to https
update README with http urls | ## Links
-* Website: <a href="http://locust.io">locust.io</a>
-* Documentation: <a href="http://docs.locust.io">docs.locust.io</a>
+* Website: <a href="https://locust.io">locust.io</a>
+* Documentation: <a href="https://docs.locust.io">docs.locust.io</a>
* Support/Questions: [Slack signup](https://slack.locust.io/)
## Description
|
Code block: remove truncate function
No longer used anywhere. | @@ -99,17 +99,3 @@ def is_repl_code(content: str, threshold: int = 3) -> bool:
return True
return False
-
-
-def truncate(content: str, max_chars: int = 204, max_lines: int = 10) -> str:
- """Return `content` truncated to be at most `max_chars` or `max_lines` in length."""
- current_length = 0
- lines_walked = 0
-
- for line in content.splitlines(keepends=True):
- if current_length + len(line) > max_chars or lines_walked == max_lines:
- break
- current_length += len(line)
- lines_walked += 1
-
- return content[:current_length] + "#..."
|
fopen: Workaround bad buffering for binary mode
A lot of code assumes Python 2.x behavior for buffering, in which 1 is a
special value meaning line buffered.
Python 3 makes this value unusable, so fallback to the default buffering
size, and report these calls to be fixed.
Fixes: | @@ -382,6 +382,11 @@ def fopen(*args, **kwargs):
if not binary and not kwargs.get("newline", None):
kwargs["newline"] = ""
+ # Workaround callers with bad buffering setting for binary files
+ if kwargs.get("buffering", -1) == 1 and 'b' in kwargs.get("mode", ""):
+ log.debug("Bad buffering specified for '%s'", args[0], stack_info=True)
+ del(kwargs["buffering"])
+
f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage
if is_fcntl_available():
|
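A standalone sketch of the workaround above, assuming nothing about Salt's real fopen helper. In Python 3, buffering=1 means line buffering and only applies to text mode; for binary files it yields a pathological 1-byte buffer on older 3.x releases or a RuntimeWarning on 3.8+, so the wrapper simply drops the value:

import tempfile


def fopen(path, mode='r', buffering=-1, **kwargs):
    """Open a file, ignoring a line-buffering request for binary modes."""
    if buffering == 1 and 'b' in mode:
        buffering = -1        # fall back to the default buffer size
    return open(path, mode, buffering=buffering, **kwargs)


with tempfile.NamedTemporaryFile(delete=False) as tmp:
    name = tmp.name

with fopen(name, 'wb', buffering=1) as fh:   # plain open() would warn or misbehave here
    fh.write(b'payload')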
Remove dead test code
We're not longer supporting Debian 8, so this skip will never be hit. | @@ -3,15 +3,8 @@ tests for host state
"""
-import salt.utils.platform
from tests.support.case import ModuleCase
-HAS_LSB_RELEASE = True
-try:
- import lsb_release
-except ImportError:
- HAS_LSB_RELEASE = False
-
class CompileTest(ModuleCase):
"""
@@ -31,13 +24,6 @@ class CompileTest(ModuleCase):
Test when we have an error in a execution module
called by jinja
"""
- if salt.utils.platform.is_linux() and HAS_LSB_RELEASE:
- release = lsb_release.get_distro_information()
- if (
- release.get("ID") == "Debian"
- and int(release.get("RELEASE", "0")[0]) < 9
- ):
- self.skipTest("This test is flaky on Debian 8. Skipping.")
ret = self.run_function("state.sls", ["issue-10010"])
self.assertTrue(", in jinja_error" in ret[0].strip())
|
Make table lock optional when adding products
This should probably be called "allow_exclusive_lock" (or similar) to avoid being specific to DBs, but it's named this way in the other add() methods already, so consistency wins. | @@ -275,10 +275,15 @@ class ProductResource(object):
return DatasetType(metadata_type, definition)
- def add(self, type_):
+ def add(self, type_, allow_table_lock=False):
"""
Add a Product.
+ :param allow_table_lock:
+ Allow an exclusive lock to be taken on the table while creating the indexes.
+ This will halt other user's requests until completed.
+
+ If false, creation will be slightly slower and cannot be done in a transaction.
:param datacube.model.DatasetType type_: Product to add
:rtype: datacube.model.DatasetType
"""
@@ -295,14 +300,15 @@ class ProductResource(object):
metadata_type = self.metadata_type_resource.get_by_name(type_.metadata_type.name)
if metadata_type is None:
_LOG.warning('Adding metadata_type "%s" as it doesn\'t exist.', type_.metadata_type.name)
- metadata_type = self.metadata_type_resource.add(type_.metadata_type)
+ metadata_type = self.metadata_type_resource.add(type_.metadata_type, allow_table_lock=allow_table_lock)
with self._db.connect() as connection:
connection.add_dataset_type(
name=type_.name,
metadata=type_.metadata_doc,
metadata_type_id=metadata_type.id,
search_fields=metadata_type.dataset_fields,
- definition=type_.definition
+ definition=type_.definition,
+ concurrently=not allow_table_lock,
)
return self.get_by_name(type_.name)
|
Commit quantile output format.
Now quantiles are a dict with quantile-name -> values. | @@ -178,9 +178,12 @@ class Forecast:
result["mean"] = self.mean.tolist()
if OutputType.quantiles in config.output_types:
- result["quantiles"] = [
- self.quantile(q).tolist() for q in config.quantiles
- ]
+ quantiles = map(Quantile.parse, config.quantiles)
+
+ result["quantiles"] = {
+ quantile.name: self.quantile(quantile.value).tolist()
+ for quantile in quantiles
+ }
if OutputType.samples in config.output_types:
result["samples"] = []
|
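The change above keys the serialized output by quantile name instead of list position. GluonTS provides a Quantile helper for this; the cut-down class below only approximates its parse() behaviour and is meant to show the resulting dict shape, not the real API:

from typing import NamedTuple, Union


class Quantile(NamedTuple):
    value: float
    name: str

    @classmethod
    def parse(cls, quantile: Union[float, str]) -> 'Quantile':
        # Accept both "p90"-style names and plain numeric strings/floats.
        if isinstance(quantile, str) and quantile.startswith('p'):
            return cls(value=int(quantile[1:]) / 100.0, name=quantile)
        return cls(value=float(quantile), name=str(quantile))


def quantile_forecast(q):          # stand-in for Forecast.quantile(q).tolist()
    return [q, q, q]


config_quantiles = ['0.1', 'p50', '0.9']
result = {
    q.name: quantile_forecast(q.value)
    for q in map(Quantile.parse, config_quantiles)
}
print(result)   # {'0.1': [0.1, 0.1, 0.1], 'p50': [0.5, 0.5, 0.5], '0.9': [0.9, 0.9, 0.9]}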
If there is only a single image in the shoebox then set the z centroid
as image + 0.5. Previously, as this was calculated as an intensity
weighted centroid it would result in small deviations around 1e-7 from
0.5 which resulted in strange output in spot finding for a single image.
Fixes | @@ -329,6 +329,9 @@ namespace dials { namespace model {
try {
Centroider centroid(data.const_ref(), foreground_mask);
result = extract_centroid_object(centroid, offset);
+ if (bbox[5] == bbox[4] + 1) {
+ result.px.position[2] = bbox[4] + 0.5;
+ }
} catch (dials::error) {
double xmid = (bbox[1] + bbox[0]) / 2.0;
double ymid = (bbox[3] + bbox[2]) / 2.0;
|
Update wmts100capabilities.xml
In WMTS 1.0.0 it is ows:Keywords not ows:KeywordList
What a mess. | <ows:Title>{{service.title}}</ows:Title>
<ows:Abstract>{{service.abstract}}</ows:Abstract>
{{if service.keyword_list and len(service.keyword_list) > 0}}
- <ows:KeywordList>
+ <ows:Keywords>
{{for list in service.keyword_list}}
{{py: kw=bunch(default='', **list)}}
{{for keyword in kw.keywords}}
<ows:Keyword{{if kw.vocabulary}} vocabulary="{{kw.vocabulary}}"{{endif}}>{{keyword}}</ows:Keyword>
{{endfor}}
{{endfor}}
- </ows:KeywordList>
+ </ows:Keywords>
{{endif}}
<ows:ServiceType>OGC WMTS</ows:ServiceType>
<ows:ServiceTypeVersion>1.0.0</ows:ServiceTypeVersion>
|
fix: remove old and duplicated test cases in tests.cli.main
Remove old and duplicated test cases in tests.cli.main which were
replaced with newer test cases in tests.cli.test_*. | @@ -49,45 +49,6 @@ class RunTestWithTmpdir(RunTestBase):
shutil.rmtree(str(self.tmpdir))
-class Test_10(RunTestBase):
- infile = tests.common.respath('00-cnf.json')
-
- def test_10_show_usage(self):
- self.run_and_check_exit_code(["--help"])
-
- def test_20_wo_args(self):
- self.run_and_check_exit_code(_not=True)
-
- def test_30_wrong_option(self):
- self.run_and_check_exit_code(["--wrong-option-xyz"], _not=True)
-
- def test_40_list(self):
- self.run_and_check_exit_code(["--list"])
-
- def test_50_unknown_input_file_type(self):
- self.run_and_check_exit_code([__file__], _not=True)
-
- def test_52_unknown_input_parser_type(self):
- self.run_and_check_exit_code([__file__, "-I", "unknown_psr"],
- _not=True)
-
- def test_54_no_input_type_and_unknown_out_file_type(self):
- self.run_and_check_exit_code([__file__, __file__ + '.un_ext'],
- _not=True)
-
-
-class Test_12(RunTestWithTmpdir):
- infile = tests.common.respath('00-cnf.json')
-
- def test_60_unknown_out_file_type(self):
- opath = str(self.tmpdir / "t.unknown_ext")
- self.run_and_check_exit_code([self.infile, "-o", opath], _not=True)
-
- def test_62_unknown_out_parser_type(self):
- opath = str(self.tmpdir / "t.unknown_psr")
- self.run_and_check_exit_code([self.infile, "-O", opath], _not=True)
-
-
class Test_20_Base(RunTestBase):
def setUp(self):
|
fix min and max values for exported and imported trade values
remove explicit creation of DataSubjectArray
pass country list for exports/imports during annotation for DP metadata | "outputs": [],
"source": [
"domain_client = sy.login(\n",
- " url=\"localhost:80\",#auto_detect_domain_host_ip(),\n",
+ " url=\"localhost:8081\",#auto_detect_domain_host_ip(),\n",
" email=\"[email protected]\",\n",
" password=\"changethis\"\n",
")"
"cell_type": "code",
"execution_count": null,
"id": "c9f4f75f-a4b6-4337-8bc7-147ace57a14f",
- "metadata": {},
+ "metadata": {
+ "run_control": {
+ "marked": false
+ }
+ },
"outputs": [],
"source": [
"# run this cell\n",
"# Filter out the countries from which commodities were imported\n",
"countries_from_which_commodities_are_imported = imports[\"Partner\"]\n",
"\n",
- "data_subjects_for_imported_commodities = sy.DataSubjectArray.from_objs(\n",
- " countries_from_which_commodities_are_imported\n",
- ")\n",
+ "# calculate min and max values of the trade value of imported commodities\n",
+ "trade_value_of_imported_commodites.min(), trade_value_of_imported_commodites.max()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "37e6457c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# run this cell\n",
"\n",
"# annotate traded value of imported commodities with dp metadata\n",
- "imports_data = sy.Tensor(\n",
- " trade_value_of_imported_commodites\n",
- ").annotated_with_dp_metadata(min_val=0, max_val=255, data_subjects=data_subjects_for_imported_commodities)"
+ "imports_data = sy.Tensor(trade_value_of_imported_commodites).annotated_with_dp_metadata(\n",
+ " min_val=0, \n",
+ " max_val=3e10, \n",
+ " data_subjects=countries_from_which_commodities_are_imported\n",
+ ")"
]
},
{
"# Filter out the countries to which commodities were exported\n",
"countries_to_which_commodities_are_exported = exports[\"Partner\"]\n",
"\n",
- "data_subjects_for_exported_commodities = sy.DataSubjectArray.from_objs(countries_to_which_commodities_are_exported)\n",
- "\n",
+ "# calculate min and max values of the trade value of exported commodities\n",
+ "trade_value_of_exported_commodites.min(), trade_value_of_exported_commodites.max()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6a2b9402",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# run this cell\n",
"\n",
"# annotate traded value of exported commodities with dp metadata\n",
- "exports_data = sy.Tensor(\n",
- " trade_value_of_exported_commodites\n",
- ").annotated_with_dp_metadata(min_val=0, max_val=1, data_subjects=data_subjects_for_exported_commodities)"
+ "exports_data = sy.Tensor(trade_value_of_exported_commodites).annotated_with_dp_metadata(\n",
+ " min_val=0, \n",
+ " max_val=3e10, \n",
+ " data_subjects=countries_to_which_commodities_are_exported\n",
+ ")"
]
},
{
|
nfs: remove legacy task
This fact is never used, let's remove the task. | ---
-- name: set_fact container_exec_cmd_nfs
- set_fact:
- container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
- when: containerized_deployment | bool
-
- name: create rgw nfs user "{{ ceph_nfs_rgw_user }}"
radosgw_user:
name: "{{ ceph_nfs_rgw_user }}"
|
Adds error checking for import_EGAP registration_metadata
## Purpose
Adds error handling for registration metadata for EGAP Registrations
## Changes
* Adds checks for what should be the `egap_registration_date` and the `egap_embargo_public_date` on the import_EGAP script | @@ -198,14 +198,24 @@ def main(guid, creator_username):
draft_registration_metadata = draft_registration.registration_metadata
# Retrieve EGAP registration date and potential embargo go-public date
+ if draft_registration_metadata.get('q4'):
egap_registration_date_string = draft_registration_metadata['q4']['value']
- egap_embargo_public_date_string = draft_registration_metadata['q12']['value']
-
egap_registration_date = dt.strptime(egap_registration_date_string, '%m/%d/%Y')
+ else:
+ logger.error(
+ 'DraftRegistration associated with Project {}'
+ 'does not have a valid registration date in registration_metadata'.format(project._id)
+ )
+ continue
+
+ if draft_registration_metadata.get('q12'):
+ egap_embargo_public_date_string = draft_registration_metadata['q12']['value']
egap_embargo_public_date = dt.strptime(egap_embargo_public_date_string, '%m/%d/%Y')
+ else:
+ egap_embargo_public_date = None
sanction_type = 'RegistrationApproval'
- if egap_embargo_public_date > dt.today():
+ if egap_embargo_public_date and (egap_embargo_public_date > dt.today()):
sanction_type = 'Embargo'
try:
@@ -216,6 +226,7 @@ def main(guid, creator_username):
'Unexpected error raised when attempting to silently register'
'project {}. Continuing...'.format(project._id))
logger.info(str(err))
+ continue
# Update contributors on project to Admin
contributors = project.contributor_set.all()
|
Fix incomplete display stp instance command in Huawei.VRP.get_spanning_tree
HG--
branch : feature/microservices | @@ -26,6 +26,8 @@ class Script(BaseScript):
status}
"""
cli_stp = self.cli("display stp brief")
+ if self.rx_stp_disabled.search(cli_stp):
+ return None
ports = {} # instance -> port -> attributes
for R in cli_stp.splitlines()[1:]:
if not R.strip():
@@ -103,10 +105,8 @@ class Script(BaseScript):
r"(?P<designated_bridge_id>\S+)\s/\s(?P<designated_port_id>\S+).*?",
re.MULTILINE | re.IGNORECASE)
- def process_mstp(self):
+ def process_mstp(self, ports=None):
check_d = re.compile("\s*\d+\s*")
- # Save port attributes
- ports = self.get_ports_attrs()
#
v = self.cli("display stp region-configuration")
match = self.rx_mstp_region.search(v)
@@ -141,7 +141,12 @@ class Script(BaseScript):
for instance_id in iv:
if instance_id not in ports:
continue
- for I in self.cli("display stp instance %s" % instance_id).split("-------\[")[0:]:
+ try:
+ instance_list = self.cli("display stp instance %s" % instance_id).split("-------\[")
+ except self.CLISyntaxError:
+ # Not support command "display stp instance NUM"
+ instance_list = self.cli("display stp").split("-------\[")
+ for I in instance_list[0:]:
# instance_id = int(instance_id)
if instance_id == 0:
match = self.rx_mstp0_bridge.search(I)
@@ -188,8 +193,11 @@ class Script(BaseScript):
return r
def execute(self):
- cli_stp = self.cli("display stp brief")
- if self.rx_stp_disabled.search(cli_stp):
+ # Save port attributes
+ # cli_stp = self.cli("display stp brief", cached=True)
+ ports = self.get_ports_attrs()
+ if ports:
+ return self.process_mstp(ports=ports)
+ else:
+ # No STP ports
return {"mode": None, "instances": []}
-
- return self.process_mstp()
|
avoid multiple conversions from mired to kelvin and vice versa
Avoid triple conversion while comparing against min / max color temperature when a new value is set. | @@ -36,6 +36,8 @@ DEFAULT_BRIGHTNESS = 255
DEFAULT_COLOR_TEMPERATURE = 333 # 3000 K
DEFAULT_MIN_MIREDS = 166 # 6000 K
DEFAULT_MAX_MIREDS = 370 # 2700 K
+DEFAULT_MIN_KELVIN = 2700
+DEFAULT_MAX_KELVIN = 6000
DEPENDENCIES = ['xknx']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
@@ -187,6 +189,24 @@ class KNXLight(Light):
if kelvin is not None else DEFAULT_MAX_MIREDS
return None
+ @property
+ def min_kelvin(self):
+ """Return the warmest color temperature this light supports in kelvin."""
+ if self.device.supports_color_temperature:
+ kelvin = self.device.min_kelvin
+ return kelvin \
+ if kelvin is not None else DEFAULT_MIN_KELVIN
+ return None
+
+ @property
+ def max_kelvin(self):
+ """Return the coldest color temperature this light supports in kelvin."""
+ if self.device.supports_color_temperature:
+ kelvin = self.device.max_kelvin
+ return kelvin \
+ if kelvin is not None else DEFAULT_MAX_KELVIN
+ return None
+
@property
def effect_list(self):
"""Return the list of supported effects."""
@@ -250,11 +270,11 @@ class KNXLight(Light):
elif self.device.supports_color_temperature and \
update_color_temp:
# change color temperature without ON telegram
- if mireds > self.max_mireds:
- mireds = self.max_mireds
- elif mireds < self.min_mireds:
- mireds = self.min_mireds
kelvin = int(color_util.color_temperature_mired_to_kelvin(mireds))
+ if kelvin > self.max_kelvin:
+ kelvin = self.max_kelvin
+ elif kelvin < self.min_kelvin:
+ kelvin = self.min_kelvin
await self.device.set_color_temperature(kelvin)
elif self.device.supports_tunable_white and \
update_tunable_white:
|
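Mireds and kelvin are reciprocals (mired = 1,000,000 / K), so clamping in mireds after converting forces extra conversions. A small sketch of the approach taken above — clamp once, in kelvin — using the DEFAULT_MIN_KELVIN/DEFAULT_MAX_KELVIN values from the diff; the conversion helper below is a stand-in for Home Assistant's color_util, not the real module:

DEFAULT_MIN_KELVIN = 2700
DEFAULT_MAX_KELVIN = 6000


def color_temperature_mired_to_kelvin(mireds: float) -> float:
    """Mireds are micro reciprocal degrees: K = 1_000_000 / mired."""
    return 1_000_000 / mireds


def clamped_kelvin(mireds: float) -> int:
    kelvin = int(color_temperature_mired_to_kelvin(mireds))
    return max(DEFAULT_MIN_KELVIN, min(DEFAULT_MAX_KELVIN, kelvin))


print(clamped_kelvin(370))   # 2702 K, already inside the supported range
print(clamped_kelvin(500))   # 2000 K requested -> clamped to 2700
print(clamped_kelvin(100))   # 10000 K requested -> clamped to 6000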