message | diff
---|---|
config/core: Fix handling of deprecated parameters
Provide a warning to the user when attempting to set a deprecated
parameter, rather than during validation, and only raise the warning
if a value has been explicitly provided. | @@ -290,6 +290,9 @@ class ConfigurationPoint(object):
def set_value(self, obj, value=None, check_mandatory=True):
if self.deprecated:
+ if value is not None:
+ msg = 'Depreciated parameter supplied for "{}" in "{}". The value will be ignored.'
+ logger.warning(msg.format(self.name, obj.name))
return
if value is None:
if self.default is not None:
@@ -312,11 +315,9 @@ class ConfigurationPoint(object):
setattr(obj, self.name, value)
def validate(self, obj, check_mandatory=True):
- value = getattr(obj, self.name, None)
if self.deprecated:
- msg = 'Depreciated parameter supplied for "{}" in "{}". The value will be ignored.'
- logger.warning(msg.format(self.name, obj.name))
return
+ value = getattr(obj, self.name, None)
if value is not None:
self.validate_value(obj.name, value)
else:
|
add support for psqlextra extensions in reset_db
This attempts to support the django-postgres-extra library, which is also a valid PostgreSQL engine | @@ -112,6 +112,7 @@ Type 'yes' to continue, or 'no' to cancel: """ % (database_name,))
'django.db.backends.postgresql',
'django.db.backends.postgresql_psycopg2',
'django.db.backends.postgis',
+ 'psqlextra.backend',
))
if engine in SQLITE_ENGINES:
|
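For context, a minimal sketch of the kind of Django settings entry the added engine string matches; only the `ENGINE` value comes from the diff above, everything else is an illustrative assumption:

```python
# settings.py -- hypothetical project configuration.
DATABASES = {
    "default": {
        "ENGINE": "psqlextra.backend",  # django-postgres-extra backend, now recognised by reset_db
        "NAME": "mydb",
        "USER": "myuser",
        "PASSWORD": "secret",
        "HOST": "localhost",
        "PORT": "5432",
    }
}
```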
fixing example_runner.ipynb
added the missing import of utils as pointed out in an issue. | }
],
"source": [
+ "from naslib.utils import utils\n",
"config = utils.get_config_from_args(config_type='nas')\n",
"\n",
"logger = setup_logger(config.save + \"/log.log\")\n",
|
Remove tflearn package
This package is unmaintained and hasn't been updated since 2017. Keras is now the way to go.
Additionally, this package was broken because it requires TensorFlow 1.x and we are using 2.x | @@ -208,8 +208,6 @@ RUN pip install mpld3 && \
pip install plotly && \
pip install git+https://github.com/nicta/dora.git && \
pip install git+https://github.com/hyperopt/hyperopt.git && \
- # tflean. Deep learning library featuring a higher-level API for TensorFlow. http://tflearn.org
- pip install git+https://github.com/tflearn/tflearn.git && \
pip install fitter && \
pip install langid && \
# Delorean. Useful for dealing with datetime
|
Add arguments to __repr__ in Distribution base class
Summary: Pull Request resolved: | @@ -221,4 +221,8 @@ class Distribution(object):
raise ValueError('The value argument must be within the support')
def __repr__(self):
- return self.__class__.__name__ + '()'
+ param_names = [k for k, _ in self.arg_constraints.items()]
+ args_string = ', '.join(['{}: {}'.format(p, self.__dict__[p]
+ if self.__dict__[p].dim() == 0
+ else self.__dict__[p].size()) for p in param_names])
+ return self.__class__.__name__ + '(' + args_string + ')'
|
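A brief usage sketch of the effect of the change above; the exact output is approximate and depends on the PyTorch version:

```python
import torch
from torch.distributions import Normal

# Scalar parameters are printed as values...
print(Normal(torch.tensor(0.0), torch.tensor(1.0)))
# -> Normal(loc: 0.0, scale: 1.0)

# ...while non-scalar parameters are printed as their sizes.
print(Normal(torch.zeros(3), torch.ones(3)))
# -> Normal(loc: torch.Size([3]), scale: torch.Size([3]))
```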
define int bounds for _SizesToOffsets
This patch defines integer bounds for `_SizesToOffsets`. | @@ -3719,6 +3719,7 @@ class _SizesToOffsets(Array):
def __init__(self, sizes):
assert sizes.ndim == 1
assert sizes.dtype == int
+ assert sizes._intbounds[0] >= 0
self._sizes = sizes
super().__init__(args=[sizes], shape=(sizes.shape[0]+1,), dtype=int)
@@ -3730,6 +3731,11 @@ class _SizesToOffsets(Array):
if not where:
return Range(self.shape[0]) * appendaxes(unaligned, self.shape[:1])
+ def _intbounds_impl(self):
+ n = self._sizes.size._intbounds[1]
+ m = self._sizes._intbounds[1]
+ return 0, (0 if n == 0 or m == 0 else n * m)
+
class LoopConcatenate(Array):
@types.apply_annotations
|
Changes GSTModelPack method name: get_gst_circuits_list -> get_gst_circuits.
This follows our guidelines better by not putting the names of types
within function/method names. | @@ -232,7 +232,7 @@ class GSTModelPack(ModelPack):
kwargs.get('add_default_protocol', False),
)
- def get_gst_circuits_list(self, max_max_length, qubit_labels=None, fpr=False, lite=True, **kwargs):
+ def get_gst_circuits(self, max_max_length, qubit_labels=None, fpr=False, lite=True, **kwargs):
""" Construct a :class:`pygsti.objects.BulkCircuitList` from this modelpack.
Parameters
@@ -274,14 +274,14 @@ class GSTModelPack(ModelPack):
assert(len(qubit_labels) == len(self._sslbls)), \
"Expected %d qubit labels and got: %s!" % (len(self._sslbls), str(qubit_labels))
- structs = _make_lsgst_lists(self._target_model(qubit_labels), # Note: only need gate names here
+ lists = _make_lsgst_lists(self._target_model(qubit_labels), # Note: only need gate names here
self.prep_fiducials(qubit_labels),
self.meas_fiducials(qubit_labels),
self.germs(qubit_labels, lite),
list(_gen_max_length(max_max_length)),
fidpairs,
**kwargs)
- return structs[-1] # just return final struct (for longest sequences)
+ return lists[-1] # just return final list (for longest sequences)
class RBModelPack(ModelPack):
|
run bimpm configuration
map=0.541182 ndcg@5=0.596185 ndcg@3=0.509645 | "num_iters": 400,
"display_interval": 10,
"test_weights_iters": 400,
- "optimizer": "adadelta",
- "learning_rate": 0.0001
+ "optimizer": "adam",
+ "learning_rate": 0.001
},
"inputs": {
"share": {
"model_path": "./matchzoo/models/",
"model_py": "bimpm.BiMPM",
"setting": {
- "hidden_size": 100,
+ "hidden_size": 50,
"channel": 50,
- "aggre_size": 200,
+ "aggre_size": 50,
"with_full_match": true,
"with_maxpool_match": true,
"with_attentive_match": true,
{
"object_name": "rank_hinge_loss" ,
"object_params": {
- "margin": 1.0
+ "margin": 0.5
}
}
],
|
Properly handle User-Agent when AXES_ONLY_USER_FAILURES is set
Fixes access to undefined variable in _query_user_attempts and
respects documentation of the settings. | @@ -41,7 +41,7 @@ def _query_user_attempts(request):
else:
params['ip_address'] = ip
- if settings.AXES_USE_USER_AGENT:
+ if settings.AXES_USE_USER_AGENT and not settings.AXES_ONLY_USER_FAILURES:
params['user_agent'] = ua
attempts = AccessAttempt.objects.filter(**params)
@@ -75,7 +75,7 @@ def get_cache_key(request_or_obj):
else:
attributes = ip
- if settings.AXES_USE_USER_AGENT:
+ if settings.AXES_USE_USER_AGENT and not settings.AXES_ONLY_USER_FAILURES:
attributes += ua
cache_hash_key = 'axes-{}'.format(md5(attributes).hexdigest())
|
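A small configuration sketch of the combination addressed above; the two setting names are real django-axes settings, the rest is illustrative:

```python
# settings.py -- illustrative only.
AXES_ONLY_USER_FAILURES = True  # track failed logins per username only
AXES_USE_USER_AGENT = True      # would normally add the User-Agent to the lookup

# With the fix above, the User-Agent is no longer mixed into the
# AccessAttempt query or the cache key when AXES_ONLY_USER_FAILURES is set,
# matching what the documentation promises.
```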
Move pypowervm requirement to 1.1.12
pypowervm needs to be 1.1.12 or later for PowerVM vSCSI cinder volume
support [1].
[1] | @@ -61,7 +61,7 @@ microversion-parse>=0.2.1 # Apache-2.0
os-xenapi>=0.3.1 # Apache-2.0
tooz>=1.58.0 # Apache-2.0
cursive>=0.2.1 # Apache-2.0
-pypowervm>=1.1.11 # Apache-2.0
+pypowervm>=1.1.12 # Apache-2.0
os-service-types>=1.2.0 # Apache-2.0
taskflow>=2.16.0 # Apache-2.0
python-dateutil>=2.5.3 # BSD
|
fix: add option to disable the use of security groups
In advanced zones, security groups are supported only on the KVM hypervisor.
With hypervisors other than KVM, we need a way to disable the use of SGs
ref: | @@ -168,9 +168,15 @@ def get_security_groups(conn, vm_):
'''
Return a list of security groups to use, defaulting to ['default']
'''
- return config.get_cloud_config_value('securitygroup', vm_, __opts__,
- default=['default'])
-
+ securitygroup_enabled = config.get_cloud_config_value(
+ 'securitygroup_enabled', vm_, __opts__, default=True
+ )
+ if securitygroup_enabled:
+ return config.get_cloud_config_value(
+ 'securitygroup', vm_, __opts__, default=['default']
+ )
+ else:
+ return False
def get_password(vm_):
'''
@@ -281,9 +287,11 @@ def create(vm_):
'image': get_image(conn, vm_),
'size': get_size(conn, vm_),
'location': get_location(conn, vm_),
- 'ex_security_groups': get_security_groups(conn, vm_)
}
+ if get_security_groups(conn, vm_) is not False:
+ kwargs['ex_security_groups'] = get_security_groups(conn, vm_)
+
if get_keypair(vm_) is not False:
kwargs['ex_keyname'] = get_keypair(vm_)
|
[sync] Don't exclude root dir from sync events
This allows us to act early when we attempt to sync the deletion of the root directory. | @@ -1401,10 +1401,6 @@ class SyncEngine:
:returns: Whether the path is excluded from syncing.
"""
- # Is root folder?
- if path in ("/", ""):
- return True
-
dirname, basename = osp.split(path)
# Is in excluded files?
|
Support new primitives for machine-independent zo files.
Still missing the ability to enable them on the command line. | @@ -18,7 +18,7 @@ from pycket.values_string import W_String
from pycket.values_parameter import top_level_config
from pycket.error import SchemeException
from pycket import pycket_json
-from pycket.prims.expose import prim_env, expose, default
+from pycket.prims.expose import prim_env, expose, default, expose_val
from pycket.prims.general import make_pred
from pycket.prims.correlated import W_Correlated
from pycket.prims.vector import vector
@@ -135,6 +135,16 @@ our_vm_bytes = values.W_Bytes.from_string("pycket")
def vm_bytes():
return our_vm_bytes
+w_pycket_sym = values.W_Symbol.make("pycket")
+
+# FIXME: control initialization of this from command line using -W
+expose_val("current-compile-target-machine", values_parameter.W_Parameter(w_pycket_sym))
+
+
+@expose("compile-target-machine?", [values.W_Symbol])
+def compile_machine_target_p(v):
+ return values.W_Bool.make(v is w_pycket_sym)
+
@expose("hash->linklet-bundle", [W_Object])
def hash_to_linklet_bundle(content):
return W_LinkletBundle(content)
|
Set reasonable default ANSIBLE_COLLECTIONS_PATHS
When the current role is part of a collection, set the collection
search path for ANSIBLE to the parent collection, so that any playbooks
referencing collections within the same namespace can do their job
appropriately | @@ -413,6 +413,25 @@ def default_options(self):
@property
def default_env(self):
+ # Finds if the current project is part of an ansible_collections hierarchy
+ collection_indicator = "ansible_collections"
+ collections_paths_list = [
+ util.abs_path(
+ os.path.join(self._config.scenario.ephemeral_directory, "collections")
+ )
+ ]
+ if collection_indicator in self._config.project_directory:
+ collection_path, right = self._config.project_directory.split(
+ collection_indicator
+ )
+ collections_paths_list.append(util.abs_path(collection_path))
+ collections_paths_list.extend(
+ [
+ util.abs_path(os.path.join(os.path.expanduser("~"), ".ansible")),
+ "/usr/share/ansible/collections",
+ "/etc/ansible/collections",
+ ]
+ )
env = util.merge_dicts(
os.environ,
{
@@ -434,23 +453,7 @@ def default_env(self):
"/etc/ansible/roles",
]
),
- "ANSIBLE_COLLECTIONS_PATHS": ":".join(
- [
- util.abs_path(
- os.path.join(
- self._config.scenario.ephemeral_directory, "collections"
- )
- ),
- util.abs_path(
- os.path.join(self._config.project_directory, os.path.pardir)
- ),
- util.abs_path(
- os.path.join(os.path.expanduser("~"), ".ansible")
- ),
- "/usr/share/ansible/collections",
- "/etc/ansible/collections",
- ]
- ),
+ "ANSIBLE_COLLECTIONS_PATHS": ":".join(collections_paths_list),
"ANSIBLE_LIBRARY": ":".join(self._get_modules_directories()),
"ANSIBLE_FILTER_PLUGINS": ":".join(
[
|
[add] logger - use env for rotating log params
Two environment variables were added to allow customization of the rotating log parameters:
`FLEXGET_LOG_MAXBYTES` defines the maximum bytes per file (default 1 MB)
`FLEXGET_LOG_MAXCOUNT` defines the maximum number of files (default 9) | @@ -10,6 +10,7 @@ import sys
import threading
import uuid
import warnings
+import os
from flexget import __version__
from flexget.utils.tools import io_encoding
@@ -18,6 +19,9 @@ from flexget.utils.tools import io_encoding
TRACE = 5
# A level more detailed than INFO
VERBOSE = 15
+# environment variables to modify rotating log parameters from defaults of 1 MB and 9 files
+ENV_MAXBYTES = 'FLEXGET_LOG_MAXBYTES'
+ENV_MAXCOUNT = 'FLEXGET_LOG_MAXCOUNT'
# Stores `task`, logging `session_id`, and redirected `output` stream in a thread local context
local_context = threading.local()
@@ -198,7 +202,9 @@ def start(filename=None, level=logging.INFO, to_console=True, to_file=True):
formatter = FlexGetFormatter()
if to_file:
- file_handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1000 * 1024, backupCount=9)
+ file_handler = logging.handlers.RotatingFileHandler(filename,
+ maxBytes=int(os.environ.get(ENV_MAXBYTES, 1000 * 1024)),
+ backupCount=int(os.environ.get(ENV_MAXCOUNT, 9)))
file_handler.setFormatter(formatter)
file_handler.setLevel(level)
logger.addHandler(file_handler)
|
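For illustration, a minimal sketch of how the two environment variables described above could be set before starting FlexGet; the chosen values are arbitrary assumptions:

```python
import os

# Rotate at 5 MB per log file and keep up to 20 files instead of the
# defaults of 1 MB and 9 (values here are just an example).
os.environ["FLEXGET_LOG_MAXBYTES"] = str(5 * 1024 * 1024)
os.environ["FLEXGET_LOG_MAXCOUNT"] = "20"

# flexget.logger.start() will now pick these up when it builds the
# RotatingFileHandler, per the diff above.
```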
Update setup.py
bumping version number | @@ -38,8 +38,8 @@ def find_package_data(data_root, package_root):
# #########################
-VERSION = '0.7.3dev0'
-ISRELEASED = False
+VERSION = '0.8.0'
+ISRELEASED = True
__version__ = VERSION
# #########################
|
Temporarily display deployed_grpc dagster-graphql coverage until we resolve thread-safety flakiness
is there an issue I can link to this?
Summary: As title
Test Plan: BK
Reviewers: sashank, alangenfeld | @@ -274,7 +274,9 @@ def graphql_pg_extra_cmds_fn(_):
"-sqlite_instance_hosted_user_process_env",
"-sqlite_instance_multi_location",
"-sqlite_instance_managed_grpc_env",
- "-sqlite_instance_deployed_grpc_env",
+ # Temporarily disabling due to thread-safety issues with
+ # deployed gRPC servers (https://github.com/dagster-io/dagster/issues/3404)
+ # "-sqlite_instance_deployed_grpc_env",
],
),
ModuleBuildSpec(
@@ -287,7 +289,9 @@ def graphql_pg_extra_cmds_fn(_):
"-postgres_instance_hosted_user_process_env",
"-postgres_instance_multi_location",
"-postgres_instance_managed_grpc_env",
- "-postgres_instance_deployed_grpc_env",
+ # Temporarily disabling due to thread-safety issues with
+ # deployed gRPC servers (https://github.com/dagster-io/dagster/issues/3404)
+ # "-postgres_instance_deployed_grpc_env",
],
),
ModuleBuildSpec(
|
Export adaptive subdivision parameters to RPR scene file using scene render size
PURPOSE
A quick fix of RPR file export to apply adaptive subdivision parameters.
EFFECT OF CHANGE
added adaptive subdivision export to RPR file. | @@ -72,6 +72,10 @@ class ExportEngine(Engine):
self.rpr_context.set_parameter(pyrpr.CONTEXT_PREVIEW, False)
scene.rpr.export_ray_depth(self.rpr_context)
+ # adaptive subdivision will be limited to the current scene render size
+ self.rpr_context.enable_aov(pyrpr.AOV_COLOR)
+ self.rpr_context.sync_auto_adapt_subdivision(self.rpr_context.width, self.rpr_context.height)
+
self.rpr_context.sync_portal_lights()
# Exported scene will be rendered vertically flipped, flip it back
|
Implement UndefinedType::typeMeta.
Summary: Pull Request resolved: | @@ -9,7 +9,7 @@ ScalarType UndefinedType::scalarType() const {
return ScalarType::Undefined;
}
caffe2::TypeMeta UndefinedType::typeMeta() const {
- AT_ERROR("typeMeta not defined for UndefinedType");
+ return scalarTypeToTypeMeta(scalarType());
}
Backend UndefinedType::backend() const {
return Backend::Undefined;
|
[Tune] PTL replace deprecated `running_sanity_check` with `sanity_checking`
`running_sanity_check` was deprecated and removed in favor of `sanity_checking` | @@ -174,7 +174,7 @@ class TuneReportCallback(TuneCallback):
def _get_report_dict(self, trainer: Trainer, pl_module: LightningModule):
# Don't report if just doing initial validation sanity checks.
- if trainer.running_sanity_check:
+ if trainer.sanity_checking:
return
if not self._metrics:
report_dict = {
@@ -228,7 +228,7 @@ class _TuneCheckpointCallback(TuneCallback):
self._filename = filename
def _handle(self, trainer: Trainer, pl_module: LightningModule):
- if trainer.running_sanity_check:
+ if trainer.sanity_checking:
return
step = f"epoch={trainer.current_epoch}-step={trainer.global_step}"
with tune.checkpoint_dir(step=step) as checkpoint_dir:
|
Updated run_danesfield wrt new obj file output location
The roof_geon_extraction tool now moves the output obj files to the
working_dir, updated the run_danesfield script to reflect this change. | @@ -414,12 +414,11 @@ def main(config_fpath):
# Buildings to DSM
#############################################
logging.info('---- Running buildings to dsm ----')
- objs_dir = os.path.join(working_dir, "output_obj")
# Generate the output DSM
output_dsm = os.path.join(working_dir, "buildings_to_dsm_DSM.tif")
cmd_args = [
- objs_dir,
+ working_dir,
dtm_file,
output_dsm]
logging.info(cmd_args)
@@ -428,7 +427,7 @@ def main(config_fpath):
# Generate the output CLS
output_cls = os.path.join(working_dir, "buildings_to_dsm_CLS.tif")
cmd_args = [
- objs_dir,
+ working_dir,
dtm_file,
output_cls,
'--render_cls']
|
Update Node instructions to v10.x
The required Node version was updated from v6.x to v10.x.
This commit updates the docs to reflect that.
Ref: learningequality/kolibri#4524 | @@ -28,12 +28,12 @@ Install environment dependencies
#. Install `Python <https://www.python.org/downloads/windows/>`__ if you are on Windows, on Linux and OSX Python is preinstalled (recommended versions 2.7+ or 3.4+).
#. Install `pip <https://pypi.python.org/pypi/pip>`__ package installer.
-#. Install `Node.js <https://nodejs.org/en/>`__ (version 6 is required).
+#. Install `Node.js <https://nodejs.org/en/>`__ (version 10 is required).
#. Install `Yarn <https://yarnpkg.com/>`__ according the `instructions specific for your OS <https://yarnpkg.com/en/docs/install/>`__.
#. Install and set up the `Git LFS extension <https://git-lfs.github.com/>`__. Remember to initialize with ``git lfs install`` after installing.
.. note::
- Installing Node.js version 6.x:
+ Installing Node.js version 10.x:
* On a Mac, you can use the `Homebrew <http://brew.sh/>`__ package manager.
* On Ubuntu/Debian, either install Node.js via `nvm <https://github.com/creationix/nvm>`__ or use the `apt` package manager to install a system-wide version and block upgrades:
@@ -41,16 +41,13 @@ Install environment dependencies
.. code-block:: bash
# Add apt sources from nodesource.com
- curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash -
- # Verify the latest version 6 of nodejs
- apt-cache showpkg nodejs-legacy
- # Install latest version 6 nodejs
- sudo apt install nodejs=6.14.1-1nodesource1
+ curl -sL https://deb.nodesource.com/setup_10.x | sudo -E bash -
+ # Install latest version 10 nodejs
+ sudo apt install nodejs
# Make sure it doesn't get upgrade to later versions available in
# the official repos.
sudo apt-mark hold nodejs
-
Ready for the fun part in the Terminal? Here we go!
|
updates craft mailable command description
Referenced in issue | @@ -4,7 +4,7 @@ from ..commands import BaseScaffoldCommand
class MailableCommand(BaseScaffoldCommand):
"""
- Creates a new Job.
+ Creates a new Mailable.
mailable
{name : Name of the job you want to create}
|
new tests
ID methods getHGT, bbox, is_processed, compression, export2sqlite, getGammaImages
SAFE method getOSV | import pyroSAR
from pyroSAR.spatial import crsConvert, haversine
+from pyroSAR.ancillary import finder
import pytest
+import shutil
import os
testdir = os.getenv('TESTDATA_DIR', 'pyroSAR/tests/data/')
@@ -12,6 +14,7 @@ testcases = [
'bbox_area': 7.573045244595988,
'compression': 'zip',
'corners': {'ymax': 52.183979, 'ymin': 50.295261, 'xmin': 8.017178, 'xmax': 12.0268},
+ 'hgt_len': 15,
'lines': 16685,
'outname': 'S1A__IW___A_20150222T170750',
'orbit': 'A',
@@ -48,8 +51,8 @@ class Test_Metadata():
assert scene['pyro'].stop == scene['stop']
assert scene['pyro'].sensor == scene['sensor']
assert scene['pyro'].spacing == scene['spacing']
- assert scene['pyro'].is_processed('data/') is False
assert scene['pyro'].bbox().getArea() == scene['bbox_area']
+ assert len(scene['pyro'].getHGT()) == scene['hgt_len']
def test_identify_fail():
@@ -61,14 +64,29 @@ def test_export2dict():
pass
-def test_archive():
+def test_scene():
scene = 'pyroSAR/tests/data/S1A_IW_GRDH_1SDV_20150222T170750_20150222T170815_004739_005DD8_3768.zip'
dbfile = os.path.join('pyroSAR/tests/data/', 'scenes.db')
- if os.path.isfile(dbfile):
- os.remove(dbfile)
with pyroSAR.Archive(dbfile) as db:
db.insert(scene, verbose=True)
assert db.size == (1, 0)
+ id = pyroSAR.identify(scene)
+ test_dir = 'pyroSAR/tests/data/test'
+ os.makedirs(test_dir)
+ id.bbox(outname='pyroSAR/tests/data/test/bbox_test.shp')
+ assert id.is_processed(test_dir) is False
+ id.unpack('pyroSAR/tests/data/test')
+ assert id.compression is None
+ os.remove(dbfile)
+ id.export2sqlite(dbfile)
+ with pytest.raises(IOError):
+ id.getGammaImages()
+ assert id.getGammaImages(id.scene) == []
+ osvdir = os.path.join(id.scene, 'osv')
+ id.getOSV(osvdir)
+ assert len(finder(os.path.join(osvdir, 'POEORB'), ['S1A*EOF'])) == 3
+ shutil.rmtree(test_dir)
+ os.remove(dbfile)
def test_crsConvert():
|
check likelihood levels only if loop is executed
the loop is not guaranteed to run if maxcall etc. are used | @@ -1168,6 +1168,8 @@ class DynamicSampler(object):
# sample past the original bounds "for free".
for i in range(1):
+ iterated_batch = False
+ # To identify if the loop below was executed or not
for it, results in enumerate(
self.sampler.sample(dlogz=dlogz_batch,
logl_max=logl_max,
@@ -1198,11 +1200,11 @@ class DynamicSampler(object):
self.ncall += nc
self.eff = 100. * self.it / self.ncall
self.it += 1
-
+ iterated_batch = True
yield (worst, ustar, vstar, loglstar, nc, worst_it, boundidx,
bounditer, self.eff)
- if loglstar < logl_max:
+ if iterated_batch and loglstar < logl_max:
warnings.warn('Warning. The maximum likelihood not reached '
'in the batch. '
'You may not have enough livepoints')
|
integ-tests: retry without --keep-logs on cluster deletion errors
keep-logs requires a stack update, which cannot be executed when the stack is in a failure state | @@ -173,13 +173,19 @@ class ClustersFactory:
logging.info("Sleeping for 60 seconds in case cluster is not ready yet")
time.sleep(60)
- @retry(stop_max_attempt_number=10, wait_fixed=5000, retry_on_exception=retry_if_subprocess_error)
+ @retry(stop_max_attempt_number=5, wait_fixed=5000, retry_on_exception=retry_if_subprocess_error)
def destroy_cluster(self, name, keep_logs=False):
"""Destroy a created cluster."""
logging.info("Destroying cluster {0}".format(name))
if name in self.__created_clusters:
keep_logs = keep_logs or (self._keep_logs_on_failure and not self.__created_clusters[name].create_complete)
+ try:
self.__created_clusters[name].delete(keep_logs=keep_logs)
+ except subprocess.CalledProcessError as e:
+ logging.error(
+ "Failed when deleting cluster %s with error %s. Retrying deletion without --keep-logs.", name, e
+ )
+ self.__created_clusters[name].delete(keep_logs=False)
del self.__created_clusters[name]
logging.info("Cluster {0} deleted successfully".format(name))
else:
|
pe: better handle invalid import name
closes | @@ -903,8 +903,13 @@ class PE(object):
idx+=1
continue
+ try:
funcname = ibn.Name
+ except UnicodeDecodeError:
+ funcname = None
+ logger.warning("pe: failed to read import name at RVA 0x%x", ibn_rva)
+ if funcname is not None:
imports_list.append((save_name + arrayoff, libname, funcname))
idx += 1
|
Add a mention of the True/False returns with __virtual__()
And their relationship to `__virtualname__`.
Fixes | @@ -405,6 +405,10 @@ similar to the following:
return __virtualname__
return False
+Note that the ``__virtual__()`` function will return either a ``True`` or ``False``
+value. If it returns a ``True`` value, this ``__virtualname__`` module-level attribute
+can be set as seen in the above example. This is the name that the module should be
+referred to as.
Documentation
=============
|
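A minimal sketch of the pattern the added documentation describes; the module and command names here are made up for illustration:

```python
# my_custom_module.py -- hypothetical Salt execution module (sketch only).
import shutil

# The name the module should be referred to as when __virtual__() succeeds.
__virtualname__ = 'mycmd'


def __virtual__():
    # Only load this module if the underlying binary is available on the minion.
    if shutil.which('mycmd'):
        return __virtualname__
    return False
```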
[CMSIS-NN] Stop test generating 1x1 and 1xn Conv2d
I believe the flakiness in is the small chance of generating a
1x1 or 1xn convolution which allows for a different buffer size:
Therefore, careful selection of the distribution should alleviate
this issue. | @@ -37,7 +37,7 @@ namespace cmsisnn {
static std::random_device rd;
static std::mt19937 gen(rd());
-static std::uniform_int_distribution<> fake_parameters(1, 100);
+static std::uniform_int_distribution<> fake_parameters(2, 100);
class CMSISNNCalculatedBufferSize : public testing::TestWithParam<std::array<int32_t, 3>> {};
@@ -97,8 +97,7 @@ TEST(CMSISNNConv2dBufferSize, Conv1xN) {
ASSERT_EQ(conv2d_1xn(kHasMVE, 32), 0);
}
-// Test disabled, see https://github.com/apache/tvm/issues/10748
-TEST(DISABLED_CMSISNNConv2dBufferSize, Default) {
+TEST(CMSISNNConv2dBufferSize, Default) {
int32_t any = fake_parameters(gen);
int32_t input_c = fake_parameters(gen);
|
[query/shuffler] somewhat tune branching factor to dataset & log phases
For example, a tiny table with one partition really ought not to use 64
branches. In the future, we should track the byte-size of partitions and
just use a local sort for one partition tables. | @@ -109,7 +109,13 @@ object LowerDistributedSort {
val oversamplingNum = 3
val seed = 7L
- val defaultBranchingFactor = ctx.getFlag("shuffle_max_branch_factor").toInt
+ val maxBranchingFactor = ctx.getFlag("shuffle_max_branch_factor").toInt
+ val defaultBranchingFactor = if (inputStage.numPartitions < maxBranchingFactor) {
+ Math.max(2, inputStage.numPartitions)
+ } else {
+ maxBranchingFactor
+ }
+
val sizeCutoff = ctx.getFlag("shuffle_cutoff_to_local_sort").toInt
val (keyToSortBy, _) = inputStage.rowType.select(sortFields.map(sf => sf.field))
@@ -119,6 +125,7 @@ object LowerDistributedSort {
val initialTmpPath = ctx.createTmpPath("hail_shuffle_temp_initial")
val writer = PartitionNativeWriter(spec, keyToSortBy.fieldNames, initialTmpPath, None, None)
+ log.info("DISTRIBUTED SORT: PHASE 1: WRITE DATA")
val initialStageDataRow = CompileAndEvaluate[Annotation](ctx, inputStage.mapCollectWithGlobals(relationalLetsAbove) { part =>
WritePartition(part, UUID4(), writer)
}{ case (part, globals) =>
@@ -230,6 +237,7 @@ object LowerDistributedSort {
}
})
+ log.info(s"DISTRIBUTED SORT: PHASE ${i+1}: STAGE 1: SAMPLE VALUES FROM PARTITIONS")
// Going to check now if it's fully sorted, as well as collect and sort all the samples.
val pivotsWithEndpointsAndInfoGroupedBySegmentNumber = CompileAndEvaluate[Annotation](ctx, pivotsPerSegmentAndSortedCheck)
.asInstanceOf[IndexedSeq[Row]].map(x => (x(0).asInstanceOf[IndexedSeq[Row]], x(1).asInstanceOf[Boolean], x(2).asInstanceOf[Row], x(3).asInstanceOf[IndexedSeq[Row]], x(4).asInstanceOf[IndexedSeq[Row]]))
@@ -278,6 +286,7 @@ object LowerDistributedSort {
MakeTuple.ordered(IndexedSeq(segmentIdx, StreamDistribute(partitionStream, ArrayRef(pivotsWithEndpointsGroupedBySegmentIdx, indexIntoPivotsArray), path, StructCompare(keyToSortBy, keyToSortBy, sortFields.toArray), spec)))
}
+ log.info(s"DISTRIBUTED SORT: PHASE ${i+1}: STAGE 2: DISTRIBUTE")
val distributeResult = CompileAndEvaluate[Annotation](ctx, distribute)
.asInstanceOf[IndexedSeq[Row]].map(row => (
row(0).asInstanceOf[Int],
@@ -335,6 +344,7 @@ object LowerDistributedSort {
WritePartition(sortedStream, UUID4(), writer)
}
+ log.info(s"DISTRIBUTED SORT: PHASE ${i+1}: LOCALLY SORT FILES")
val sortedFilenames = CompileAndEvaluate[Annotation](ctx, sortedFilenamesIR).asInstanceOf[IndexedSeq[Row]].map(_(0).asInstanceOf[String])
val newlySortedSegments = loopState.smallSegments.zip(sortedFilenames).map { case (sr, newFilename) =>
OutputPartition(sr.indices, sr.interval, IndexedSeq(initialTmpPath + newFilename))
|
Removed reference to Kivy VM
Removed reference to Kivy VM as it doesn't actually exist | @@ -22,8 +22,6 @@ recommend targeting Python 3 on Android, but you can target both
Python 3 and Python 2 regardless of which version you use with
buildozer on the desktop.
-We provide a ready-to-use [Virtual Machine for Virtualbox](https://kivy.org/#download).
-
Note that this tool has nothing to do with the eponymous online build service
[buildozer.io](http://buildozer.io).
@@ -175,41 +173,6 @@ config, along with the environment variables that would override them.
- ``package.name`` -> ``$APP_PACKAGE_NAME``
- ``p4a.source_dir`` -> ``$APP_P4A_SOURCE_DIR``
-
-## Buildozer Virtual Machine
-
-The current virtual machine (available via https://kivy.org/downloads/) allow
-you to have a ready to use vm for building android application.
-
-### Using shared folders
-
-If the Virtualbox Guest tools are outdated, install the latest one:
-
-- in the Virtualbox: `Devices` -> `Install Guest Additions CD images`
-- in the guest/linux: Go to the cdrom and run the installer
-- reboot the vm
-
-VirtualBox filesystem doesn't support symlink anymore (don't
-try the setextradata solution, it doesn't work.). So you must
-do the build outside the shared folder. One solution:
-
-- `sudo mkdir /build`
-- `sudo chown kivy /build`
-- In your buildozer.spec, section `[buildozer]`, set `build_dir = /build/buildozer-myapp`
-
-
-### Using your devices via the VM
-
-There is a little icon on the bottom left that represent an USB plug.
-Select it, and select your android device on it. Then you can check:
-
- buildozer android adb -- devices
-
-If it doesn't, use Google. They are so many differents way / issues
-depending your phone that Google will be your only source of
-information, not us :)
-
-
## Support
If you need assistance, you can ask for help on our mailing list:
|
Split exception assertion in pieces
Amazon Linux in particular has some funny encoding problems. By
splitting the assertion into multiple ones, we can provide roughly the
same assurance, but the test will actually pass. | @@ -1734,7 +1734,9 @@ class ConfigTestCase(TestCase):
with self.assertRaises(jsonschema.exceptions.ValidationError) as excinfo:
jsonschema.validate({'item': {'sides': '4', 'color': 'blue'}}, TestConf.serialize())
if JSONSCHEMA_VERSION >= _LooseVersion('3.0.0'):
- self.assertIn('\'4\' is not of type \'boolean\'', excinfo.exception.message)
+ self.assertIn("'4'", excinfo.exception.message)
+ self.assertIn("is not of type", excinfo.exception.message)
+ self.assertIn("'boolean'", excinfo.exception.message)
else:
self.assertIn('is not valid under any of the given schemas', excinfo.exception.message)
@@ -1840,7 +1842,9 @@ class ConfigTestCase(TestCase):
with self.assertRaises(jsonschema.exceptions.ValidationError) as excinfo:
jsonschema.validate({'item': ['maybe']}, TestConf.serialize())
if JSONSCHEMA_VERSION >= _LooseVersion('3.0.0'):
- self.assertIn('\'maybe\' is not one of [\'yes\']', excinfo.exception.message)
+ self.assertIn("'maybe'", excinfo.exception.message)
+ self.assertIn("is not one of", excinfo.exception.message)
+ self.assertIn("'yes'", excinfo.exception.message)
else:
self.assertIn('is not valid under any of the given schemas', excinfo.exception.message)
@@ -1895,7 +1899,9 @@ class ConfigTestCase(TestCase):
with self.assertRaises(jsonschema.exceptions.ValidationError) as excinfo:
jsonschema.validate({'item': ['maybe']}, TestConf.serialize())
if JSONSCHEMA_VERSION >= _LooseVersion('3.0.0'):
- self.assertIn('\'maybe\' is not one of [\'yes\']', excinfo.exception.message)
+ self.assertIn("'maybe'", excinfo.exception.message)
+ self.assertIn("is not one of", excinfo.exception.message)
+ self.assertIn("'yes'", excinfo.exception.message)
else:
self.assertIn('is not valid under any of the given schemas', excinfo.exception.message)
|
adalog/image: minor update for code coverage
TN: | @@ -22,6 +22,7 @@ procedure Main is
or Logic_Any (Empty_Array)
or Logic_All (Empty_Array))
and Equals (X, Y)
+ and Logic_Any ((1 => True_Rel))
and Logic_All ((1 => True_Rel));
begin
X.Dbg_Name := new String'("X");
|
Update sso-ldap.md
Updated what a 'forest' means. | @@ -70,7 +70,7 @@ Yes, using the [bulk import tool](https://docs.mattermost.com/deployment/bulk-lo
##### Can I connect to multiple AD servers?
-Not right now, need to connect the instances in a forest.
+Not right now. You'll need to connect the instances in a forest (a collection of LDAP domains).
Consider upvoting the [feature request](https://mattermost.uservoice.com/forums/306457-general/suggestions/13589904-add-the-abilitiry) on our forum.
|
bump_release: simplify conditionals
Also remove a redundant early return. The last remaining part of the run
method is predicated on `not is_scratch_build()` already. | @@ -272,17 +272,14 @@ class BumpReleasePlugin(PreBuildPlugin):
user_provided_release=True)
return
- if release:
- if not self.append:
+ if release and not self.append:
self.log.debug("release set explicitly so not incrementing")
if not is_scratch_build(self.workflow):
self.check_build_existence_for_explicit_release(component, version, release)
dockerfile_labels[release_label] = release
else:
- return
-
- if not release or self.append:
+ # release not set or release should be appended
self.next_release_general(component, version, release, release_label,
dockerfile_labels)
|
Remove nb_to_doc.py conversion
This step is not needed anymore, since now we are using nbsphinx to
build the docs from notebooks. | @@ -11,15 +11,9 @@ BUILDDIR = _build
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-.PHONY: help Makefile examples
+.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-tutorial:
- tools/nb_to_doc.py examples/tutorial
-
-examples:
- tools/nb_to_doc.py examples/example_05_08_01 examples/example_05_08_02 examples/example_05_09_01 examples/example_05_09_02 examples/example_05_09_04 examples/example_05_09_05 examples/example_05_09_06 examples/example_05_09_09
|
Fixed test_docs_with_domain
This looks like a bad merge from when the DefaultConsumption migration was done. | @@ -22,7 +22,6 @@ from corehq.feature_previews import all_previews
DOC_PROVIDERS = {
DocTypeIDProvider(['Application']),
DocTypeIDProvider(['CommtrackConfig']),
- DocTypeIDProvider(['DefaultConsumption']),
ViewIDProvider('CommCareMultimedia', 'hqmedia/by_domain', DomainKeyGenerator()),
DocTypeIDProvider(['MobileAuthKeyRecord']),
DocTypeIDProvider(['Product']),
|
Utility: fix hex dump on Python 2.
Also adds vertical bars around the ASCII dump. | import sys
import string
import io
+import six
from . import conversion
@@ -101,7 +102,7 @@ def dump_hex_data(data, start_address=0, width=8, output=None, print_ascii=True)
break
if print_ascii:
- s = ""
+ s = "|"
for n in range(start_i, start_i + line_width):
if n >= len(data):
break
@@ -115,7 +116,7 @@ def dump_hex_data(data, start_address=0, width=8, output=None, print_ascii=True)
d = conversion.u32le_list_to_byte_list([d])
d.reverse()
s += "".join((chr(b) if (chr(b) in _PRINTABLE) else '.') for b in d)
- output.write(" " + s)
+ output.write(" " + s + "|")
output.write("\n")
@@ -123,6 +124,6 @@ def dump_hex_data_to_str(data, **kwargs):
"""! @brief Returns a string with data formatted as hex.
@see dump_hex_data()
"""
- sio = io.StringIO()
+ sio = six.StringIO()
dump_hex_data(data, output=sio, **kwargs)
return sio.getvalue()
|
Fixes bug in Circuit.expand_instruments_and_separate_povm(...)
Typo in a variable name ('cir' variable was used instead of 'circuit')
when recursively calling a subroutine caused the
expand_instruments_and_separate_povm to break on circuits with multiple
instruments. Fixed now, but we should add such a unit test in the future. | @@ -3723,7 +3723,7 @@ class Circuit(object):
else:
new_ootree = None
- add_expanded_circuit_outcomes(cir[0:k] + Circuit((expanded_layer_lbl,)) + cir[k + 1:],
+ add_expanded_circuit_outcomes(circuit[0:k] + Circuit((expanded_layer_lbl,)) + circuit[k + 1:],
running_outcomes + selected_instrmt_members, new_ootree, k + 1)
break
|
Typos in example00
1. Changed "ANIT-symmetric" into "ANTI-symmetric" in the comment
2. Replaced == with = | @@ -109,7 +109,7 @@ print(" * |10> not invariant under parity! It represents the physical symmetric
print('\n\nprint pblock=-1 basis:\n')
#
print(basis_singlet)
-print(" * |10> here represents the physical ANIT-symmetric superposition 1/sqrt(2)(|10> - |01>) [see bottom note when printing the symmetry-reduced basis]")
+print(" * |10> here represents the physical ANTI-symmetric superposition 1/sqrt(2)(|10> - |01>) [see bottom note when printing the symmetry-reduced basis]")
print(" * NOTE: same state |01> is used to label both the symmetric and antisymmetric superposition because in this cases quspin uses the smallest integer from the integer representations of the states comprising the superposition states.\n")
#
#--------------------------------------------------
@@ -129,7 +129,7 @@ print(psi_s)
#
# one can also project a full-basis state to a symmetry-reduced basis
psi_s=np.zeros(basis.Ns)
-array_ind_s==basis.index( basis.state_to_int('01') )
+array_ind_s=basis.index( basis.state_to_int('01') )
psi_s[array_ind_s]=1.0 # create the state |01> in the full basis
#
psi_symm_s=basis_singlet.project_to(psi_s,sparse=False) # projects |01> to 1/sqrt(2) (|01> - |10>) in basis_singlet
|
[commands] Refactor quoted_word free function to a StringView method.
Technically a breaking change, however this interface was not
documented or guaranteed to exist. | @@ -33,7 +33,6 @@ import discord
from .errors import *
from .cooldowns import Cooldown, BucketType, CooldownMapping
-from .view import quoted_word
from . import converter as converters
from ._types import _BaseCommand
from .cog import Cog
@@ -421,7 +420,7 @@ class Command(_BaseCommand):
if consume_rest_is_special:
argument = view.read_rest().strip()
else:
- argument = quoted_word(view)
+ argument = view.get_quoted_word()
view.previous = previous
return await self.do_conversion(ctx, converter, argument, param)
@@ -434,7 +433,7 @@ class Command(_BaseCommand):
previous = view.index
view.skip_ws()
- argument = quoted_word(view)
+ argument = view.get_quoted_word()
try:
value = await self.do_conversion(ctx, converter, argument, param)
except CommandError:
@@ -450,7 +449,7 @@ class Command(_BaseCommand):
async def _transform_greedy_var_pos(self, ctx, param, converter):
view = ctx.view
previous = view.index
- argument = quoted_word(view)
+ argument = view.get_quoted_word()
try:
value = await self.do_conversion(ctx, converter, argument, param)
except CommandError:
|
fontconfig: Update Conan conventions
Automatically created by bincrafters-conventions 0.24.3 | -#!/usr/bin/env python
-# -*- coding: utf-8 -*-
import os
from conans import ConanFile, CMake, tools, RunEnvironment
-
class FontconfigTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake", "cmake_find_package"
|
Make save_csv synchronous
Not synchronizing at the end of writing the file may lead to strange
effects for imbalanced tensors. | @@ -936,7 +936,7 @@ def save_csv(
truncate: bool = True,
):
"""
- Saves data to CSV files
+ Saves data to CSV files. Only 2D data, all split axes.
Parameters
----------
@@ -1045,6 +1045,7 @@ def save_csv(
offset = offset + row_width
csv_out.Close()
+ data.comm.handle.Barrier()
def save(
|
Chrysler: Correct ECU label for DASM
Chrysler: Correct ECU label for 0x753 | @@ -142,7 +142,7 @@ FW_VERSIONS = {
b'68535469AB',
b'68438454AC',
],
- (Ecu.fwdCamera, 0x753, None): [
+ (Ecu.fwdRadar, 0x753, None): [
b'68320950AL',
b'68320950AJ',
b'68454268AB',
|
Adding support for multiclass models or models returning multiple probabilities in fastai and the pyfunc flavor
*
Adding support for multiclass models or models returning multiple probabilities in `fastai` and the `pyfunc` flavor.
* Restore registered_model_name | @@ -317,7 +317,7 @@ class _FastaiModelWrapper:
def predict(self, dataframe):
dl = self.learner.dls.test_dl(dataframe)
preds, _ = self.learner.get_preds(dl=dl)
- return pd.DataFrame(map(np.array, preds.numpy()), columns=["predictions"])
+ return pd.Series(map(np.array, preds.numpy())).to_frame("predictions")
def _load_pyfunc(path):
|
app engine compatibility: handle missing httplib._CS_* constants
fixes | @@ -11,10 +11,6 @@ from httplib import HTTPMessage
from httplib import (HTTP_PORT,
HTTPS_PORT,
- _CS_IDLE,
- _CS_REQ_STARTED,
- _CS_REQ_SENT,
-
CONTINUE,
SWITCHING_PROTOCOLS,
PROCESSING,
@@ -81,6 +77,9 @@ except ImportError:
# These may not be available on all versions of Python 2.6.x or 2.7.x
try:
from httplib import (
+ _CS_IDLE,
+ _CS_REQ_STARTED,
+ _CS_REQ_SENT,
_MAXLINE,
_MAXHEADERS,
_is_legal_header_name,
|
Update decorators.py
More robust in case you somehow end up with "None" in your sys.path list somewhere | @@ -20,7 +20,7 @@ import sys
# Hack to keep NLTK's "tokenize" module from colliding with the "tokenize" in
# the Python standard library.
old_sys_path = sys.path[:]
-sys.path = [p for p in sys.path if "nltk" not in p]
+sys.path = [p for p in sys.path if p and "nltk" not in p]
import inspect
sys.path = old_sys_path
|
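A quick illustration of why the extra `p and` guard above matters, using a hypothetical path list:

```python
paths = ["/usr/lib/python3/dist-packages", None, "/opt/nltk"]

# Without the guard, a None entry blows up:
#   [p for p in paths if "nltk" not in p]
#   -> TypeError: argument of type 'NoneType' is not iterable
# With the guard, None (and empty) entries are simply dropped:
filtered = [p for p in paths if p and "nltk" not in p]
print(filtered)   # ['/usr/lib/python3/dist-packages']
```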
skip running standardize on string type input
can you review the changes? And where should we put the is_string() function | # -*- coding: utf-8 -*-
+from warnings import warn
import numpy as np
import pandas as pd
from .mad import mad
-
+from ..misc import NeuroKitWarning
def standardize(data, robust=False, window=None, **kwargs):
"""Standardization of data.
@@ -59,15 +60,41 @@ def standardize(data, robust=False, window=None, **kwargs):
"""
# Return appropriate type
if isinstance(data, list):
- data = list(_standardize(np.array(data), robust=robust, window=window, **kwargs))
+ if any(is_string(data)):
+ out = data
+ warn(
+ "The data is not standardized."
+ "Some elements in the list is of string type.",
+ category=NeuroKitWarning
+ )
+ else:
+ out = list(_standardize(np.array(data), robust=robust, window=window, **kwargs))
+
elif isinstance(data, pd.DataFrame):
- data = pd.DataFrame(_standardize(data, robust=robust, window=window, **kwargs))
+ _data = data.loc[:, ~is_string(data)]
+ to_append = data.loc[:, is_string(data)]
+ out = pd.DataFrame(_standardize(_data, robust=robust, window=window, **kwargs))
+ out = pd.concat([to_append, out], axis=1)
+
elif isinstance(data, pd.Series):
- data = pd.Series(_standardize(data, robust=robust, window=window, **kwargs))
+ if is_string(data):
+ out = data
+ warn(
+ "The data is not standardized as it is of string type.",
+ category=NeuroKitWarning)
else:
- data = _standardize(data, robust=robust, window=window, **kwargs)
+ out = pd.Series(_standardize(data, robust=robust, window=window, **kwargs))
- return data
+ else:
+ if is_string(data):
+ out = data
+ warn(
+ "The data is not standardized as it is of string type.",
+ category=NeuroKitWarning)
+ else:
+ out = _standardize(data, robust=robust, window=window, **kwargs)
+
+ return out
# =============================================================================
@@ -105,3 +132,15 @@ def _standardize(data, robust=False, window=None, **kwargs):
z = z.values
return z
+
+
+def is_string(x):
+ if isinstance(x, list):
+ out = [isinstance(member, str) for member in x]
+ if isinstance(x, pd.DataFrame):
+ out = [member == 'object' for member in list(x.dtypes)]
+ if isinstance(x, pd.Series):
+ out = [x.dtype == "object"]
+ if isinstance(x, np.ndarray):
+ out = [x.dtype == "U1"]
+ return np.array(out)
|
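A hedged usage sketch of the behaviour added above, assuming the NeuroKit2 `standardize` function; outputs are abbreviated:

```python
import neurokit2 as nk

# Purely numeric input is standardized as before.
nk.standardize([1, 2, 3, 4])          # -> list of z-scores

# A list containing strings is returned unchanged and a NeuroKitWarning is raised.
nk.standardize([1, 2, "three", 4])    # -> [1, 2, "three", 4] plus a warning
```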
ceph-volume: fix TypeError exception when setting osds-per-device > 1
osds-per-device needs to be passed to run_command as a string.
Otherwise, expandvars method will try to iterate over an integer. | @@ -298,7 +298,7 @@ def batch(module, container_image):
cmd.append('--dmcrypt')
if osds_per_device > 1:
- cmd.extend(['--osds-per-device', osds_per_device])
+ cmd.extend(['--osds-per-device', str(osds_per_device)])
if objectstore == 'filestore':
cmd.extend(['--journal-size', journal_size])
|
update to be compliant with latest qcodes versions
fix fixes errors in the tests of test_kernel_distortions.py and
test_lfilt_kernel_object.py | @@ -92,9 +92,9 @@ class ConfigParameter(ManualParameter):
if initial_value is not None:
self.validate(initial_value)
- self._save_val(initial_value)
+ self.cache.set(initial_value)
- def set(self, value):
+ def set_raw(self, value):
"""
Validate and saves value.
If the value is different from the latest value it sets the
@@ -104,8 +104,8 @@ class ConfigParameter(ManualParameter):
self.validate(value)
if value != self.get_latest():
self._instrument._config_changed = True
- self._save_val(value)
+ self.cache.set(value)
- def get(self):
+ def get_raw(self):
""" Return latest value"""
return self.get_latest()
|
Update version 0.9.3 -> 0.9.4
Fixes
Fixed QUBO sampling bug in `DWaveSampler` introduced in 0.9.2 | # =============================================================================
__all__ = ['__version__', '__author__', '__authoremail__', '__description__']
-__version__ = '0.9.3'
+__version__ = '0.9.4'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = '[email protected]'
__description__ = 'All things D-Wave System.'
|
Update footer.scss
Footer logo sizing fix | @@ -192,10 +192,11 @@ footer {
height: 135px;
}
@media (min-width: 1600px) {
- background-size: 25rem !important;
+ background-size: 16.5vw !important;
width: unset;
height: 200px;
}
+
}
}
|
Add type hints for APISite.get_tokens()
It took me quite a while to find out why get_tokens("csrf") didn't
return anything. Answer: it needs to be get_tokens(["csrf"]). Hope the type
hints will help others. | @@ -19,6 +19,7 @@ from warnings import warn
import pywikibot
import pywikibot.family
+from pywikibot.backports import List
from pywikibot.comms.http import get_authentication
from pywikibot.data import api
from pywikibot.exceptions import (
@@ -1292,7 +1293,7 @@ class APISite(
return page._redirtarget
- def validate_tokens(self, types):
+ def validate_tokens(self, types: List[str]):
"""Validate if requested tokens are acceptable.
Valid tokens depend on mw version.
@@ -1315,7 +1316,7 @@ class APISite(
valid_types.append('csrf')
return valid_types
- def get_tokens(self, types, all: bool = False) -> dict:
+ def get_tokens(self, types: List[str], all: bool = False) -> dict:
"""Preload one or multiple tokens.
For MediaWiki version 1.23, only one token can be retrieved at once.
@@ -1336,7 +1337,6 @@ class APISite(
:param types: the types of token (e.g., "edit", "move", "delete");
see API documentation for full list of types
- :type types: iterable
:param all: load all available tokens, if None only if it can be done
in one request.
|
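For illustration, the call pattern the added type hints are meant to make obvious; the site object here is an assumed example and may require a local user-config:

```python
import pywikibot

site = pywikibot.Site('en', 'wikipedia')   # example site; any APISite works

# 'types' must be a list of token type names, not a bare string:
tokens = site.get_tokens(['csrf'])   # -> {'csrf': '...'}
# site.get_tokens('csrf') is silently treated as an iterable of single
# characters and returns an empty dict -- the confusion described above.
```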
[tensorboard] Fix function input parameter for add_hparams
Summary:
closes
both parameters in add_hparams are mandatory.
cc sanekmelnikov orionr
Pull Request resolved: | @@ -268,7 +268,7 @@ class SummaryWriter(object):
"""Returns the directory where event files will be written."""
return self.log_dir
- def add_hparams(self, hparam_dict=None, metric_dict=None):
+ def add_hparams(self, hparam_dict, metric_dict):
"""Add a set of hyperparameters to be compared in TensorBoard.
Args:
|
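A brief usage sketch of the now-mandatory parameters; the hyperparameter and metric values are arbitrary:

```python
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter()
# Both dicts are required (no defaults), so omitting one now fails
# immediately with a TypeError.
writer.add_hparams({'lr': 0.1, 'batch_size': 32},
                   {'hparam/accuracy': 0.9, 'hparam/loss': 0.25})
writer.close()
```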
Add back tags field to node serializer.
Introduced by bad merge in | @@ -5,7 +5,7 @@ from api.base.exceptions import (Conflict, EndpointNotImplementedError,
RelationshipPostMakesNoChanges)
from api.base.serializers import (VersionedDateTimeField, HideIfRegistration, IDField,
JSONAPIRelationshipSerializer,
- JSONAPISerializer, LinksField,
+ JSONAPISerializer, LinksField, ValuesListField,
NodeFileHyperLinkField, RelationshipField,
ShowIfVersion, TargetTypeField, TypeField,
WaterbutlerLink, relationship_diff, BaseAPISerializer)
@@ -165,6 +165,7 @@ class NodeSerializer(JSONAPISerializer):
preprint = ser.BooleanField(read_only=True, source='is_preprint')
fork = ser.BooleanField(read_only=True, source='is_fork')
collection = ser.BooleanField(read_only=True, source='is_collection')
+ tags = ValuesListField(attr_name='name', child=ser.CharField(), required=False)
access_requests_enabled = ser.BooleanField(read_only=False, required=False)
node_license = NodeLicenseSerializer(required=False, source='license')
template_from = ser.CharField(required=False, allow_blank=False, allow_null=False,
|
cli: correctly handle boto exception in set_asg_limits function
The exception "e" was raised but not defined. | @@ -345,7 +345,7 @@ def set_asg_limits(asg, min, max, desired):
asg.desired_capacity = desired
try:
return asg.update()
- except:
+ except boto.exception.BotoServerError as e:
raise e
def get_asg_ids(stack, config):
|
Avoid chdir in masspay
New comprehensive application wiring was tripping up on this. | @@ -21,12 +21,13 @@ from gratipay.billing.exchanges import get_ready_payout_routes_by_network
from httplib import IncompleteRead
-os.chdir('../logs/masspay')
+base_dir = '../logs/masspay'
ts = datetime.datetime.now().strftime('%Y-%m-%d')
-INPUT_CSV = '{}.input.csv'.format(ts)
-PAYPAL_CSV = '{}.output.paypal.csv'.format(ts)
-GRATIPAY_CSV = '{}.output.gratipay.csv'.format(ts)
-REPORT_CSV = '{}.report.paypal.csv'.format(ts)
+logpath = lambda t: os.path.join(base_dir, t.format(ts))
+INPUT_CSV = logpath('{}.input.csv')
+PAYPAL_CSV = logpath('{}.output.paypal.csv')
+GRATIPAY_CSV = logpath('{}.output.gratipay.csv')
+REPORT_CSV = logpath('{}.report.paypal.csv')
def round_(d):
|
Update generic.txt
Moving to ```zloader```: | @@ -10726,29 +10726,6 @@ littlegreenhands.org
alternasaludspa.com/1/
melonco.com/1/
-# Reference: https://twitter.com/FewAtoms/status/1317162909512892417
-# Reference: https://www.virustotal.com/gui/ip-address/8.208.76.109/relations
-# Reference: https://www.virustotal.com/gui/file/696bb0e2594ca7eda7482d77d12c56f904ff3d07985c45e6f2e5b7c027b2d1db/detection
-# Reference: https://www.virustotal.com/gui/file/9e566de0ea8df6d37bde4de438df7bc539cb0dae8fb5233bf9c27cb567dd894b/detection
-
-callmebb.com
-callmebe.com
-digdown2020.top
-digitfile24.top
-docsecure.top
-downdetect24.top
-download2020.top
-getfiles24.top
-manudeg.top
-mecorus.top
-onlyfiles24.top
-privatefiles24.top
-purefiles24.top
-puresoftware.top
-securefiles.top
-somefiles24.top
-therefiles24.top
-
# Reference: https://www.virustotal.com/gui/file/c0a7dfca7eda9d3f170e318428984c17b9737d4e53c291a227f97863ea30827e/detection
salesgroup.top
|
fix: Ensure that routing works when arg is False. Fixes
Thanks | @@ -88,7 +88,7 @@ async def handle_on(q: Q) -> bool:
else:
await func(q)
return True
- elif arg_value:
+ elif arg_value is not None:
func = _arg_handlers.get(arg)
if func:
await func(q)
|
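The pitfall fixed above in a nutshell, with toy values rather than the actual h2o-wave internals:

```python
arg_value = False          # e.g. a checkbox the user left unchecked

if arg_value:              # old check: False is falsy, handler never fires
    print("handled (old check)")

if arg_value is not None:  # new check: only a truly missing arg is skipped
    print("handled (new check)")   # -> this prints
```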
Update docker-local-machine.rst
Included a note to reference the troubleshooting guide as per this issue | @@ -9,6 +9,8 @@ Note: This configuration should not be used in production, as it's using a known
If you're looking for a production installation with Docker, please see the `Mattermost Production Docker Deployment Guide <http://docs.mattermost.com/install/prod-docker.html>`_.
+If you have any problems installing, see the `troubleshooting guide <https://www.mattermost.org/troubleshoot/>`_. To submit an improvement or correction, click Edit at the top of this page.
+
One-line Docker Install
-----------------------
|
ocs_ci/ocs/constants.py
- Added constant for 'couchbase-operator-namespace' | @@ -384,6 +384,8 @@ COUCHBASE_WORKER_EXAMPLE = os.path.join(
TEMPLATE_COUCHBASE_SERVER_DIR, "couchbase-worker-example.yaml"
)
+COUCHBASE_OPERATOR = 'couchbase-operator-namespace'
+
HELLO_WORLD_PRODUCER_YAML = os.path.join(
TEMPLATE_AMQ_DIR, "hello-world-producer.yaml"
)
|
Further tweak. As always, thanks to who did this all for
fontTools, I just copied. | @@ -10,6 +10,7 @@ env:
matrix:
fast_finish: true
exclude:
+ # Exclude the default Python 3.6 build
- python: 3.6
include:
- python: 3.6
@@ -20,11 +21,9 @@ matrix:
env: TOXENV=py37-cov
dist: xenial
sudo: true
- - python: pypy
- env: TOXENV=pypy-nocov
- - language: generic
- os: osx
- env: TOXENV=py36-cov
+ - python: pypy3
+ env: TOXENV=pypy3
+ dist: xenial
- language: generic
os: osx
env:
|
Fix test loophole for loading samples during KFP startup
For more context see
We could remove this fix when ksonnet is deprecated. | @@ -47,12 +47,18 @@ cd ${DIR}/${KFAPP}
## Update pipeline component image
pushd ks_app
+# Delete pipeline component first before applying so we guarantee the pipeline component is new.
+ks delete default -c pipeline
+sleep 60s
+
ks param set pipeline apiImage ${GCR_IMAGE_BASE_DIR}/api-server:${GCR_IMAGE_TAG}
ks param set pipeline persistenceAgentImage ${GCR_IMAGE_BASE_DIR}/persistenceagent:${GCR_IMAGE_TAG}
ks param set pipeline scheduledWorkflowImage ${GCR_IMAGE_BASE_DIR}/scheduledworkflow:${GCR_IMAGE_TAG}
ks param set pipeline uiImage ${GCR_IMAGE_BASE_DIR}/frontend:${GCR_IMAGE_TAG}
-# Delete pipeline component first before applying so we guarantee the pipeline component is new.
-ks delete default -c pipeline
-sleep 60s
+# Swap the metadata/artifact storage PD to avoid reusing the old data.
+# We should remove this hack when we deprecate ksonnet.
+# See https://github.com/kubeflow/pipelines/pull/1805#issuecomment-520204987 for context
+ks param set pipeline minioPd ${KFAPP}-storage-metadata-store
+ks param set pipeline mysqlPd ${KFAPP}-storage-artifact-store
ks apply default -c pipeline
popd
|
use tempfile instead of writing to cwd
The Debian autopkgtest command runs by default in an environment where the
current working directory is not writeable. So, instead, use a proper
tempfile to test the %ls magic. | @@ -2,6 +2,7 @@ import os
import re
import subprocess
import pytest
+import tempfile
from metakernel import MetaKernel
from metakernel.tests.utils import (get_kernel, get_log_text, EvalKernel,
@@ -18,12 +19,10 @@ def test_magics():
for magic in ['file', 'html', 'javascript', 'latex', 'shell', 'time']:
assert magic in kernel.cell_magics
- with open('TEST.txt', 'wb'):
- pass
- kernel.get_magic('%shell ls')
+ with tempfile.NamedTemporaryFile() as ntf:
+ kernel.get_magic('%%shell ls %s' % ntf.name)
log_text = get_log_text(kernel)
- assert 'TEST.txt' in log_text
- os.remove('TEST.txt')
+ assert ntf.name in log_text
def test_help():
|
[ROI][gui] make draggable a property (read only).
Add condition, if not draggable then refuse to show the middle marker. | @@ -1369,11 +1369,15 @@ class _RoiMarkerHandler(object):
self._roi = weakref.ref(roi)
self._plot = weakref.ref(plot)
- self.draggable = False if roi.isICR() else True
+ self._draggable = False if roi.isICR() else True
self._color = 'black' if roi.isICR() else 'blue'
self._displayMidMarker = False
self._visible = True
+ @property
+ def draggable(self):
+ return self._draggable
+
@property
def plot(self):
return self._plot()
@@ -1394,6 +1398,9 @@ class _RoiMarkerHandler(object):
self.updateMarkers()
def showMiddleMarker(self, visible):
+ if self.draggable is False and visible is True:
+ _logger.warning("ROI is not draggable. Won't display middle marker")
+ return
self._displayMidMarker = visible
self.getMarker('middle').setVisible(self._displayMidMarker)
|
Don't delete user created file
Since the user has explicitly created the file to state that they
are okay with the Kubernetes cluster being wiped out, we should not be
deleting this file. | @@ -191,7 +191,6 @@ clobber: clean
-rm -rf watt
-$(if $(filter-out -,$(ENVOY_COMMIT)),rm -rf envoy envoy-src)
-rm -rf docs/node_modules
- -rm -rf .skip_test_warning # reset the test warning too
-rm -rf venv && echo && echo "Deleted venv, run 'deactivate' command if your virtualenv is activated" || true
print-%:
|
Use a single lambda function for all invocations
Astoria Transformer Explosion | @@ -98,20 +98,20 @@ def _upload_step(s3, step_idx, step, context):
)
-def _get_function_name(context, step_idx):
- return '{run_id}_deployment_package_{step_idx}'.format(run_id=context.run_id, step_idx=step_idx)
+def _get_function_name(context):
+ return '{run_id}_function'.format(run_id=context.run_id)
-def _create_lambda_step(aws_lambda, step_idx, deployment_package, context, role):
+def _create_lambda_step(aws_lambda, deployment_package, context, role):
runtime = _get_python_runtime()
context.debug(
- 'About create function with bucket {bucket} deployment_package_key {deploy_key}'.format(
+ 'About to create function with bucket {bucket} deployment_package_key {deploy_key}'.format(
bucket=context.resources.dagma.s3_bucket,
deploy_key=deployment_package,
)
)
res = aws_lambda.create_function(
- FunctionName=_get_function_name(context, step_idx),
+ FunctionName=_get_function_name(context),
Runtime=runtime,
Role=role.arn,
Handler='dagma.aws_lambda_handler',
@@ -222,24 +222,15 @@ def execute_plan(context, execution_plan, cleanup_lambda_functions=True, local=F
)
_upload_step(aws_s3_client, step_idx, step, context)
- # FIXME this should only be one function that we call multiple times
- lambda_steps = []
try:
- for step_idx, step in enumerate(steps):
- lambda_steps.append(
- _create_lambda_step(
+ lambda_step = _create_lambda_step(
aws_lambda_client,
- step_idx,
deployment_package_key,
context,
role,
)
- )
-
- # 'LambdaInvocationPayload', 'run_id step_idx key s3_bucket s3_key_inputs s3_key_body'
- # 's3_key_resources s3_key_outputs'
- for step_idx, lambda_step in enumerate(lambda_steps):
+ for step_idx, _ in enumerate(steps):
payload = LambdaInvocationPayload(
context.run_id,
step_idx,
@@ -264,7 +255,6 @@ def execute_plan(context, execution_plan, cleanup_lambda_functions=True, local=F
finally:
if cleanup_lambda_functions:
- for lambda_step in lambda_steps:
context.debug(
'Deleting lambda function: {name}'.format(name=lambda_step['FunctionName'])
)
|
Update FormatTimestamp.js
Add nanosecond case | @@ -23,6 +23,8 @@ export default {
input = input / 1000 // microseconds -> milliseconds
} else if (tsLength === 10) {
input = input * 1000000 // seconds -> milliseconds
+ } else if (tsLength === 19) {
+ input = input / 1000000 // nanoseconds -> milliseconds
}
return input
},
|
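A rough Python sketch of the normalization in the FormatTimestamp change above (the original is JavaScript; the helper name `to_milliseconds` is assumed, not taken from the project). Epoch timestamps are routed by digit count: 10 digits for seconds, 13 for milliseconds, 16 for microseconds, and 19 for nanoseconds.

```python
def to_milliseconds(ts: int) -> float:
    """Normalize an epoch timestamp to milliseconds based on its digit count."""
    digits = len(str(ts))
    if digits == 13:            # already milliseconds
        return float(ts)
    if digits == 16:            # microseconds -> milliseconds
        return ts / 1000
    if digits == 10:            # seconds -> milliseconds
        return float(ts * 1000)
    if digits == 19:            # nanoseconds -> milliseconds
        return ts / 1_000_000
    raise ValueError(f"unexpected timestamp length: {digits}")
```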
pep8 fix
src/collectors/mesos/mesos.py:175:29: E124 closing bracket does not match visual indentation | @@ -171,8 +171,7 @@ class MesosCollector(diamond.collector.Collector):
def _sum_statistics(self, x, y):
stats = set(x) | set(y)
summed_stats = dict([(key, x.get(key, 0) + y.get(key, 0))
- for key in stats
- ])
+ for key in stats])
return summed_stats
def _collect_slave_statistics(self):
|
Update exercises/practice/darts/.docs/hints.md
nested --> concentric | - This _Stack Overflow_ Post: [Equation for Testing if a Point is Inside a Circle][point-circle-equation] outlines one method.
- This _DoubleRoot_ post [Position of a point relative to a circle][point-to-circle] outlines a different one.
- This _Math is Fun_ post covers a more general [Distance Between 2 Points][distance-between-two-points] calculation.
-- Because the dart board is a set of _nested_ circles, the order in which you calculate points could change the answer significantly.
+- Because the dart board is a set of _concentric_ circles, the order in which you calculate points could change the answer significantly.
You should pay attention to which direction your calculations "move" in.
- Remember that this exercise has many potential solutions and many paths you can take along the way.
No path is manifestly "better" than another, although a particular path may be more interesting or better suited to what you want to learn or explore right now.
|
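A minimal sketch of why evaluation order matters for the concentric circles mentioned above, assuming the usual darts radii (1, 5, 10) and scores (10, 5, 1); this is illustrative, not the exercise's reference solution. Testing the innermost circle first ensures an inner hit is never misreported as an outer one.

```python
def score(x: float, y: float) -> int:
    """Darts score: compare against concentric circles from innermost outward."""
    distance_sq = x * x + y * y       # squared distance avoids a square root
    if distance_sq <= 1 ** 2:
        return 10                     # inner circle, radius 1
    if distance_sq <= 5 ** 2:
        return 5                      # middle circle, radius 5
    if distance_sq <= 10 ** 2:
        return 1                      # outer circle, radius 10
    return 0                          # outside the board
```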
icu: Update Conan conventions
Automatically created by bincrafters-conventions 0.18.2 | @@ -48,7 +48,7 @@ class ICUBase(ConanFile):
def build_requirements(self):
if self._the_os == "Windows":
- self.build_requires("msys2/20161025")
+ self.build_requires("msys2/20190524")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
|
Fixed inline comment in debug.py
Was causing the Travis Ci build to fail | @@ -205,13 +205,16 @@ def run(generator, args, anchor_params):
while True:
key = cv2.waitKey(1)
cv2.imshow('Image', image)
- if key == ord('n'): # press n for next image
+ # press n for next image
+ if key == ord('n'):
i += 1
break
- if key == ord('b'): # press b for previous image
+ # press b for previous image
+ if key == ord('b'):
i -= 1
break
- if key == ord('q'): # press q to quit
+ # press q to quit
+ if key == ord('q'):
return False
return True
|
tests: create as many drives for virtualbox as libvirt
This just ensures that virtualbox and libvirt are making
the same amount of devices for tests. | @@ -477,7 +477,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
'--add', 'scsi']
end
- (0..1).each do |d|
+ (0..2).each do |d|
vb.customize ['createhd',
'--filename', "disk-#{i}-#{d}",
'--size', '11000'] unless File.exist?("disk-#{i}-#{d}.vdi")
|
Update dev-setup.rst
Added link to CentOS 7 setup | @@ -10,6 +10,7 @@ If you don't plan on contributing code to the Mattermost open source project, th
.. toctree::
Setting up Ubuntu 16.04 <dev-setup-ubuntu-1604.rst>
+ Setting up CentOS 7 <dev-setup-centos-7.rst>
Setting up Mac OS X <dev-setup-osx.rst>
Setting up Archlinux <dev-setup-archlinux.rst>
Setting up Windows <dev-setup-windows.rst>
|
Fixed breadcrumbs and page title
Using get_context_data combines page_context with menu_context, which has the nav info. | @@ -68,7 +68,7 @@ class TableauView(BaseDomainView):
def tableau_server_response(self):
from requests_toolbelt.adapters import host_header_ssl # avoid top-level import that breaks docs build
- context = self.page_context
+ context = self.get_context_data()
tabserver_url = 'https://{}/trusted/'.format(self.visualization.server.server_name)
post_arguments = {'username': self.visualization.server.domain_username}
if self.visualization.server.target_site != 'Default':
|
Add coverage for Blueprints.(app_)context_processor
Test both context_processor and app_context_processor functions.
Two context parameters are added into the context: one added to
the blueprint locally; another added to the app globally. The test
asserts the behaviors in both blueprint scope and the app scope.
The coverage for flask.blueprints is increased by 3%. | @@ -591,3 +591,45 @@ def test_add_template_test_with_name_and_template():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
+
+def test_context_processing():
+ app = flask.Flask(__name__)
+ answer_bp = flask.Blueprint('answer_bp', __name__)
+
+ template_string = lambda: flask.render_template_string(
+ '{% if notanswer %}{{ notanswer }} is not the answer. {% endif %}'
+ '{% if answer %}{{ answer }} is the answer.{% endif %}'
+ )
+
+ # App global context processor
+ @answer_bp.app_context_processor
+ def not_answer_context_processor():
+ return {'notanswer': 43}
+
+ # Blueprint local context processor
+ @answer_bp.context_processor
+ def answer_context_processor():
+ return {'answer': 42}
+
+ # Setup endpoints for testing
+ @answer_bp.route('/bp')
+ def bp_page():
+ return template_string()
+
+ @app.route('/')
+ def app_page():
+ return template_string()
+
+ # Register the blueprint
+ app.register_blueprint(answer_bp)
+
+ c = app.test_client()
+
+ app_page_bytes = c.get('/').data
+ answer_page_bytes = c.get('/bp').data
+
+ assert b'43' in app_page_bytes
+ assert b'42' not in app_page_bytes
+
+ assert b'42' in answer_page_bytes
+ assert b'43' in answer_page_bytes
|
Adding default gid if no gids given.
Fixes | @@ -270,6 +270,8 @@ class GoogleSheetsPreprocessor(BaseGooglePreprocessor):
gids = config.gids or []
if config.gid is not None:
gids.append(config.gid)
+ if not gids:
+ gids.append(0)
format_as = config.format
if config.collection and format_as not in GoogleSheetsPreprocessor.MAP_TYPES:
format_as = 'map'
|
Update setup_relative_calculation.py
flipping conditional statement for `remove_constraiint` | @@ -88,13 +88,13 @@ def getSetupOptions(filename):
if 'small_molecule_parameters_cache' not in setup_options:
setup_options['small_molecule_parameters_cache'] = None
+ if 'remove_constraints' not in setup_options:
+ setup_options['remove_constraints'] = False
+ _logger.info('No constraints will be removed')
# remove_constraints can be 'all' or 'not water'
- if setup_options['remove_constraints'] not in ['all', 'not water']:
+ elif setup_options['remove_constraints'] not in ['all', 'not water', False]:
_logger.warning("remove_constraints value of {setup_options['remove_constraints']} not understood. 'all', 'none' or 'not water' are valid options. NOT REMOVING ANY CONSTRAINTS")
setup_options['remove_constraints'] = False
- elif 'remove_constraints' not in setup_options:
- setup_options['remove_constraints'] = False
- _logger.info('No constraints will be removed')
if 'spectators' not in setup_options:
_logger.info(f'No spectators')
|
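The shape of the fix above, defaulting a missing key before validating so that only explicitly supplied values trigger a warning, can be sketched standalone; the function name `normalize_remove_constraints` is illustrative, not from the project.

```python
import logging

logger = logging.getLogger(__name__)
VALID_VALUES = {"all", "not water", False}

def normalize_remove_constraints(options: dict) -> dict:
    """Default a missing key first; warn only about values the user actually set."""
    if "remove_constraints" not in options:
        options["remove_constraints"] = False
        logger.info("No constraints will be removed")
    elif options["remove_constraints"] not in VALID_VALUES:
        logger.warning(
            "remove_constraints value %r not understood; not removing any constraints",
            options["remove_constraints"],
        )
        options["remove_constraints"] = False
    return options
```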
service: dev: install: Try installing each package individually
This isn't as fast, but it will tell users which packages failed to install
Fixes: | @@ -374,30 +374,27 @@ class Install(CMD):
# Check if plugins not in skip list have unmet dependencies
if not self.nocheck:
self.dep_check(CORE_PLUGIN_DEPS, self.skip)
- # Packages fail to install if we run pip processes in parallel
- packages = list(
- map(
- lambda package: Path(*main_package.parts, *package),
- [
- package
- for package in CORE_PLUGINS
- if not "/".join(package) in self.skip
- ],
- )
- )
- self.logger.info("Installing %r in development mode", packages)
+ self.logger.info("Installing %r in development mode", CORE_PLUGINS)
+ failed = []
+ for package in CORE_PLUGINS:
+ if "/".join(package) in self.skip:
+ continue
+ package_path = Path(*main_package.parts, *package)
cmd = [sys.executable, "-m", "pip", "install"]
+ # Install to prefix, since --user sometimes fails
if self.user:
- # --user sometimes fails
local_path = Path("~", ".local").expanduser().absolute()
cmd.append(f"--prefix={local_path}")
- for package in packages:
- cmd += ["-e", str(package.absolute())]
+ # Install package in development mode
+ cmd += ["-e", str(package_path.absolute())]
self.logger.debug("Running: %s", " ".join(cmd))
+ # Packages fail to install if we run pip processes in parallel
proc = await asyncio.create_subprocess_exec(*cmd)
await proc.wait()
if proc.returncode != 0:
- raise RuntimeError("pip failed to install dependencies")
+ failed.append("/".join(package))
+ if failed:
+ raise RuntimeError(f"pip failed to install: {','.join(failed)}")
@configdataclass
|
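A synchronous sketch of the per-package installation pattern above (the real code drives asyncio subprocesses; `install_packages_individually` and its argument are assumptions made here for illustration).

```python
import subprocess
import sys
from pathlib import Path

def install_packages_individually(package_dirs):
    """Install each package on its own so every failure can be reported by name."""
    failed = []
    for pkg in package_dirs:
        cmd = [sys.executable, "-m", "pip", "install", "-e", str(Path(pkg).resolve())]
        if subprocess.run(cmd).returncode != 0:
            failed.append(str(pkg))
    if failed:
        raise RuntimeError(f"pip failed to install: {', '.join(failed)}")
```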
Re-structured property _parent_dir and fixed path validation
Propety _parent_dir (WB API V1 only) is tightly coupled with folder
metadata and contents for Bitbucket, which must be re-structured to
work with Bitbucket API 2.0 upgrade. validate_v1_path() is fixed as
a side product. | @@ -86,22 +86,30 @@ class BitbucketProvider(provider.BaseProvider):
for part in path_obj.parts:
part._id = (commit_sha, branch_name)
- self._parent_dir = await self._fetch_dir_listing(path_obj.parent)
+ # Cache parent directory listing (a WB API V1 feature)
+ # Note: Property ``_parent_dir`` has been re-structured for Bitbucket API 2.0. Please refer
+ # to ``_fetch_path_metadata()`` and ``_fetch_dir_listing()`` for detailed information.
+ self._parent_dir = {
+ 'metadata': await self._fetch_path_metadata(path_obj.parent),
+ 'contents': await self._fetch_dir_listing(path_obj.parent)
+ }
- if path_obj.is_dir:
- if path_obj.name not in self._parent_dir['directories']:
- raise exceptions.NotFoundError(str(path))
- else:
+ # Tweak dir_commit_sha and dir_path for Bitbucket API 2.0
+ parent_dir_commit_sha = self._parent_dir['metadata']['commit']['hash'][:12]
+ parent_dir_path = '{}/'.format(self._parent_dir['metadata']['path'])
+
+ # Check file or folder existence
+ path_obj_type = 'commit_directory' if path_obj.is_dir else 'commit_file'
if path_obj.name not in [
- self.bitbucket_path_to_name(x['path'], self._parent_dir['path'])
- for x in self._parent_dir['files']
+ self.bitbucket_path_to_name(x['path'], parent_dir_path)
+ for x in self._parent_dir['contents'] if x['type'] == path_obj_type
]:
raise exceptions.NotFoundError(str(path))
# _fetch_dir_listing will tell us the commit sha used to look up the listing
# if not set in path_obj or if the lookup sha is shorter than the returned sha, update it
- if not commit_sha or (len(commit_sha) < len(self._parent_dir['node'])):
- path_obj.set_commit_sha(self._parent_dir['node'])
+ if not commit_sha or (len(commit_sha) < len(parent_dir_commit_sha)):
+ path_obj.set_commit_sha(parent_dir_commit_sha)
return path_obj
|
Fix mistaken string formatting preventing immediate mapping
See | @@ -321,7 +321,7 @@ def open(name, device, keyfile=None):
ret = {}
keyfile_option = ('--key-file %s' % keyfile) if keyfile else ''
- devices = __salt__['cmd.run_stdout']('cryptsetup open {0} {0} {0}'\
+ devices = __salt__['cmd.run_stdout']('cryptsetup open {0} {1} {2}'\
.format(keyfile_option, device, name))
return ret
|
Fix handling of PropertyError DSL expression
TN: | @@ -1399,15 +1399,11 @@ class PropertyError(AbstractExpression):
super(PropertyError, self).__init__()
def construct(self):
- check_source_language(
- isinstance(self.expr_type, CompiledType),
- 'Invalid input type: {}'.format(repr(self.expr_type))
- )
check_source_language(
self.message is None or isinstance(self.message, str),
'Invalid error message: {}'.format(repr(self.message))
)
- return ErrorExpr(self.expr_type,
+ return ErrorExpr(resolve_type(self.expr_type),
names.Name('Property_Error'),
self.message)
|
change default temp to 298 K
Change the default temp to 298K in perses/app/relative_point_mutation_setup.py | @@ -17,7 +17,7 @@ from openff.toolkit.topology import Molecule
from openmmforcefields.generators import SystemGenerator
ENERGY_THRESHOLD = 1e-2
-temperature = 300 * unit.kelvin
+temperature = 298 * unit.kelvin
kT = kB * temperature
beta = 1.0/kT
ring_amino_acids = ['TYR', 'PHE', 'TRP', 'PRO', 'HIS']
|
docs(README): Added links for cookiecutter projects
Added section to README for community donated cookiecutter templates.
refs | @@ -215,7 +215,9 @@ Tutorials
* [video](https://www.youtube.com/watch?v=pebeWrTqIIw)
* [slides](https://github.com/python-cmd2/talks/blob/master/PyOhio_2019/cmd2-PyOhio_2019.pdf)
* [example code](https://github.com/python-cmd2/talks/tree/master/PyOhio_2019/examples)
-
+* [Cookiecutter](https://github.com/cookiecutter/cookiecutter) Templates from community
+ * Basic cookiecutter template for cmd2 application : https://github.com/jayrod/cookiecutter-python-cmd2
+ * Advanced cookiecutter template with external plugin support : https://github.com/jayrod/cookiecutter-python-cmd2-ext-plug
Example Application
-------------------
|
Adds backward compatibility to GateSet._calc() for pickled gatesets.
Now, if a GateSet doesn't have the _calcClass member, it initializes
_calcClass to GateMatrixCalc as a default. | @@ -967,6 +967,8 @@ class GateSet(object):
def _calc(self):
+ if not hasattr(self,"_calcClass"): #for backward compatibility
+ self._calcClass = _GateMatrixCalc
return self._calcClass(self._dim, self.gates, self.preps,
self.effects, self.povm_identity,
self.spamdefs, self._remainderlabel,
|
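The general pattern behind this fix is that an object unpickled from an older release may lack an attribute introduced later, so the accessor backfills a default. A hedged sketch with made-up names:

```python
class Model:
    def __init__(self):
        self._calc_class = "matrix"       # attribute added in a newer release

    @property
    def calc(self):
        # Instances unpickled from older releases predate _calc_class,
        # so fall back to the historical default before using it.
        if not hasattr(self, "_calc_class"):
            self._calc_class = "matrix"
        return self._calc_class
```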
Update sparsifying_yolact_using_recipes.md
Update numbers for 0.12 | @@ -172,9 +172,11 @@ The table below compares these tradeoffs and shows how to run them on the COCO d
| Sparsification Type | Description | COCO mAP@all | Size on Disk | DeepSparse Performance** | Commands |
|:-------------------:|:---------------------------------------------------------------------------------:|:------------:|:------------:|:------------------------:|:--------------------------------------------------------------------------------------:|
-| Baseline | The baseline, pretrained model on the COCO dataset. | 0.288 | 170 MB | -- img/sec | `python train.py` |
-| Pruned | A highly sparse, FP32 model that recovers close to the baseline model. | 0.286 | 30.1 MB | -- img/sec | `python train.py --resume weights/model.pth --recipe ../recipe/yolact.pruned.md` |
-| Pruned Quantized | A highly sparse, INT8 model that recovers reasonably close to the baseline model. | 0.282 | 9.7 MB | -- img/sec | `python train.py --resume weights/model.pth --recipe ../recipe/yolact.pruned_quant.md` |
+| Baseline | The baseline, pretrained model on the COCO dataset. | 0.288 | 170 MB | 29.7 img/sec | `python train.py` |
+| Pruned | A highly sparse, FP32 model that recovers close to the baseline model. | 0.286 | 30.1 MB | 61.6 img/sec | `python train.py --resume weights/model.pth --recipe ../recipe/yolact.pruned.md` |
+| Pruned Quantized | A highly sparse, INT8 model that recovers reasonably close to the baseline model. | 0.282 | 9.7 MB | 144.4 img/sec | `python train.py --resume weights/model.pth --recipe ../recipe/yolact.pruned_quant.md` |
+
+ \*\* DeepSparse Performance measured on an AWS c5.12xlarge instance with 24 cores, batch size 64, and 550x550 input with version 0.12.0 of the DeepSparse Engine i.e. `deepsparse.benchmark --batch_size 64 --scenario sync [model_path]`
2. Select a recipe to use on top of the pre-trained model you created.
|
cache: py2 compatibility, kwargs after named args
Fix compatibility with the py2 quayio branch. Move the kwargs to the
end of the call | @@ -10,20 +10,20 @@ class ReadEndpointSupportedRedis(object):
raise Exception("Missing primary host for Redis model cache configuration")
self.write_client = StrictRedis(
- **primary,
socket_connect_timeout=1,
socket_timeout=2,
health_check_interval=2,
+ **primary,
)
if not replica:
self.read_client = self.write_client
else:
self.read_client = StrictRedis(
- **replica,
socket_connect_timeout=1,
socket_timeout=2,
health_check_interval=2,
+ **replica,
)
def get(self, key, *args, **kwargs):
|
[resotocore][fix] Define configfile parameter explicitly
Otherwise the system command might fail in certain scenarios.
This happens under tox, but not under pytest directly. | @@ -2198,6 +2198,7 @@ class SystemCommand(CLICommand, PreserveOutputFormat):
"--server.database", args.graphdb_database,
"--server.username", args.graphdb_username,
"--server.password", args.graphdb_password,
+ "--configuration", "none",
stderr=asyncio.subprocess.PIPE,
)
# fmt: on
@@ -2251,6 +2252,7 @@ class SystemCommand(CLICommand, PreserveOutputFormat):
"--server.database", args.graphdb_database,
"--server.username", args.graphdb_username,
"--server.password", args.graphdb_password,
+ "--configuration", "none",
stderr=asyncio.subprocess.PIPE,
)
# fmt: on
|
More aggressive shutdown detection
Prevents hanging language server processes after quit
Fixes | @@ -311,6 +311,7 @@ class WindowManager(object):
self._restarting = False
self._project_path = get_project_path(self._window)
self._on_closed = on_closed
+ self._is_closing = False
def get_session(self, config_name: str) -> 'Optional[Session]':
return self._sessions.get(config_name)
@@ -446,25 +447,33 @@ class WindowManager(object):
def _handle_view_closed(self, view, session):
self._diagnostics.remove(view, session.config.name)
- self._sublime.set_timeout_async(lambda: self._check_window_closed(), 500)
+ if not self._is_closing:
+ if not self._window.is_valid():
+ # try to detect close synchronously (for quitting)
+ self._handle_window_closed()
+ else:
+ # in case the window is invalidated after the last view is closed
+ self._sublime.set_timeout_async(lambda: self._check_window_closed(), 100)
def _check_window_closed(self):
- debug('check window closed')
+ # debug('window {} check window closed closing={}, valid={}'.format(
+ # self._window.id(), self._is_closing, self._window.is_valid()))
- if not self._window.is_valid():
+ if not self._is_closing and not self._window.is_valid():
self._handle_window_closed()
def _handle_window_closed(self):
- debug('window closed, ending sessions')
+ debug('window {} closed, ending sessions'.format(self._window.id()))
+ self._is_closing = True
self.end_sessions()
def _handle_all_sessions_ended(self):
debug('clients for window {} unloaded'.format(self._window.id()))
if self._restarting:
- debug('restarting')
+ debug('window {} sessions unloaded - restarting')
self.start_active_views()
elif not self._window.is_valid():
- debug('window no longer valid')
+ debug('window {} closed and sessions unloaded'.format(self._window.id()))
if self._on_closed:
self._on_closed()
|
Added parameter showname to hide network interface
Becomes a needless info for personal laptop usages where only one interface is used. | Parameters:
* traffic.exclude: Comma-separated list of interface prefixes to exclude (defaults to "lo,virbr,docker,vboxnet,veth")
* traffic.states: Comma-separated list of states to show (prefix with "^" to invert - i.e. ^down -> show all devices that are not in state down)
+ * traffic.showname: set as False to hide network interface name
"""
import re
@@ -23,6 +24,7 @@ class Module(bumblebee.engine.Module):
self._exclude = tuple(filter(len, self.parameter("exclude", "lo,virbr,docker,vboxnet,veth").split(",")))
self._status = ""
+ self._showname = self.parameter("showname", "True")
self._prev = {}
self._states = {}
self._states["include"] = []
@@ -86,6 +88,7 @@ class Module(bumblebee.engine.Module):
name = "traffic-{}".format(interface)
+ if self._showname != "False":
self.create_widget(widgets, name, interface)
for direction in ["rx", "tx"]:
|
fixed OneCall integration test
renamed `OneCall.one_call_historical` method to `OneCall.one_call_history` to comply with older PyOWM naming convention
added method `OneCall.to_geopoint` | @@ -527,7 +527,7 @@ class WeatherManager:
_, json_data = self.http_client.get_json(ONE_CALL_URI, params=params)
return one_call.OneCall.from_dict(json_data)
- def one_call_historical(self, lat: Union[int, float], lon: Union[int, float], dt: int = None):
+ def one_call_history(self, lat: Union[int, float], lon: Union[int, float], dt: int = None):
"""
Queries the OWM Weather API with one call for historical weather information for the
specified geographic coordinates.
|
[microNPU] Remove xfail from tests relating to
Removes the xfail markers from tests since the issue has now
been resolved. | @@ -347,7 +347,6 @@ def test_ethosu_binary_elementwise(
([1, 4, 4], [4, 1]),
],
)
[email protected](reason="See https://github.com/apache/tvm/issues/12511")
def test_binary_add_with_non_4d_shapes(
request,
accel_type,
@@ -606,7 +605,6 @@ def test_ethosu_right_shift_binary_elemwise(
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("ifm_shape", [(3, 2), (1, 15, 11, 7), (3, 1, 12), (400,)])
@pytest.mark.parametrize("ifm_scale, ifm_zp, ofm_scale, ofm_zp", [(1, 0, 1, 0), (0.015, 3, 0.2, 5)])
[email protected](reason="See https://github.com/apache/tvm/issues/12511")
def test_ethosu_identity_codegen(
request, ifm_shape, ifm_scale, ifm_zp, ofm_scale, ofm_zp, accel_type
):
@@ -655,7 +653,6 @@ def test_ethosu_identity_codegen(
((8, 7, 3), (-4, 1, 8, -2)),
],
)
[email protected](reason="See https://github.com/apache/tvm/issues/12511")
def test_relay_reshape_codegen(ifm_shape, new_shape, accel_type):
np.random.seed(0)
@@ -688,7 +685,6 @@ def test_relay_reshape_codegen(ifm_shape, new_shape, accel_type):
([5000], [123], [2151]),
],
)
[email protected](reason="See https://github.com/apache/tvm/issues/12511")
def test_tflite_slice(request, accel_type, ifm_shape, begin, size):
np.random.seed(0)
@@ -724,7 +720,6 @@ def test_tflite_strided_slice(accel_type, ifm_shape, begin, end):
"ifm_shape",
[[1, 5, 12, 4], [1, 1, 2], [4, 3, 2], [10, 20], [345]],
)
[email protected](reason="See https://github.com/apache/tvm/issues/12511")
def test_ethosu_unary_elementwise(
request,
accel_type,
|
Change "@asyncio.coroutine" to "async def"
Fix - DeprecationWarning: "@coroutine" decorator is deprecated since Python 3.8, use "async def" instead | @@ -13,8 +13,7 @@ from asyncio.locks import Lock as _Lock
class Lock(_Lock):
if sys.version_info < (3, 7, 0):
- @asyncio.coroutine
- def acquire(self):
+ async def acquire(self):
"""Acquire a lock.
This method blocks until the lock is unlocked, then sets it to
locked and returns True.
@@ -27,7 +26,7 @@ class Lock(_Lock):
self._waiters.append(fut)
try:
- yield from fut
+ await fut
self._locked = True
return True
except asyncio.CancelledError:
|
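For reference, the same migration outside the lock internals, using only the standard library: the decorator plus `yield from` pair becomes native coroutine syntax.

```python
import asyncio

# Deprecated since Python 3.8:
#
#     @asyncio.coroutine
#     def wait_a_bit():
#         yield from asyncio.sleep(1)

async def wait_a_bit():
    await asyncio.sleep(1)

asyncio.run(wait_a_bit())
```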
tests: source: file: Correct gzip test
* Tests were checking for builtins.open rather than gzip.open.
This patch corrects that. | @@ -67,9 +67,9 @@ class TestFileSource(AsyncTestCase):
source = FakeFileSource('testfile.gz')
m_open = mock_open()
with patch('os.path.exists', return_value=True), \
- patch('builtins.open', m_open):
+ patch('gzip.open', m_open):
await source.open()
- m_open.assert_called_once_with('testfile.gz', 'rb')
+ m_open.assert_called_once_with('testfile.gz', 'rt')
async def test_open_no_file(self):
source = FakeFileSource('testfile')
@@ -87,9 +87,9 @@ class TestFileSource(AsyncTestCase):
async def test_close_gz(self):
source = FakeFileSource('testfile.gz')
m_open = mock_open()
- with patch('builtins.open', m_open):
+ with patch('gzip.open', m_open):
await source.close()
- m_open.assert_called_once_with('testfile.gz', 'wb')
+ m_open.assert_called_once_with('testfile.gz', 'wt')
async def test_close_readonly(self):
source = FakeFileSource('testfile:ro')
|
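The point of the test fix, patching `gzip.open` with a text mode rather than `builtins.open`, shown as a self-contained sketch; `read_maybe_gzip` is a stand-in helper, not the project's source class.

```python
import gzip
from unittest.mock import mock_open, patch

def read_maybe_gzip(path: str) -> str:
    opener = gzip.open if path.endswith(".gz") else open
    with opener(path, "rt") as handle:
        return handle.read()

def test_read_gzip():
    m = mock_open(read_data="hello")
    with patch("gzip.open", m):           # patch gzip.open, not builtins.open
        assert read_maybe_gzip("data.txt.gz") == "hello"
    m.assert_called_once_with("data.txt.gz", "rt")
```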
client: do not retry upload long time
This is to make the task like
fail fast. | @@ -734,12 +734,14 @@ def _upload_with_go(storage, outdir, isolated_client):
# This mitigates https://crbug.com/1094369, where there is a data race on
# the uploaded files.
backoff = 10
+ started = time.time()
while True:
try:
_run_go_isolated_and_wait(cmd)
break
except Exception:
- if backoff > 100:
+ if time.time() > started + 60 * 2:
+ # This is to not wait task having leaked process long time.
raise
on_error.report('error before %d second backoff' % backoff)
|
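The change replaces a backoff-count cap with a wall-clock deadline. A generic sketch of that retry shape, with function and parameter names chosen here rather than taken from the client code:

```python
import time

def run_with_deadline(action, deadline_seconds=120, backoff=10):
    """Retry an action, but give up entirely once a wall-clock deadline passes."""
    started = time.time()
    while True:
        try:
            return action()
        except Exception:
            if time.time() > started + deadline_seconds:
                raise               # fail fast instead of retrying indefinitely
            time.sleep(backoff)
```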
DPDK: pick last hotplug probe match
The probe can occur multiple times; the last one will be the successful probe.
Swap to picking the last probe to split results. | @@ -832,10 +832,10 @@ class DpdkTestpmd(Tool):
]
after_rescind = self._last_run_output[device_removal_index:]
# Identify the device add event
- hotplug_match = self._search_hotplug_regex.search(after_rescind)
+ hotplug_match = self._search_hotplug_regex.finditer(after_rescind)
if not hotplug_match:
- hotplug_alt_match = self._search_hotplug_regex_alt.search(after_rescind)
+ hotplug_alt_match = self._search_hotplug_regex_alt.finditer(after_rescind)
if hotplug_alt_match:
hotplug_match = hotplug_alt_match
else:
@@ -843,13 +843,18 @@ class DpdkTestpmd(Tool):
if command_dumped in self._last_run_output:
raise LisaException("Testpmd crashed after device removal.")
+ # pick the last match
+ try:
+ *_, last_match = hotplug_match
+ except ValueError:
raise LisaException(
"Could not identify vf hotplug events in testpmd output."
)
- self.node.log.info(f"Identified hotplug event: {hotplug_match.group(0)}")
- before_reenable = after_rescind[: hotplug_match.start()]
- after_reenable = after_rescind[hotplug_match.end() :]
+ self.node.log.info(f"Identified hotplug event: {last_match.group(0)}")
+
+ before_reenable = after_rescind[: last_match.start()]
+ after_reenable = after_rescind[last_match.end() :]
self._testpmd_output_during_rescind = before_reenable
self._testpmd_output_after_reenable = after_reenable
|
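Picking the last occurrence of a pattern with `re.finditer`, the core of the fix above, reduces to a few lines; `split_on_last_match` is an illustrative helper, not DPDK code.

```python
import re

def split_on_last_match(pattern: str, text: str):
    """Split text around the final match of pattern; raise if there is no match."""
    try:
        *_, last = re.finditer(pattern, text)
    except ValueError:
        raise ValueError(f"no match for {pattern!r}")
    return text[: last.start()], text[last.end():]
```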
llvm, mechanisms/optimizationcontrolmechanism: Add callbacks to generate evaluate function
Add custom output state invocations to implement value parsing. | @@ -972,6 +972,36 @@ class OptimizationControlMechanism(ControlMechanism):
data = self.agent_rep._get_data_initializer(execution_id)
return (state, data)
+ def _get_evaluate_output_struct_type(self, ctx):
+ # Returns a scalar that is the predicted net_outcome
+ return ctx.float_ty
+
+ def _get_evaluate_alloc_struct_type(self, ctx):
+ return pnlvm.ir.ArrayType(ctx.float_ty,
+ len(self.control_allocation_search_space))
+
+ def _gen_llvm_evaluate(self, ctx, builder, params, state, arg_in, arg_out):
+ return builder
+
+ def _gen_llvm_output_states(self, ctx, builder, params, context, value, so):
+ for i, state in enumerate(self.output_states):
+
+ # LLVM equivalent of parse value; extract array element
+ # corresponsing to the output state number
+ os_input = builder.alloca(pnlvm.ir.ArrayType(ctx.float_ty, 1))
+ val_ptr = builder.gep(value, [ctx.int32_ty(0), ctx.int32_ty(0), ctx.int32_ty(i)])
+ dest_ptr = builder.gep(os_input, [ctx.int32_ty(0), ctx.int32_ty(0)])
+ builder.store(builder.load(val_ptr), dest_ptr)
+
+
+ os_params = builder.gep(params, [ctx.int32_ty(0), ctx.int32_ty(2), ctx.int32_ty(i)])
+ os_context = builder.gep(context, [ctx.int32_ty(0), ctx.int32_ty(2), ctx.int32_ty(i)])
+ os_output = builder.gep(so, [ctx.int32_ty(0), ctx.int32_ty(i)])
+ os_function = ctx.get_llvm_function(state)
+ builder.call(os_function, [os_params, os_context, os_input, os_output])
+
+ return builder
+
def apply_control_allocation(self, control_allocation, runtime_params, context, execution_id=None):
'''Update `values <ControlSignal.value>` of `control_signals <ControlMechanism.control_signals>` based on
specified `control_allocation <ControlMechanism.control_allocation>`.
|
added outputName to thumbnail representation
When integrating the thumbnail, the 'outputName' value will be used in the template as the {output} placeholder.
Without it integrated thumbnail would overwrite integrated review high res file. | @@ -162,6 +162,7 @@ class ExtractReview(publish.Extractor):
instance.data["representations"].append({
"name": "thumbnail",
"ext": "jpg",
+ "outputName": "thumb",
"files": os.path.basename(thumbnail_path),
"stagingDir": staging_dir,
"tags": ["thumbnail", "delete"]
|
Update prometheus_tds.txt
Minor update | @@ -17,6 +17,8 @@ http://109.248.203.207
http://109.248.203.23
http://109.248.203.33
http://109.248.203.50
+http://139.162.190.64
+http://139.162.190.91
http://155.94.193.10
http://172.104.151.55
http://185.158.114.121
@@ -40,11 +42,14 @@ http://188.130.139.228
http://188.130.139.5
http://188.130.139.88
http://195.123.220.220
+http://195.123.222.26
+http://195.123.241.180
http://195.62.53.109
http://46.8.210.13
http://46.8.210.30
http://51.15.27.25
http://62.138.0.68
+http://85.90.247.25
abouniteta.ru
afternearde.ru
anumessensan.ru
|
instruments/acme_cape: Fix missing parameter to `get_instruments`
The signature of `get_instruments` was missing the `keep_raw` parameter
so fix this and use it as part of the subsequent common invocation. | @@ -310,7 +310,7 @@ class AcmeCapeBackend(EnergyInstrumentBackend):
# pylint: disable=arguments-differ
def get_instruments(self, target, metadir,
- iio_capture, host, iio_devices, buffer_size):
+ iio_capture, host, iio_devices, buffer_size, keep_raw):
#
# Devlib's ACME instrument uses iio-capture under the hood, which can
@@ -331,7 +331,7 @@ class AcmeCapeBackend(EnergyInstrumentBackend):
for iio_device in iio_devices:
ret[iio_device] = AcmeCapeInstrument(
target, iio_capture=iio_capture, host=host,
- iio_device=iio_device, buffer_size=buffer_size, keep_raw=self.keep_raw)
+ iio_device=iio_device, buffer_size=buffer_size, keep_raw=keep_raw)
return ret
|