message
stringlengths 13
484
| diff
stringlengths 38
4.63k
|
---|---|
Fix typo
This typo causes tests to fail | @@ -3,7 +3,7 @@ from collections import defaultdict
from functools import lru_cache
from itertools import chain
-from wikibaseintegrator.wdi_config import config
+from wikidataintegrator.wdi_config import config
example_Q14911732 = {'P1057':
{'Q14911732-23F268EB-2848-4A82-A248-CF4DF6B256BC':
|
Add info on segments extension to contrib guide
Add link from segments extension doc to routed networks spec,
which explains the use case.
Trivialfix
Closes-bug: | @@ -19,6 +19,8 @@ Neutron has an extension that allows CRUD operations on the ``/segments``
resource in the API, that corresponds to the ``NetworkSegment`` entity in the
DB layer. The extension is implemented as a service plug-in.
+Details about the DB models, API extension, and use cases can be found here: `routed networks spec <http://specs.openstack.org/openstack/neutron-specs/specs/newton/routed-networks.html>`_
+
.. note:: The ``segments`` service plug-in is not configured by default. To
configure it, add ``segments`` to the ``service_plugins`` parameter in
``neutron.conf``
|
MAINT: Restore KnowFailureTest
This is for backwards compatibility in the soon to be removed nose support, so leave it in for a bit. | from nose.plugins.base import Plugin
from nose.util import src
from .nosetester import get_package_name
-from .utils import KnownFailureException
+from .utils import KnownFailureException, KnownFailureTest
# Some of the classes in this module begin with 'Numpy' to clearly distinguish
|
[typo] fucntion ==> function
fucntion ==> function | @@ -118,7 +118,7 @@ TVM_DLL int TVMBackendParallelBarrier(int task_id, TVMParallelGroupEnv* penv);
/*!
- * \brief Simple static initialization fucntion.
+ * \brief Simple static initialization function.
* Run f once and set handle to be not null.
* This function is mainly used for test purpose.
*
|
UIEditor : Improve preset name editing
Early out when name is unchanged
Disallow empty names | @@ -1391,6 +1391,14 @@ class _PresetsEditor( GafferUI.Widget ) :
oldName = selectedPaths[0][0]
newName = nameWidget.getText()
+ if oldName == newName :
+ return True
+
+ if newName == "" :
+ # Empty names are not allowed, so revert to previous
+ nameWidget.setText( oldName )
+ return True
+
items = self.__pathListing.getPath().dict().items()
with Gaffer.BlockedConnection( self.__plugMetadataChangedConnection ) :
with Gaffer.UndoContext( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
|
[dagit] Repair duplicate keys in App.test
Summary: Try to fix App.test.
Test Plan: `yarn test App.test`
Reviewers: bengotow | @@ -34,6 +34,7 @@ export const PipelineTable: React.FC<Props> = (props) => {
items.push(...item.pipeline.modes.map((mode) => ({...item, mode: mode.name})));
}
}
+
return (
<Table>
<thead>
@@ -100,7 +101,7 @@ export const PipelineTable: React.FC<Props> = (props) => {
<Group direction="row" spacing={4} alignItems="center">
{pipeline.runs.map((run) => (
<RunStatusWithStats
- key={run.runId}
+ key={run.id}
runId={run.runId}
status={run.status}
size={12}
|
Updated print statements as function
Use Python3 style print() | @@ -25,7 +25,7 @@ To replace the configuration do the following::
Note that the changes have not been applied yet. Before applying the configuration you can check the changes::
- >>> print device.compare_config()
+ >>> print(device.compare_config())
+ hostname pyeos-unittest-changed
- hostname pyeos-unittest
router bgp 65000
@@ -57,7 +57,7 @@ Merging Configuration
Merging configuration is similar, but you need to load the configuration with the merge method::
>>> device.load_merge_candidate(config='hostname test\ninterface Ethernet2\ndescription bla')
- >>> print device.compare_config()
+ >>> print(device.compare_config())
configure
hostname test
interface Ethernet2
|
[Caffe2] [Int8] More exhaustive unit tests for int8 ops (+ bug fix in Int8Add in-place case)
As title. This catches one bug in the Int8Add in-place case,
which wasn't tested in int8_test.cc | @@ -678,6 +678,35 @@ class Caffe2Backend(Backend):
cls._visit_and_substitute_raw_values(model.graph.node, raw_values_dict)
+ @classmethod
+ def _substitute_raw_value(cls, tp, raw_values_dict):
+ if tp.HasField('raw_data') and tp.raw_data == bytes(b'__EXTERNAL'):
+ if tp.name not in raw_values_dict:
+ raise RuntimeError('TensorProto for value {} referenced raw data but it was not found!'.format(tp.name))
+ else:
+ tp.raw_data = raw_values_dict[tp.name]
+
+ @classmethod
+ def _visit_and_substitute_raw_values(cls, nodes, raw_values_dict):
+ for node in nodes:
+ for attr in node.attribute:
+ if attr.HasField('t'):
+ cls._substitute_raw_value(attr.t, raw_values_dict)
+ for t in attr.tensors:
+ cls._substitute_raw_value(t, raw_values_dict)
+ if attr.HasField('g'):
+ cls._visit_and_substitute_raw_values(attr.g.node, raw_values_dict)
+ for g in attr.graphs:
+ cls._visit_and_substitute_raw_values(g.node, raw_values_dict)
+
+ @classmethod
+ def _external_value_resolution_pass(cls, model, raw_values_dict):
+ for init in model.graph.initializer:
+ cls._substitute_raw_value(init, raw_values_dict)
+
+ cls._visit_and_substitute_raw_values(model.graph.node, raw_values_dict)
+
+
@classmethod
def _direct_initialize_parameters(cls, initializer, ws, device_option):
for tp in initializer:
|
Update README.md
fix reference | @@ -6,7 +6,7 @@ This directory contains the `Dockerfile` to build and run PyAleph in production.
You can build the Docker image simply using:
```shell script
-./deployment/docker/build.sh
+./deployment/docker-build/build.sh
```
or by running the Docker build command from the root of the repository:
|
Make test_throttler happy
neutron.tests.unit.common.test_utils.TestThrottler.test_throttler
is failing with "AssertionError: 1 not greater than 1", change the
assert to assertGreaterEqual.
Closes-Bug: | @@ -426,7 +426,7 @@ class TestThrottler(base.BaseTestCase):
def sleep_mock(amount_to_sleep):
sleep(amount_to_sleep)
- self.assertGreater(threshold, amount_to_sleep)
+ self.assertGreaterEqual(threshold, amount_to_sleep)
with mock.patch.object(utils.eventlet, "sleep",
side_effect=sleep_mock):
|
Add metric threshold settings to managedobjectprofile
HG--
branch : feature/microservices | @@ -28,14 +28,30 @@ from noc.main.models.remotesystem import RemoteSystem
from noc.core.scheduler.job import Job
from noc.core.defer import call_later
from .objectmap import ObjectMap
-from noc.sa.interfaces.base import (DictListParameter, ObjectIdParameter, BooleanParameter, IntParameter)
+from noc.sa.interfaces.base import (DictListParameter, ObjectIdParameter, BooleanParameter,
+ IntParameter, StringParameter)
m_valid = DictListParameter(attrs={"metric_type": ObjectIdParameter(required=True),
- "is_active": BooleanParameter(required=True),
+ "is_active": BooleanParameter(default=False),
+ "is_stored": BooleanParameter(default=True),
+ "window_type": StringParameter(choices=["m", "t"],
+ default="m"),
+ "window": IntParameter(default=1),
+ "window_function": StringParameter(choices=["handler", "last", "avg",
+ "percentile", "q1", "q2", "q3",
+ "p95", "p99"],
+ default="last"),
+ "window_config": StringParameter(default=None),
+ "window_related": BooleanParameter(default=False),
"low_error": IntParameter(required=False),
"high_error": IntParameter(required=False),
"low_warn": IntParameter(required=False),
- "high_warn": IntParameter(required=False)})
+ "high_warn": IntParameter(required=False),
+ "low_error_weight": IntParameter(default=10),
+ "low_warn_weight": IntParameter(default=1),
+ "high_warn_weight": IntParameter(default=1),
+ "high_error_weight": IntParameter(default=10)})
+
id_lock = Lock()
|
fix: connected app
fix errors from previous refactor | @@ -74,7 +74,6 @@ class ConnectedApp(Document):
try:
token = self.get_stored_user_token(user)
- token = token.check_validity()
except frappe.exceptions.DoesNotExistError:
redirect = self.initiate_web_application_flow(user, success_uri)
frappe.local.response['type'] = 'redirect'
@@ -122,7 +121,7 @@ def callback(code=None, state=None):
frappe.throw(_('Invalid App'))
oauth = app.get_oauth2_session()
- token = oauth.fetch_token(app.token_endpoint,
+ token = oauth.fetch_token(app.token_uri,
code=code,
client_secret=app.get_password('client_secret'),
include_client_id=True
|
window.py: Change ConfigureNotify handling
Per the TODO, make it confirm to ICCCM 4.2.3 | @@ -148,6 +148,8 @@ class _Window(CommandObject):
self.icons = {}
window.set_attribute(eventmask=self._window_mask)
+ self._old_geometry = None
+
self._float_info = {
'x': None,
'y': None,
@@ -458,20 +460,6 @@ class _Window(CommandObject):
space around window as int or list of ints [N E S W]
"""
- # TODO: self.x/y/height/width are updated BEFORE
- # place is called, so there's no way to know if only
- # the position is changed, so we are sending
- # the ConfigureNotify every time place is called
- #
- # # if position change and size don't
- # # send a configure notify. See ICCCM 4.2.3
- # send_notify = False
- # if (self.x != x or self.y != y) and \
- # (self.width == width and self.height == height):
- # send_notify = True
- # #for now, we just:
- send_notify = True
-
# Adjust the placement to account for layout margins, if there are any.
if margin is not None:
if isinstance(margin, int):
@@ -486,11 +474,33 @@ class _Window(CommandObject):
self.float_x = x - self.group.screen.x
self.float_y = y - self.group.screen.y
+ # See ICCCM 4.2.3
+ send_notify = False
+ if (
+ self._old_geometry is not None
+ and (
+ self._old_geometry['x'] != x
+ or self._old_geometry['y'] != y
+ )
+ and (
+ self._old_geometry['width'] == width
+ and self._old_geometry['height'] == height
+ )
+ ):
+ send_notify = True
+
self.x = x
self.y = y
self.width = width
self.height = height
+ self._old_geometry = {
+ 'x': x,
+ 'y': y,
+ 'width': width,
+ 'height': height,
+ }
+
kwarg = dict(
x=x,
y=y,
|
Eslint complained about the risk of stale refs
In the useMount example it is worried about a non-existent problem.
useMount is one recommended solution:
In the second example, maybe it was right.
I've declared or inlined all dependencies. | @@ -89,9 +89,12 @@ export const UnwrappedPerfPage = ({
// Fetch the real data
useEffect(() => {
if (uiAvailable) {
- doPerfRESTFetch({ ...queryparams.getAll() });
+ const urlparams = new QueryParamHelpers(
+ get(testMethodPerfUI, 'defaults', {}),
+ );
+ doPerfRESTFetch({ ...urlparams.getAll() });
}
- }, [uiAvailable, doPerfRESTFetch, queryparams]);
+ }, [uiAvailable, doPerfRESTFetch, testMethodPerfUI]);
let results: {}[];
if (
|
Update junos.py
the whole reason i created this commit :-/ | @@ -49,7 +49,8 @@ def init(opts):
"host" : opts['proxy']['host'],
"password" : opts['proxy']['passwd']
}
- optional_args= ['gather_facts',
+ optional_args= ['port',
+ 'gather_facts',
'mode',
'baud',
'attempts',
|
Handle None result correctly
If previous handler had an exception, the future provided to
_determine_next will return None, and therefore cause another error to
occur. This fixes the issue, and logs it accordingly. | @@ -202,6 +202,11 @@ class Dispatcher(InstrumentedThread):
LOGGER.exception("Dispatcher timeout waiting on handler result.")
raise
+ if res is None:
+ LOGGER.debug('Ignoring None handler result, likely due to an '
+ 'unhandled error while executing the handler')
+ return
+
if res.status == HandlerStatus.DROP:
del self._message_information[message_id]
|
style: fmt
[skip ci] | @@ -62,7 +62,9 @@ def get_context(context):
"google_analytics_id": frappe.conf.get("google_analytics_id"),
"google_analytics_anonymize_ip": frappe.conf.get("google_analytics_anonymize_ip"),
"mixpanel_id": frappe.conf.get("mixpanel_id"),
- "app_name": frappe.get_website_settings("app_name") or frappe.get_system_settings("app_name") or "Frappe",
+ "app_name": (
+ frappe.get_website_settings("app_name") or frappe.get_system_settings("app_name") or "Frappe"
+ ),
}
)
|
Install missing packages in dev install
Test Plan: Manual
Reviewers: sashank, yuhan | @@ -70,6 +70,8 @@ def main(quiet):
"-e python_modules/libraries/dagster-gcp",
"-e python_modules/libraries/dagster-k8s",
"-e python_modules/libraries/dagster-celery-k8s",
+ "-e python_modules/libraries/dagster-github",
+ "-e python_modules/libraries/dagster-mysql",
"-e python_modules/libraries/dagster-pagerduty",
"-e python_modules/libraries/dagster-papertrail",
"-e python_modules/libraries/dagster-postgres",
|
Disable MNIST test in test_xla()
Summary: Pull Request resolved:
Test Plan: Imported from OSS | @@ -225,8 +225,10 @@ test_xla() {
echo "Running Python Tests"
./test/run_tests.sh
- echo "Running MNIST Test"
- python test/test_train_mnist.py --tidy
+ # Disable MNIST test to avoid HTTP error from http://yann.lecun.com/exdb/mnist/
+ # Please add this test back after torchvision dataset is switched to S3 mirror
+ # echo "Running MNIST Test"
+ # python test/test_train_mnist.py --tidy
echo "Running C++ Tests"
pushd test/cpp
|
Corrected version support note
Clarified the note on Elasticsearch version support since v8.0 is not yet officially supported. | @@ -26,7 +26,7 @@ The implementation uses `Elasticsearch <https://www.elastic.co/guide/en/elastics
.. important::
- For Mattermost v6.0, a minimum of Elasticsearch v7.x is supported.
+ For Mattermost v6.0, Elasticsearch v7.x is supported.
Previous versions of Mattermost, including v5.38 and earlier releases, support Elasticsearch v5.x, v6.x, and v7.x.
Deployment guide
|
Updates fastcalc.pyx so that all integer numpy arrays use int64 types.
There were several places where the Cython code expected numpy
arrays with np.int or INT == long C-type. We want all integer arrays
to be specifically int64 arrays, and this commit makes this update
within the fastcalc.pyx file. | @@ -1255,19 +1255,19 @@ def restricted_abs_argmax(np.ndarray[double, ndim=1] ar, np.ndarray[np.int64_t,
#@cython.cdivision(True) # turn off divide-by-zero checking (keeping off for Python behavior of div/mods)
@cython.boundscheck(False) # turn off bounds-checking for entire function
@cython.wraparound(False) # turn off negative index wrapping for entire function
-def fast_compose_cliffords(np.ndarray[INT, ndim=2] s1, np.ndarray[INT, ndim=1] p1,
- np.ndarray[INT, ndim=2] s2, np.ndarray[INT, ndim=1] p2):
+def fast_compose_cliffords(np.ndarray[np.int64_t, ndim=2] s1, np.ndarray[np.int64_t, ndim=1] p1,
+ np.ndarray[np.int64_t, ndim=2] s2, np.ndarray[np.int64_t, ndim=1] p2):
cdef INT i
cdef INT j
cdef INT k
cdef INT N = s1.shape[0] // 2 # Number of qubits
# Temporary space of C^T U C terms
- cdef np.ndarray[INT, ndim=2, mode="c"] inner = np.zeros([2*N, 2*N], dtype=np.int)
+ cdef np.ndarray[np.int64_t, ndim=2, mode="c"] inner = np.zeros([2*N, 2*N], dtype=np.int)
# Outputs
- cdef np.ndarray[INT, ndim=2, mode="c"] s = np.zeros([2*N, 2*N], dtype=np.int)
- cdef np.ndarray[INT, ndim=1, mode="c"] p = np.zeros([2*N], dtype=np.int)
+ cdef np.ndarray[np.int64_t, ndim=2, mode="c"] s = np.zeros([2*N, 2*N], dtype=np.int64)
+ cdef np.ndarray[np.int64_t, ndim=1, mode="c"] p = np.zeros([2*N], dtype=np.int64)
# If C' = [[C^00, C^01], [C^10, C^11]] and U = [[0, 0], [I, 0]]
# then C'^T U C' = [[C^10^T C00, C^10^T C^01], [C^11^T C^00, C^11^T C^01]]
|
Fixed grammar mistakes/typos
Fixed grammar mistakes/typos throughout the text | @@ -5,7 +5,7 @@ First, follow instructions on :doc:`Installation page </intro/installation>`
to install ``deeppavlov`` package for Python 3.6/3.7.
DeepPavlov contains a bunch of great pre-trained NLP models. Each model is
-determined by it's config file. List of models is available on
+determined by its config file. List of models is available on
:doc:`the doc page </features/overview>` or in
the ``deeppavlov.configs``:
@@ -13,7 +13,7 @@ the ``deeppavlov.configs``:
from deeppavlov import configs
-When you're decided on the model (+ config file), there are two ways to train,
+When you've decided on the model (+ config file), there are two ways to train,
evaluate and infer it:
* via `Command line interface (CLI)`_ and
@@ -52,7 +52,7 @@ You can train it in the same simple way:
Dataset will be downloaded regardless of whether there was ``-d`` flag or
not.
- To train on your own data you need to modify dataset reader path in the
+ To train on your own data, you need to modify dataset reader path in the
`train section doc <configuration.html#Train-config>`__. The data format is
specified in the corresponding model doc page.
@@ -116,7 +116,7 @@ You can train it in the same simple way:
Dataset will be downloaded regardless of whether there was ``-d`` flag or
not.
- To train on your own data you need to modify dataset reader path in the
+ To train on your own data, you need to modify dataset reader path in the
`train section doc <configuration.html#Train-config>`__. The data format is
specified in the corresponding model doc page.
@@ -153,7 +153,7 @@ Pretrained models
DeepPavlov provides a wide range of pretrained models and skills.
See :doc:`features overview </features/overview>` for more info. Please
note that most of our models are trained on specific datasets for
-specific tasks and may require further training on you data.
+specific tasks and may require further training on your data.
You can find a list of our out-of-the-box models `below <#out-of-the-box-pretrained-models>`_.
|
Fix typo to match library default
Matches default given at | @@ -667,7 +667,7 @@ You can control the connection pool size using the `limits` keyword
argument on the client. It takes instances of `httpx.Limits` which define:
- `max_keepalive`, number of allowable keep-alive connections, or `None` to always
-allow. (Defaults 10)
+allow. (Defaults 20)
- `max_connections`, maximum number of allowable connections, or` None` for no limits.
(Default 100)
|
Use correct vars for cleanup commands
Ensures that cleanup links with vars will be built with the same vars used by the original link. | @@ -52,7 +52,7 @@ class PlanningService(BasePlanningService):
else:
for agent in operation.agents:
links.extend(await self._check_and_generate_cleanup_links(agent, operation))
- return reversed(await self.trim_links(operation, links, agent))
+ return reversed(links)
async def generate_and_trim_links(self, agent, operation, abilities, trim=True):
"""
@@ -124,7 +124,9 @@ class PlanningService(BasePlanningService):
ability = (await self.get_service('data_svc').locate('abilities',
match=dict(unique=link.ability.unique)))[0]
if ability.cleanup and link.status >= 0:
- links.append(Link(operation=operation.id, command=ability.cleanup, paw=agent.paw, cleanup=1,
+ decoded_cmd = self.decode(ability.cleanup, agent, agent.group, operation.RESERVED)
+ variant, _, _ = await self._build_single_test_variant(decoded_cmd, link.used)
+ links.append(Link(operation=operation.id, command=self.encode_string(variant), paw=agent.paw, cleanup=1,
ability=ability, score=0, jitter=0, status=link_status))
return links
|
test(meta): control JS bundle sizes
Any drastic increase in production bundle sizes should fail in CI. | @@ -12,6 +12,7 @@ import unittest
from contextlib import contextmanager
from functools import wraps
from glob import glob
+from pathlib import Path
from typing import List, Optional
from unittest.case import skipIf
from unittest.mock import patch
@@ -31,6 +32,7 @@ from frappe.query_builder.utils import db_type_is
from frappe.tests.test_query_builder import run_only_if
from frappe.utils import add_to_date, get_bench_path, get_bench_relative_path, now
from frappe.utils.backups import fetch_latest_backups
+from frappe.utils.jinja_globals import bundled_asset
_result: Optional[Result] = None
TEST_SITE = "commands-site-O4PN2QKA.test" # added random string tag to avoid collisions
@@ -692,7 +694,25 @@ class TestSiteMigration(BaseTestCommands):
class TestBenchBuild(BaseTestCommands):
- def test_build_assets(self):
- with cli(frappe.commands.utils.build) as result:
+ def test_build_assets_size_check(self):
+ with cli(frappe.commands.utils.build, "--force --production") as result:
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.exception, None)
+
+ CURRENT_SIZE = 3.7 # MB
+ JS_ASSET_THRESHOLD = 0.1
+
+ hooks = frappe.get_hooks()
+ default_bundle = hooks["app_include_js"]
+
+ default_bundle_size = 0.0
+
+ for chunk in default_bundle:
+ abs_path = Path.cwd() / frappe.local.sites_path / bundled_asset(chunk)[1:]
+ default_bundle_size += abs_path.stat().st_size
+
+ self.assertLessEqual(
+ default_bundle_size / (1024 * 1024),
+ CURRENT_SIZE * (1 + JS_ASSET_THRESHOLD),
+ f"Default JS bundle size increased by {JS_ASSET_THRESHOLD:.2%} or more",
+ )
|
Fixed "Zero length field name in format" error in Inkcape <= 0.91
In Python 2.6 shipped with Inkscape 0.91 and prior the synatx
'Reset ({:.1f})'.format(reset_scale)
leads to a "Zero length field name in format" error. | @@ -232,10 +232,10 @@ edited node in Inkscape."""
self._scale.set(self.scale_factor_after_loading())
reset_scale = self.current_scale_factor if self.current_scale_factor else self.global_scale_factor
- self._reset_button = Tk.Button(box, text="Reset ({:.1f})".format(reset_scale),
+ self._reset_button = Tk.Button(box, text="Reset ({0:.1f})".format(reset_scale),
command=self.reset_scale_factor)
self._reset_button.pack(ipadx=10, ipady=4, pady=5, padx=5, side="left")
- self._global_button = Tk.Button(box, text="Global ({:.1f})".format(self.global_scale_factor),
+ self._global_button = Tk.Button(box, text="Global ({0:.1f})".format(self.global_scale_factor),
command=self.use_global_scale_factor)
self._global_button.pack(ipadx=10, ipady=4, pady=5, padx=5, side="left")
@@ -636,8 +636,8 @@ if TOOLKIT in (GTK, GTKSOURCEVIEW):
# We need buttons with custom labels and stock icons, so we make some
reset_scale = self.current_scale_factor if self.current_scale_factor else self.global_scale_factor
- items = [('tt-reset', 'Reset ({:.1f})'.format(reset_scale), 0, 0, None),
- ('tt-global', 'Global ({:.1f})'.format(self.global_scale_factor), 0, 0, None)]
+ items = [('tt-reset', 'Reset ({0:.1f})'.format(reset_scale), 0, 0, None),
+ ('tt-global', 'Global ({0:.1f})'.format(self.global_scale_factor), 0, 0, None)]
# Forcibly show icons
settings = gtk.settings_get_default()
@@ -656,11 +656,14 @@ if TOOLKIT in (GTK, GTKSOURCEVIEW):
factory.add(new_stock, icon_set)
scale_reset_button = gtk.Button(stock='tt-reset')
- scale_reset_button.set_tooltip_text("Set scale factor to the value this node has been created with ({:.1f})".format(reset_scale))
+ scale_reset_button.set_tooltip_text(
+ "Set scale factor to the value this node has been created with ({0:.1f})".format(reset_scale))
scale_reset_button.connect('clicked', self.reset_scale_factor)
scale_global_button = gtk.Button(stock='tt-global')
- scale_global_button.set_tooltip_text("Set scale factor to the value of the previously edited node in Inkscape ({:.1f})".format(self.global_scale_factor))
+ scale_global_button.set_tooltip_text(
+ "Set scale factor to the value of the previously edited node in Inkscape ({0:.1f})".format(
+ self.global_scale_factor))
scale_global_button.connect('clicked', self.use_global_scale_factor)
scale_box.pack_start(self._scale, True, True, 2)
|
Apply suggestions from code review
Re-updated docs | @@ -371,7 +371,7 @@ def get_detections(last_behavior_time=None, behavior_id=None, filter_arg=None):
def get_fetch_detections(last_created_timestamp=None, filter_arg=None):
- """ Sends detection request, based om created_timestamp field. Used for fetch-incidents
+ """ Sends detection request, based on the created_timestamp field. Used for fetch-incidents
Args:
last_created_timestamp: last created timestamp of the results will be greater than this value.
@@ -616,8 +616,8 @@ def timestamp_length_equalization(timestamp1, timestamp2):
"""
Makes sure the timestamps are of the same length.
Args:
- timestamp1: a timestamp
- timestamp2: a timestamp
+ timestamp1: First timestamp to compare.
+ timestamp2: Second timestamp to compare.
Returns:
the two timestamps in the same length (the shorter one)
|
Metadata API: set default version for MetaFile()
This makes sense to me: if you create a new MetaFile, logically it
is version 1). This does not change serialization in any way.
Practical code becomes slightly nicer as
metafiles = defaultdict(MetaFile)
now works without lambdas. | @@ -1076,7 +1076,7 @@ class MetaFile(BaseFile):
def __init__(
self,
- version: int,
+ version: int = 1,
length: Optional[int] = None,
hashes: Optional[Dict[str, str]] = None,
unrecognized_fields: Optional[Dict[str, Any]] = None,
|
cmake: Update Conan conventions
Automatically created by bincrafters-conventions 0.18.1 | import os
from conans import tools, ConanFile, CMake
from conans import __version__ as conan_version
-from conans.model.version import Version
+from conans.tools import Version
from conans.errors import ConanInvalidConfiguration, NotFoundException, ConanException
|
deprecate legacy linesearch arguments
This patch add deprecation warnings to the use of newton's legacy arguments
`searchrange` and `rebound`, which have been replaced by `minscale`,
`acceptscale` and `maxscale`. | @@ -214,9 +214,14 @@ class NormBased(LineSearch):
@classmethod
def legacy(cls, kwargs):
- minscale, acceptscale = kwargs.pop('searchrange', (.01, 2/3))
- maxscale = kwargs.pop('rebound', 2.)
- return cls(minscale=minscale, acceptscale=acceptscale, maxscale=maxscale)
+ args = {}
+ if 'searchrange' in kwargs:
+ args['minscale'], args['acceptscale'] = kwargs.pop('searchrange')
+ if 'rebound' in kwargs:
+ args['maxscale'] = kwargs.pop('rebound')
+ if args:
+ warnings.deprecation('the searchrange and rebound arguments are deprecated; use linesearch=solver.NormBased(minscale=searchrange[0], acceptscale=searchrange[1], maxscale=rebound) instead')
+ return cls(**args)
def __call__(self, res0, dres0, res1, dres1):
if not numpy.isfinite(res1).all():
|
Switched from reason as argument to reason as user input
Long string arguments taht need to be quoted are annoying, plus now that
the command is named 'flag', flagging with a reason ought to be part of
the standard workflow. | @@ -108,13 +108,6 @@ class Command(BaseCommand):
default='',
help='End date',
)
- parser.add_argument(
- '--flag-with-reason',
- action='store',
- dest='reason',
- default='',
- help='If provided, set build_broken to True and build_broken_reason to this string',
- )
def handle(self, check_function, **options):
check_fn = CHECK_FUNCTIONS[check_function]
@@ -122,7 +115,6 @@ class Command(BaseCommand):
start = options['startdate']
end = options['enddate']
ids = options['build_ids']
- reason = options['reason']
print('Starting...\n')
if not ids:
@@ -131,6 +123,8 @@ class Command(BaseCommand):
ids = ids.split(',')
print('Checking {} builds\n'.format(len(ids)))
+ reason = input("Reason to use as build_broken_reason (leave blank to skip flagging)? ")
+
for message in find_broken_builds(check_fn, ids, reason):
self.stderr.write(message)
|
Remove coveralls
We need only one coverage test, we'll be using codecov. | @@ -51,7 +51,7 @@ before_install:
# Customise the testing environment
# ---------------------------------
- PACKAGES="$PACKAGES dask pyfftw cartopy basemap h5py"
- - PACKAGES="$PACKAGES pytest pytest-cov coveralls codecov";
+ - PACKAGES="$PACKAGES pytest pytest-cov codecov";
- export CYTHON_COVERAGE=1;
- travis_retry conda install --quiet $PACKAGES
@@ -69,6 +69,5 @@ script:
- pytest --pyargs pysteps --cov=pysteps -ra;
after_success:
- - coveralls
- codecov
|
Fix "Register host" task in rhsm-subscription role
Fixes | server_hostname: "{{ rhsm_server }}"
state: present
pool: "{{ rhsm_pool }}"
- when: "('not registered' in subscribed.stdout or 'Current' not in subscribed.stdout) and rhsm_user is defined and rhsm_user"
+ when: "'not registered' in subscribed.stdout and rhsm_user is defined and rhsm_user"
- name: Check if subscription is attached
command: subscription-manager list --consumed --pool-only --matches="{{ rhsm_pool }}"
|
ceph-nfs: change ganesha devel source
The download.nfs-ganesha.org source for nfs-ganesha on CentOS isn't
available anymore.
Let's switch back to shaman since we have builds available now. | - name: red hat based systems - dev repo related tasks
block:
- name: add nfs-ganesha dev repo
- yum_repository:
- name: nfs-ganesha
- baseurl: https://download.nfs-ganesha.org/3/LATEST/CentOS/el-$releasever/$basearch
- description: nfs-ganesha repository
- gpgcheck: true
- gpgkey: https://download.nfs-ganesha.org/3/rsa.pub
- file: nfs-ganesha-dev
-
- - name: add nfs-ganesha dev noarch repo
- yum_repository:
- name: nfs-ganesha-noarch
- baseurl: https://download.nfs-ganesha.org/3/LATEST/CentOS/el-$releasever/noarch
- description: nfs-ganesha noarch repository
- gpgcheck: true
- gpgkey: https://download.nfs-ganesha.org/3/rsa.pub
- file: nfs-ganesha-dev
+ get_url:
+ url: 'https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_distribution | lower }}/{{ ansible_distribution_major_version }}/flavors/{{ nfs_ganesha_flavor }}/repo'
+ dest: /etc/yum.repos.d/nfs-ganesha-dev.repo
+ force: true
when:
- nfs_ganesha_dev | bool
- ceph_repository == 'dev'
|
Update formats.py
include matroska format options for certain image based subtitles since this appears to be the only way for ffmpeg to extract them | @@ -124,8 +124,24 @@ class SsaFormat(BaseFormat):
ffmpeg_format_name = 'ass'
+class DVDSubFormat(BaseFormat):
+ """
+ MKS subtitle format
+ """
+ format_name = 'dvd_subtitle'
+ ffmpeg_format_name = 'matroska'
+
+
+class DVBSubFormat(BaseFormat):
+ """
+ MKS subtitle format
+ """
+ format_name = 'dvb_subtitle'
+ ffmpeg_format_name = 'matroska'
+
+
format_list = [
OggFormat, AviFormat, MkvFormat, WebmFormat, FlvFormat,
MovFormat, Mp4Format, MpegFormat, Mp3Format, SrtFormat,
- WebVTTFormat, SsaFormat, PGSFormat
+ WebVTTFormat, SsaFormat, PGSFormat, DVDSubFormat, DVBSubFormat
]
|
Add documentation on how to pull without conflicts
Per | @@ -168,6 +168,20 @@ final hosts file.
The `whitelist` is not tracked by git, so any changes you make won't be
overridden when you `git pull` this repo from `origin` in the future.
+### How do I pull this repo without losing changes?
+
+The easiest way to update this repo without overriding the `whitelist`,
+`blacklist`, or `myhosts` files is to run the following commands. Any changes
+to these files will be placed into a stash so that you can pop after the
+updated repo is pulled.
+```bash
+git checkout -- hosts readmeData.json
+git stash
+git pull
+git stash pop
+```
+After this the `updateHostsFile.py` script can be run to update the hosts
+file.
## What is a hosts file?
|
expand section for OR primary
Expanding the "Demographics, Hospital Capacity and Testing Tables" section. I'm using my experimental (but seemingly functional) method of finding elements by xpath, adding a unique attribute, and then .click()ing that attribute. | @@ -65,8 +65,11 @@ primary:
await page.waitForDelay(5000);
page.click("#prefix-dismissButton");
await page.waitForFunction(()=>document.querySelector("#main-content").textContent!=="");
+ await page.evaluate(() => { document.evaluate("//button[contains(text(),'Demographics, Hospital Capacity and Testing Tables')]", document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue.setAttribute("ctpclickhere", "ctpclickhere"); });
+ page.click("button[ctpclickhere]");
+ await page.waitForDelay(5000);
page.done();
- message: clicking button to get rid of popup for OR, waiting 5 sec
+ message: clicking button to get rid of popup for OR, click "Demographics, Hospital Capacity and Testing Tables", waiting 5 sec
PA:
overseerScript: page.manualWait(); await page.waitForDelay(30000); page.done();
|
category.py: remove not relevant docstring (Monobook)
Removed not relevant docstring part.
"""
Important:
* this bot is written to work with the MonoBook skin, so make sure
* your bot account uses this skin
""" | @@ -855,10 +855,6 @@ class CategoryTidyRobot(pywikibot.Bot):
you to find out what the article is about and in which other categories it
currently is.
- Important:
- * this bot is written to work with the MonoBook skin, so make sure your bot
- account uses this skin
-
"""
def __init__(self, catTitle, catDB, namespaces=None):
|
delete all items of the group type spec
delete all items of the group type spec to complete testcase | @@ -73,10 +73,11 @@ class GroupTypeSpecsTest(base.BaseVolumeAdminTest):
self.assertEqual(list_specs, body)
# Delete specified item of group type specs
- delete_key = 'key1'
+ delete_keys = ['key1', 'key2', 'key3']
+ for it in delete_keys:
self.admin_group_types_client.delete_group_type_specs_item(
- group_type['id'], delete_key)
+ group_type['id'], it)
self.assertRaises(
lib_exc.NotFound,
self.admin_group_types_client.show_group_type_specs_item,
- group_type['id'], delete_key)
+ group_type['id'], it)
|
Fix bug with crosshair cursor
Previous refactoring broke the "mouseMoved" callback | @@ -29,7 +29,7 @@ from __future__ import division
__authors__ = ["V.A. Sole", "T. Vincent"]
__license__ = "MIT"
-__date__ = "02/10/2017"
+__date__ = "16/10/2017"
import logging
@@ -151,13 +151,16 @@ class PositionInfo(qt.QWidget):
"""
if event['event'] == 'mouseMoved':
x, y = event['x'], event['y']
- self._updateStatusBar(x, y)
+ xPixel, yPixel = event['xpixel'], event['ypixel']
+ self._updateStatusBar(x, y, xPixel, yPixel)
- def _updateStatusBar(self, x, y):
+ def _updateStatusBar(self, x, y, xPixel, yPixel):
"""Update information from the status bar using the definitions.
:param float x: Position-x in data
:param float y: Position-y in data
+ :param float xPixel: Position-x in pixels
+ :param float yPixel: Position-y in pixels
"""
styleSheet = "color: rgb(0, 0, 0);" # Default style
@@ -180,8 +183,6 @@ class PositionInfo(qt.QWidget):
closestInPixels = self.plot.dataToPixel(
xClosest, yClosest, axis=activeCurve.getYAxis())
if closestInPixels is not None:
- xPixel, yPixel = event['xpixel'], event['ypixel']
-
if (abs(closestInPixels[0] - xPixel) < 5 and
abs(closestInPixels[1] - yPixel) < 5):
# Update label style sheet
|
gui : Fix crash when starting Gaffer with unicode in the clipboard.
We just skip the unsupported characters. | @@ -153,7 +153,7 @@ class gui( Gaffer.Application ) :
from Qt import QtWidgets
- text = str( QtWidgets.QApplication.clipboard().text() )
+ text = str( QtWidgets.QApplication.clipboard().text().encode( 'ascii', 'ignore' ) )
if text :
with Gaffer.BlockedConnection( self.__clipboardContentsChangedConnection ) :
self.root().setClipboardContents( IECore.StringData( text ) )
|
Update Wallet.py CreateAddress
Show correct notification if number of addresses to be created is not specified (should be 0 > x <= 3) | @@ -21,7 +21,7 @@ def CreateAddress(prompter, wallet, args):
try:
int_args = int(args)
except Exception as e:
- print('Enter the number of addresses to create < 3.')
+ print('Enter the number of addresses to create <= 3.')
return False
if wallet is None:
|
Fix builds with GNAT GPL 2016
TN: | @@ -19,6 +19,8 @@ with Ada.Text_IO; use Ada.Text_IO;
with Ada.Unchecked_Conversion;
with Ada.Unchecked_Deallocation;
+with GNATCOLL.Traces;
+
with Langkit_Support.Array_Utils;
with Langkit_Support.Images; use Langkit_Support.Images;
with Langkit_Support.Slocs; use Langkit_Support.Slocs;
@@ -1685,7 +1687,7 @@ package body ${ada_lib_name}.Analysis is
procedure Trigger_Envs_Debug (Is_Active : Boolean) is
begin
- AST_Envs.Me.Set_Active (Is_Active);
+ GNATCOLL.Traces.Set_Active (AST_Envs.Me, Is_Active);
end Trigger_Envs_Debug;
----------------------
|
StructMetaclass: properly handle null _generic_list_type
TN: | @@ -1512,8 +1512,10 @@ class StructMetaclass(CompiledTypeMetaclass):
# If this is the root grammar type, create the generic list type name
if is_root_grammar_class:
- generic_list_type_name = dct.pop('_generic_list_type',
- cls.__name__ + 'BaseList')
+ generic_list_type_name = (
+ dct.pop('_generic_list_type', None)
+ or cls.__name__ + 'BaseList'
+ )
@classmethod
def element_type(cls):
|
Some small spelling fixes for vault docs
Refs | @@ -32,8 +32,8 @@ statically, as above, or as an environment variable:
After the profile is created, edit the salt master config file and configure
the external pillar system to use it. A path pointing to the needed vault key
-must also be specified so that vault knows where to look. Vault do not apply
-recursive list, so each required key need to be individually mapped.
+must also be specified so that vault knows where to look. Vault does not apply
+a recursive list, so each required key needs to be individually mapped.
.. code-block:: yaml
@@ -41,14 +41,14 @@ recursive list, so each required key need to be individually mapped.
- vault: myvault path=secret/salt
- vault: myvault path=secret/another_key
-Also, on vault each key need to have all the key values pairs with the names you
+Each key needs to have all the key-value pairs with the names you
require. Avoid naming every key 'password' as you they will collide:
.. code-block:: bash
$ vault write secret/salt auth=my_password master=127.0.0.1
-you can then use normal pillar requests to get each key pair directly from
+You can then use normal pillar requests to get each key pair directly from
pillar root. Example:
.. code-block:: bash
|
security updates and yaml
setting proper schema for yaml and was notified of security updates: libssl1.0.0: 3611-1, 3628-1 | name: slcli # check to see if it's available
-version: '5.4.4.0+git' # check versioning
+version: '5.4.4.1+git' # check versioning
summary: Python based SoftLayer API Tool. # 79 char long summary
description: |
A command-line interface is also included and can be used to manage various SoftLayer products and services.
@@ -20,7 +20,8 @@ parts:
my-part:
source: https://github.com/softlayer/softlayer-python
source-type: git
- plugin: python3
+ plugin: python
+ python-version: python3
build-packages:
- python3
|
Add Dockerfile
Add Dockerfile reference | @@ -18,9 +18,12 @@ Getting Started
See `docs/HOWTO.rst`_.
There is also an `installer`_ available that simplifies the installation on various Linux-based distributions.
+There is also an `Dockerfile`_ available .
.. _installer: https://github.com/bauerj/electrumx-installer
+.. _Dockerfile: https://github.com/followtheart/electrumx-docker
+
Features
========
|
Add dashboard_data ImportLog to measures fixture
I'd previously added this by hand to the fixture, which I shouldn't have
done. | @@ -130,10 +130,8 @@ class Command(BaseCommand):
# import_measures uses this ImportLog to work out which months it
# should import data.
ImportLog.objects.create(category="prescribing", current_at="2018-08-01")
-
- # Practice and CCG homepages need this to work out which PPU savings to
- # show.
- ImportLog.objects.create(category="ppu", current_at="2018-08-01")
+ # The practice, CCG etc dashboards use this date
+ ImportLog.objects.create(category="dashboard_data", current_at="2018-08-01")
# Set up BQ, and upload STPs, CCGs, Practices.
Client("measures").create_dataset()
|
Remove gcloud installation.
gcloud is already part of the base image. | @@ -508,14 +508,6 @@ RUN pip install --upgrade dask && \
ln -sf /usr/lib/x86_64-linux-gnu/libpixman-1.so.0.34.0 /opt/conda/lib/libpixman-1.so.0.38.0 && \
/tmp/clean-layer.sh
-# gcloud SDK https://cloud.google.com/sdk/docs/quickstart-debian-ubuntu
-RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] http://packages.cloud.google.com/apt cloud-sdk main" \
- | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \
- curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | \
- apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - && \
- apt-get update -y && apt-get install google-cloud-sdk -y && \
- /tmp/clean-layer.sh
-
# Add BigQuery client proxy settings
ENV PYTHONUSERBASE "/root/.local"
ADD patches/kaggle_gcp.py /root/.local/lib/python3.7/site-packages/kaggle_gcp.py
|
Update js_script.txt
Related to | @@ -1056,3 +1056,38 @@ solo8.biz
vinuser6.biz
w1sercher.biz
wwserch41.biz
+
+# Reference: https://twitter.com/CERT_Polska_en/status/1270623116931317760
+# Reference: https://pastebin.com/raw/Ap38Fr7e
+# Reference: https://pastebin.com/raw/YyYs8Her
+
+/myjs28_frr_b7.js
+/myjs28_frr_c1.js
+/myjs28_frr_j2.js
+/myjs28_frr_n01.js
+/myjs28_frr_n02.js
+/myjs28_frr_s17.js
+/myjs28_frr_s20.js
+/myjs28_frr_s21.js
+/myjs28_frr_s22.js
+/myjs28_frr_s23.js
+/myjs28_frr_s29.js
+/myjs28_frr_s30.js
+/myjs28_frr_s31.js
+/myjs28_frr_s33.js
+/myjs28_frr_s35.js
+/myjs28_frr_s36.js
+/myjs28_frr_s37.js
+/myjs28_frr_s38.js
+/myjs28_frr_s39.js
+/myjs28_frr_s4.js
+/myjs28_frr_s45.js
+/myjs28_frr_s47.js
+/myjs28_frr_s48.js
+/myjs28_frr_s49.js
+/myjs28_frr_s50.js
+/myjs28_frr_s51.js
+/myjs28_frr_s52.js
+/myjs28_frr_s55.js
+/myjs28_frr_s7.js
+/myjs28_frr_w1.js
|
[docs] Releases and deprecations policy
Test Plan: manual inspection
Reviewers: schrockn, alangenfeld, nate, prha | "path": "/community/contributing",
"name": "Contributing"
},
+ {
+ "path": "/community/releases",
+ "name": "Releases & Deprecations"
+ },
{
"path": "https://join.slack.com/t/dagster/shared_invite/enQtNjEyNjkzNTA2OTkzLTI0MzdlNjU0ODVhZjQyOTMyMGM1ZDUwZDQ1YjJmYjI3YzExZGViMDI1ZDlkNTY5OThmYWVlOWM1MWVjN2I3NjU",
"isAbsolutePath": true,
|
Transactionally drain MachineLeases
Otherwise this interferes with other operations on MachineLease entities.
Review-Url: | @@ -410,6 +410,43 @@ def ensure_entities_exist(max_concurrent=50):
ndb.Future.wait_all(futures)
[email protected]_tasklet
+def drain_entity(key):
+ """Drains the given MachineLease.
+
+ Args:
+ key: ndb.Key for a MachineLease entity.
+ """
+ machine_lease = yield key.get_async()
+ if not machine_lease:
+ logging.error('MachineLease does not exist\nKey: %s', key)
+ return
+
+ if machine_lease.drained:
+ return
+
+ logging.info(
+ 'Draining MachineLease:\nKey: %s\nHostname: %s',
+ key,
+ machine_lease.hostname,
+ )
+ machine_lease.drained = True
+ yield machine_lease.put_async()
+
+
[email protected]
+def ensure_entity_drained(machine_lease):
+ """Ensures the given MachineLease is drained.
+
+ Args:
+ machine_lease: MachineLease entity.
+ """
+ if machine_lease.drained:
+ return
+
+ yield drain_entity(machine_lease.key)
+
+
def drain_excess(max_concurrent=50):
"""Marks MachineLeases beyond what is needed by their MachineType as drained.
@@ -438,8 +475,7 @@ def drain_excess(max_concurrent=50):
if len(futures) == max_concurrent:
ndb.Future.wait_any(futures)
futures = [future for future in futures if not future.done()]
- machine_lease.drained = True
- futures.append(machine_lease.put_async())
+ futures.append(ensure_entity_drained(machine_lease))
if futures:
ndb.Future.wait_all(futures)
|
Handle spaces in git output command
Fixes: | @@ -722,7 +722,7 @@ def get_yaml_files(options: Namespace) -> dict:
git_command,
stderr=subprocess.STDOUT,
universal_newlines=True
- ).split()
+ ).splitlines()
except subprocess.CalledProcessError as exc:
_logger.warning(
"Failed to discover yaml files to lint using git: %s",
|
Omit PATH/LD_LIBRARY_PATH from --env output
Not needed anymore (since z3/msat now automagically discover SO/DLL). | @@ -225,13 +225,10 @@ def main():
if platform.system().lower() == "windows":
if options.powershell:
print('$env:PythonPath += ";%s"' % bindings_dir)
- print('$env:Path += ";%s"' % bindings_dir)
else:
print("set PYTHONPATH=" + bindings_dir + ";%PYTHONPATH%")
- print("set PATH=" + bindings_dir + ";%PATH%")
else:
print("export PYTHONPATH=\"" + bindings_dir + ":${PYTHONPATH}\"")
- print("export LD_LIBRARY_PATH=\"" + bindings_dir + ":${LD_LIBRARY_PATH}\"")
else:
if len(solvers_to_install) == 0:
|
pull out get_all_case_updates in AdvancedForm
just like was just done in Form | @@ -3084,20 +3084,26 @@ class AdvancedForm(IndexedFormBase, NavMenuItemMediaMixin):
return errors
def get_case_updates(self, case_type):
- updates = set()
+ return self.get_all_case_updates().get(case_type, [])
+
+ def get_all_case_updates(self):
+ updates_by_case_type = defaultdict(set)
format_key = self.get_case_property_name_formatter()
for action in self.actions.get_all_actions():
- if action.case_type == case_type:
- updates.update(format_key(*item)
- for item in six.iteritems(action.case_properties))
+ case_type = action.case_type
+ updates_by_case_type[case_type].update(
+ format_key(*item) for item in six.iteritems(action.case_properties))
if self.schedule and self.schedule.enabled and self.source:
xform = self.wrapped_xform()
self.add_stuff_to_xform(xform)
- scheduler_updates = xform.get_scheduler_case_updates()[case_type]
+ scheduler_updates = xform.get_scheduler_case_updates()
else:
- scheduler_updates = set()
+ scheduler_updates = {}
- return updates.union(scheduler_updates)
+ for case_type, updates in updates_by_case_type.items():
+ if case_type in scheduler_updates:
+ updates |= scheduler_updates[case_type]
+ return updates_by_case_type
@memoized
def get_parent_types_and_contributed_properties(self, module_case_type, case_type):
|
Provide a default LANG
This works around an issue on macos+Python3 where Python3 is compiled
with ASCII support. Issues is covered here: | @@ -121,7 +121,7 @@ def _run(cmd, quiet=False, ignore=None, timeout=60):
cmd_env["GUILD_HOME"] = os.path.join(WORKSPACE, ".guild")
cmd_env["PATH"] = os.getenv("PATH")
cmd_env["COLUMNS"] = "999"
- cmd_env["LANG"] = os.getenv("LANG", "")
+ cmd_env["LANG"] = os.getenv("LANG", "en_US.UTF-8")
cmd_cwd = WORKSPACE if not _cwd else os.path.join(WORKSPACE, _cwd)
p = subprocess.Popen(
[cmd],
|
Attempt to make axon unit test coverage more consistent.
Add missing asyncio.CancelledError stanzas, pragma nocovs | @@ -499,7 +499,10 @@ class BlobStor(s_cell.Cell):
cur_offset = last_offset + 1
await self.writer.updateCloneProgress(cur_offset)
- except Exception:
+ except asyncio.CancelledError: # pragma: no cover
+ break
+
+ except Exception: # pragma: no cover
if not self.isfini:
logger.exception('BlobStor.cloneeLoop error')
@@ -729,6 +732,10 @@ class _ProxyKeeper(s_base.Base):
for bsid in bsidsrot:
try:
blobstor = await self.get(bsid)
+
+ except asyncio.CancelledError: # pragma: no cover
+ break
+
except Exception:
logger.warning('Trouble connecting to BSID %r', bsid)
continue
@@ -887,6 +894,10 @@ class Axon(s_cell.Cell):
for path in paths:
try:
await self._start_watching_blobstor(path)
+
+ except asyncio.CancelledError: # pragma: no cover
+ break
+
except Exception:
logger.error('At axon startup, failed to connect to stored blobstor path %s', _path_sanitize(path))
|
MAINT: Ensure that datetime dot, correlate, and vdot cannot happen
This is by just undefining the function that should never have been
defined to begin with. | @@ -3803,17 +3803,23 @@ BOOL_dot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp n,
*((npy_bool *)op) = tmp;
}
+/*
+ * `dot` does not make sense for times, for DATETIME it never worked.
+ * For timedelta it does/did , but should probably also just be removed.
+ */
+#define DATETIME_dot NULL
+
/**begin repeat
*
* #name = BYTE, UBYTE, SHORT, USHORT, INT, UINT,
* LONG, ULONG, LONGLONG, ULONGLONG,
- * LONGDOUBLE, DATETIME, TIMEDELTA#
+ * LONGDOUBLE, TIMEDELTA#
* #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
* npy_long, npy_ulong, npy_longlong, npy_ulonglong,
- * npy_longdouble, npy_datetime, npy_timedelta#
+ * npy_longdouble, npy_timedelta#
* #out = npy_long, npy_ulong, npy_long, npy_ulong, npy_long, npy_ulong,
* npy_long, npy_ulong, npy_longlong, npy_ulonglong,
- * npy_longdouble, npy_datetime, npy_timedelta#
+ * npy_longdouble, npy_timedelta#
*/
static void
@name@_dot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp n,
|
Fix typo in IVProcedure in the tutorial.
Thanks | @@ -264,8 +264,8 @@ Now that you have a background on how to use the different features of the Proce
data_points = IntegerParameter('Data points', default=50)
averages = IntegerParameter('Averages', default=50)
- max_current = FloatParameter('Maximum Current', unit='A', default=0.01)
- min_current = FloatParameter('Minimum Current', unit='A', default=-0.01)
+ max_current = FloatParameter('Maximum Current', units='A', default=0.01)
+ min_current = FloatParameter('Minimum Current', units='A', default=-0.01)
DATA_COLUMNS = ['Current (A)', 'Voltage (V)', 'Voltage Std (V)']
|
Enable pre-commit and final setup tweaks
Remove true on ci check
Prevent icons final newline
Change column number in templates linting | @@ -24,10 +24,10 @@ jobs:
# Filter out known false positives, while preserving normal output and error codes.
# See https://github.com/motet-a/jinjalint/issues/18.
# And https://circleci.com/docs/2.0/configuration-reference/#default-shell-options.
- - run: git ls-files '*.html' | xargs pipenv run djhtml --check || true
+ - run: git ls-files '*.html' | xargs pipenv run djhtml --check
- run:
shell: /bin/bash -e
- command: pipenv run jinjalint --parse-only wagtail | grep -v 'welcome_page.html:6:70' | tee /dev/tty | wc -l | grep -q '0'
+ command: pipenv run jinjalint --parse-only wagtail | grep -v 'welcome_page.html:6:76' | tee /dev/tty | wc -l | grep -q '0'
- run: pipenv run doc8 docs
- run: DATABASE_NAME=wagtail.db pipenv run python -u runtests.py
|
[docs] include deprecated type io decorators in apidocs
Test Plan: inspect
Reviewers: sashank | @@ -384,6 +384,14 @@ Making New Types
.. autofunction:: dagster_type_materializer
+.. autofunction:: input_hydration_config
+
+.. autofunction:: output_materialization_config
+
+.. autofunction:: input_selector_schema
+
+.. autofunction:: output_selector_schema
+
.. autofunction:: usable_as_dagster_type
.. autofunction:: make_python_type_usable_as_dagster_type
|
Display error message on the login form
Includes OAuth. | {% block content %}
<h2>Login</h2>
+
+ {% if messages %}
+ <ul class="messages">
+ {% for message in messages %}
+ <li{% if message.tags %} class="{{ message.tags }}" {% endif %}>{{ message }}</li>
+ {% endfor %}
+ </ul>
+ {% endif %}
+
<form method="post">
{% csrf_token %}
{{ form.as_p }}
|
Replace assertRaisesRegexp with assertRaisesRegex
assertRaisesRegexp was renamed to assertRaisesRegex since Py3.2
For more details, please check:
unittest.html#unittest.TestCase.assertRaisesRegex | @@ -67,7 +67,7 @@ class RBACPolicyTest(common.HeatTestCase):
msg = ("Invalid action %(action)s for object type %(type)s." %
{'action': invalid_action,
'type': obj_type})
- self.assertRaisesRegexp(exception.StackValidationFailed, msg,
+ self.assertRaisesRegex(exception.StackValidationFailed, msg,
self.rbac.validate)
def test_validate_action_for_network(self):
|
C3XGate is 3-qubit
* C3XGate is 3-qubit
Typo in documentation 4-qubit -> 3-qubit
class C3XGate(ControlledGate):
r"""The 3-qubit controlled X gate.
* Better description for C3Xgate | @@ -531,7 +531,7 @@ class C3SXGate(ControlledGate):
class C3XGate(ControlledGate):
- r"""The 4-qubit controlled X gate.
+ r"""The X gate controlled on 3 qubits.
This implementation uses :math:`\sqrt{T}` and 14 CNOT gates.
"""
|
Update citation instructions
Fixes | @@ -75,11 +75,21 @@ Garage releases a new stable version approximately every 4 months, in February,
## Citing garage
-If you use garage for academic research, you are highly encouraged to cite the following paper on the original rllab implementation:
-
-- Yan Duan, Xi Chen, Rein Houthooft, John Schulman, Pieter Abbeel. "[Benchmarking Deep Reinforcement Learning for Continuous Control](http://arxiv.org/abs/1604.06778)". _Proceedings of the 33rd International Conference on Machine Learning (ICML), 2016._
+If you use garage for academic research, please cite the repository using the following BibTeX entry. You should update the `commit` field with the commit or release tag your publication uses.
+
+```latex
+@misc{garage,
+ author = {The garage contributors},
+ title = {Garage: A toolkit for reproducible reinforcement learning research},
+ year = {2019},
+ publisher = {GitHub},
+ journal = {GitHub repository},
+ howpublished = {\url{https://github.com/rlworkgroup/garage}},
+ commit = {be070842071f736eb24f28e4b902a9f144f5c97b}
+}
+```
## Credits
-garage is based on a predecessor project called [rllab](https://github.com/rll/rllab). The garage project is grateful for the contributions of the original rllab authors, and hopes to continue advancing the state of reproducibility in RL research in the same spirit.
+The original code for garage was adopted from predecessor project called [rllab](https://github.com/rll/rllab). The garage project is grateful for the contributions of the original rllab authors, and hopes to continue advancing the state of reproducibility in RL research in the same spirit.
-rllab was originally developed by Rocky Duan (UC Berkeley/OpenAI), Peter Chen (UC Berkeley), Rein Houthooft (UC Berkeley/OpenAI), John Schulman (UC Berkeley/OpenAI), and Pieter Abbeel (UC Berkeley/OpenAI).
+rllab was developed by Rocky Duan (UC Berkeley/OpenAI), Peter Chen (UC Berkeley), Rein Houthooft (UC Berkeley/OpenAI), John Schulman (UC Berkeley/OpenAI), and Pieter Abbeel (UC Berkeley/OpenAI).
|
Change Directory into Prefect to Streamline Copy Paste (PrefectHQ/orion#5749)
Change directory into prefect repository to allow copy paste install for contributing.
Previously, developers were unable to copy all commands and run, due to missing change directory. | @@ -18,6 +18,7 @@ First, you'll need to download the source code and install an editable version o
```bash
# Clone the repository and switch to the 'orion' branch
git clone https://github.com/PrefectHQ/prefect.git
+cd prefect
git checkout orion
# Install the package with development dependencies
pip install -e ".[dev]"
|
Finishes off drift report so it's working now.
Makes some minor updates to drift report so that all the plumbing
lines up. This report will need to be updated to utilize the new
framework more fully in the future; currently it's just jammed into the
new API. | @@ -1534,7 +1534,7 @@ def construct_nqnoise_report(results, title="auto",
workspace=ws)
-def construct_drift_report(results, gss, title='auto', ws=None, verbosity=1):
+def construct_drift_report(results, title='auto', ws=None, verbosity=1):
"""
Creates a Drift report.
@@ -1544,10 +1544,12 @@ def construct_drift_report(results, gss, title='auto', ws=None, verbosity=1):
-------
:class:`Report` : A constructed report object
"""
+ from ..protocols import StabilityAnalysisResults as _StabilityAnalysisResults
from ..extras.drift.stabilityanalyzer import StabilityAnalyzer
from ..extras.drift import driftreport
- assert(isinstance(results, StabilityAnalyzer)), "Support for multiple results as a Dict is not yet included!"
- singleresults = results
+ assert(isinstance(results, _StabilityAnalysisResults)), "Support for multiple results as a Dict is not yet included!"
+ gss = results.data.edesign.circuit_structs[-1]
+ singleresults = results.stabilityanalyzer
printer = _VerbosityPrinter.build_printer(verbosity) # , comm=comm)
@@ -1567,10 +1569,10 @@ def construct_drift_report(results, gss, title='auto', ws=None, verbosity=1):
results_dict = results if isinstance(results, dict) else {"unique": results}
- drift_switchBd = driftreport._create_drift_switchboard(ws, results, gss)
+ drift_switchBd = driftreport._create_drift_switchboard(ws, results.stabilityanalyzer, gss)
# Sets whether or not the dataset key is a switchboard or not.
- if len(results.data.keys()) > 1:
+ if len(singleresults.data.keys()) > 1:
dskey = drift_switchBd.dataset
arb_dskey = list(singleresults.data.keys())[0]
else:
@@ -1581,8 +1583,9 @@ def construct_drift_report(results, gss, title='auto', ws=None, verbosity=1):
printer.log("*** Generating switchboard ***")
#Create master switchboard
+ stabilityanalyzer_dict = {k: res.stabilityanalyzer for k, res in results_dict.items()}
switchBd, _dataset_labels = \
- driftreport._create_switchboard(ws, results_dict)
+ driftreport._create_switchboard(ws, stabilityanalyzer_dict)
global_qtys = {
'title': title,
@@ -1608,7 +1611,7 @@ def construct_drift_report(results, gss, title='auto', ws=None, verbosity=1):
html='~drift_html_report',
pdf='drift_pdf_report.tex'
)
- return _Report(templates, results, sections, set(), global_qtys, report_params, ws)
+ return _Report(templates, singleresults, sections, set(), global_qtys, report_params, workspace=ws)
# # XXX this needs to be revised into a script
|
Update vtx.py
added "extra8" reset | @@ -20,6 +20,7 @@ class Vtx(Base):
self.header.read(self.reader)
try:
+ self.store_value('extra8', False)
self.reader.seek(self.header.body_part_offset)
for _ in range(self.header.body_part_count):
body_part = BodyPart()
|
Missing return in CV API
DELETE /agents and PUT /agents are missing return control and run
into unintended path. | @@ -189,6 +189,8 @@ class AgentsHandler(BaseHandler):
common.echo_json_response(self, 400, "uri not supported")
logger.warning(
'DELETE returning 400 response. uri not supported: ' + self.request.path)
+ return
+
try:
agent = session.query(VerfierMain).filter_by(
agent_id=agent_id).first()
@@ -362,6 +364,7 @@ class AgentsHandler(BaseHandler):
common.echo_json_response(self, 404, "agent id not found")
logger.info(
'PUT returning 404 response. agent id: ' + agent_id + ' not found.')
+ return
if "reactivate" in rest_params:
agent.operational_state = cloud_verifier_common.CloudAgent_Operational_State.START
|
Upgrade flake8 and related dependencies
The resolved pycodestyle version doesn't seem to be compatible with the
specified flake8 version. Upgrade flake8 and friends to the latest
compatible versions. | @@ -51,10 +51,10 @@ DEPLOY_REQ = [
]
FLAKE8_REQ = [
- 'flake8-print == 3.1.0',
+ 'flake8-print == 3.1.4',
'flake8-comprehensions == 1.4.1',
- 'pep8-naming == 0.8.2',
- 'flake8 == 3.7.8'
+ 'pep8-naming == 0.11.1',
+ 'flake8 == 3.9.0'
]
TESTS_REQ = [
|
Python3.8: Mark constants as user provided
* The new method was not marking them as user provided, which lead
to warnings for too large values. | @@ -181,8 +181,11 @@ def buildVariableReferenceNode(provider, node, source_ref):
# Python3.4 or higher, True and False, are not given as variables anymore.
+# Python3.8, all kinds of constants are like this.
def buildNamedConstantNode(node, source_ref):
- return makeConstantRefNode(constant=node.value, source_ref=source_ref)
+ return makeConstantRefNode(
+ constant=node.value, source_ref=source_ref, user_provided=True
+ )
def buildConditionNode(provider, node, source_ref):
|
Set maxmemory-policy for redis: allkeys-lru
TODO: evaluate what memoizations should be set with a (very long)
expiry for users who havent logged in for a while, but wont
but memoized by the ctf-stats every minute. Off top of head,
get_groups and team_score_progression | @@ -589,7 +589,7 @@ requirepass "{{ picoWeb_db_password | default('insecure') }}"
#
# The default is:
#
-# maxmemory-policy noeviction
+maxmemory-policy allkeys-lru
# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can tune it for speed or
|
SecurityProfile: Accept empty string in runbook
When the runbook has an empty string (important for pipelines), the security profile should use the default requirement. | @@ -43,8 +43,18 @@ class SecurityProfileSettings(schema.FeatureSettings):
],
),
metadata=field_metadata(
- decoder=partial(
- search_space.decode_set_space_by_type, base_type=SecurityProfileType
+ decoder=lambda input: (
+ search_space.decode_set_space_by_type(
+ data=input, base_type=SecurityProfileType
+ )
+ if input
+ else search_space.SetSpace(
+ items=[
+ SecurityProfileType.Standard,
+ SecurityProfileType.Boot,
+ SecurityProfileType.CVM,
+ ]
+ )
)
),
)
|
tests/composition/test_compotisition_vector: Split parametrized arguments.
Better readability.
pytest.marks and test naming work better. | @@ -1994,7 +1994,8 @@ class TestRun:
@pytest.mark.composition
@pytest.mark.benchmark(group="LinearComposition Vector")
- @pytest.mark.parametrize("mode, vector_length", product(('Python', pytest.param('LLVM', marks=pytest.mark.llvm), pytest.param('LLVMExec', marks=pytest.mark.llvm)), [2**x for x in range(1)]))
+ @pytest.mark.parametrize("mode", ['Python', pytest.param('LLVM', marks=pytest.mark.llvm), pytest.param('LLVMExec', marks=pytest.mark.llvm)])
+ @pytest.mark.parametrize("vector_length", [2**x for x in range(1)])
def test_run_composition_vector(self, benchmark, mode, vector_length):
var = [1.0 for x in range(vector_length)];
comp = Composition()
|
Don't ignore 400 errors when creating ES indices
It can reveal tricky issues, and ignoring the error is not going
to help anyway if the index has not been created when we blindly
expect it to present later. | @@ -102,6 +102,6 @@ def create_index(index, config=None):
})
if not es.indices.exists(index):
- es.indices.create(index, body=config, ignore=400)
+ es.indices.create(index, body=config)
return index
|
fix(recipe): Delete lock node in queue recipe if entry already consumed
Delete lock node if already entry already consumed
Fix
Related: / | @@ -335,10 +335,19 @@ class LockingQueue(BaseQueue):
id=id_),
self.id,
ephemeral=True)
+ except NodeExistsError:
+ # Item is already locked
+ return None
+
+ try:
value, stat = self.client.retry(
self.client.get,
"{path}/{id}".format(path=self._entries_path, id=id_))
- except (NoNodeError, NodeExistsError):
- # Item is already consumed or locked
+ except NoNodeError:
+ # Item is already consumed
+ self.client.delete(
+ "{path}/{id}".format(
+ path=self._lock_path,
+ id=id_))
return None
return (id_, value)
|
Use abspath for external libs
If kept relative, forked guild cmds like 'package' that change
cwd will fail. | @@ -45,7 +45,7 @@ def main():
def _external_libs_path():
guild_pkg_dir = os.path.dirname(__file__)
- path = os.path.join(guild_pkg_dir, "external")
+ path = os.path.abspath(os.path.join(guild_pkg_dir, "external"))
if not os.path.exists(path):
import textwrap
sys.stderr.write("guild: {} does not exist\n".format(path))
|
Stop falling back on routing by session-id for smsforms
Now that I have confirmed the routing sms forms by user_id works | @@ -226,9 +226,8 @@ def _post_data(data, user_id):
"Content-Type": "application/json",
"content-length": str(len(data_bytes)),
"X-MAC-DIGEST": get_hmac_digest(settings.FORMPLAYER_INTERNAL_AUTH_KEY, data_bytes),
- # todo: stop defaulting to session-id
- "X-FORMPLAYER-SESSION": user_id or data.get('session-id'),
- }
+ "X-FORMPLAYER-SESSION": user_id,
+ },
)
if response.status_code == 404:
raise Http404(response.reason)
|
bug fix: binary test was not working when BUILDTEST_BINARY=True from configuration file, only worked from command line option --binary
Remove implementation for binary test when running buildtest build -s <module> since that is not needed since it is done automatically with
loaded modules. | @@ -130,20 +130,21 @@ def func_build_subcmd(args):
parent_dir)
builder.build()
- if args.binary:
+ # if binary test is True then generate binary test for all loaded modules
+ if config_opts["BUILDTEST_BINARY"]:
cmd = "module -t list"
out = subprocess.getoutput(cmd)
# output of module -t list when no modules are loaded is "No modules
# loaded"
if out != "No modules loaded":
out = out.split()
-
+ # for every loaded module generate binary test
for package in out:
generate_binary_test(package, args.verbose, "software")
+
if args.package:
func_build_system(args, logger, logdir, logpath, logfile)
- if args.software:
- func_build_software(args, logger, logdir, logpath, logfile)
+
def func_build_system(args, logger, logdir, logpath, logfile):
@@ -172,34 +173,6 @@ def func_build_system(args, logger, logdir, logpath, logfile):
print("Writing Log file to: ", destpath)
-def func_build_software(args, logger, logdir, logpath, logfile):
- """ This method implements option "buildtest build -s" which is
- used for building binary test for software modules.
- """
-
- print("Detecting Software: ", args.software)
-
- logger.debug("Generating Test from Application")
-
- # check if software is an easybuild applicationa
- if config_opts["BUILDTEST_EASYBUILD"] == True:
- is_easybuild_app(args.software)
-
- logdir=os.path.join(logdir,args.software)
-
- # if directory tree for software log is not present, create the directory
- create_dir(logdir)
-
- if config_opts["BUILDTEST_BINARY"]:
- generate_binary_test(args.software,args.verbose,"software")
-
- # moving log file from $BUILDTEST_LOGDIR/buildtest_%H_%M_%d_%m_%Y.log to
- # $BUILDTEST_LOGDIR/<module-name>/buildtest_%H_%M_%d_%m_%Y.log
- os.rename(logpath, os.path.join(logdir,logfile))
- logger.debug("Writing Log file to %s", os.path.join(logdir,logfile))
-
- print ("Writing Log file: ", os.path.join(logdir,logfile))
-
def clean_tests():
""" cleans all the tests in BUILDTEST_TESTDIR.
This implements "buildtest build --clean-tests"
|
print workaround for python 2
This fixes problems when piping to a file in python 2.
Resolves | @@ -27,6 +27,13 @@ from androguard.core.bytecodes import apk
from androguard.util import read
from androguard.cli import androarsc_main as main
+if sys.version_info.major == 2:
+ # Patch sys.stdout to avoid problems with unicode when piping
+ # See also https://wiki.python.org/moin/PrintFails for more information
+ import codecs
+ import locale
+ sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout);
+
if __name__ == "__main__":
parser = ArgumentParser(description="Decode resources.arsc either directly"
|
stream creation: Change the color of remove button in subscribers tab.
This button was red, which is appropriate when modifying an existing stream,
since that's a potentially disruptive action, but not appropriate in the context
of previewing subscribers for a new stream being created.
Fixes: | {{/if}}
<td>{{user_id}} </td>
<td>
- <button {{#if disabled}} disabled="disabled"{{/if}} data-user-id="{{user_id}}" class="remove_potential_subscriber button small rounded btn-danger">{{t 'Remove' }}</button>
+ <button {{#if disabled}} disabled="disabled"{{/if}} data-user-id="{{user_id}}" class="remove_potential_subscriber button small rounded white">{{t 'Remove' }}</button>
</td>
</tr>
|
[Fix]: complement necessary argument of seg_suffix of cityscapes
Issue:
A new entry `seg_suffix` is added for CustomDataset.__init__()
but missing in `self_coco = CocoDataset` in function `evaluate()`,
`mmdet/datasets/cityscapes.py`. | @@ -263,8 +263,9 @@ class CityscapesDataset(CocoDataset):
# create CocoDataset with CityscapesDataset annotation
self_coco = CocoDataset(self.ann_file, self.pipeline.transforms,
None, self.data_root, self.img_prefix,
- self.seg_prefix, self.proposal_file,
- self.test_mode, self.filter_empty_gt)
+ self.seg_prefix, self.seg_suffix,
+ self.proposal_file, self.test_mode,
+ self.filter_empty_gt)
# TODO: remove this in the future
# reload annotations of correct class
self_coco.CLASSES = self.CLASSES
|
langkit.compiled_types: enhance ref-counting in builtin props comment
TN: | @@ -3381,8 +3381,11 @@ class ASTNodeType(BaseStructType):
' a reference to the parent.'
)),
- # The following builtin fields are implemented as a property, so
- # there is no need for an additional inc-ref.
+ # The following builtin fields are implemented as properties, so
+ # they follow the ref-counting protocol (function calls return a
+ # new ownership share). So unlike access to regular fields, they
+ # don't need an additional inc-ref (AbstractNodeData's
+ # access_needs_incref constructor argument).
('parents', PropertyDef(
expr=None, prefix=None, type=T.entity.array, public=True,
external=True, uses_entity_info=True, uses_envs=False,
|
Trivial: Update Zuul Status Page to correct URL
Current URL of Zuul Status Page in code is:
The correct URL must be:
Remove outdated Jenkins reference. | @@ -4,9 +4,8 @@ Continuous Integration with Jenkins
Each change made to Sahara core code is tested with unit and integration tests
and style checks using flake8.
-Unit tests and style checks are performed on public `OpenStack Jenkins
-<https://jenkins.openstack.org/>`_ managed by `Zuul
-<http://status.openstack.org/zuul/>`_.
+Unit tests and style checks are performed on public `OpenStack Zuul
+<http://zuul.openstack.org/>`_ instance.
Unit tests are checked using python 2.7.
|
Testsuite: make it possible to dump concrete syntax without compiling
TN: | @@ -55,6 +55,10 @@ if not langkit_root:
langkit_root = P.dirname(testsuite_dir)
+# When unparsing the concrete syntax, name of the file to write
+unparse_destination = 'concrete_syntax.lkt'
+
+
def prepare_context(grammar, lexer=None, warning_set=default_warning_set,
symbol_canonicalizer=None, show_property_logging=False):
"""
@@ -96,7 +100,8 @@ def prepare_context(grammar, lexer=None, warning_set=default_warning_set,
def emit_and_print_errors(grammar, lexer=None,
warning_set=default_warning_set,
- generate_unparser=False, symbol_canonicalizer=None):
+ generate_unparser=False, symbol_canonicalizer=None,
+ unparse_cs=False):
"""
Compile and emit code for CTX. Return the compile context if this was
successful, None otherwise.
@@ -114,16 +119,22 @@ def emit_and_print_errors(grammar, lexer=None,
Symbol canoncalizes to use for this context, if any.
:rtype: None|langkit.compile_context.CompileCtx
+
+ :param bool unparse_cs: If true, unparse the language to a concrete syntax
+ lkt file.
"""
if lexer is None:
from lexer_example import foo_lexer
lexer = foo_lexer
+ unparse_dest = unparse_destination if unparse_cs else None
+
try:
ctx = prepare_context(grammar, lexer, warning_set,
symbol_canonicalizer=symbol_canonicalizer)
- ctx.emit('build', generate_unparser=generate_unparser)
+ ctx.emit('build', generate_unparser=generate_unparser,
+ unparse_destination_file=unparse_dest)
# ... and tell about how it went
except DiagnosticError:
# If there is a diagnostic error, don't say anything, the diagnostics
@@ -232,7 +243,7 @@ def build_and_run(grammar, py_script=None, ada_main=None, lexer=None,
# RA22-015: Unparse the language to concrete syntax
if unparse_cs:
argv.append('--unparse-destination')
- argv.append('concrete_syntax.lkt')
+ argv.append(unparse_destination)
m.run(argv)
|
remove "tab" as delimiter in README.md
I remove "tab" as delimiter of result_settings in README.md.
Because this do not work correctly and we can use "\t" instead of "\tab". | @@ -48,7 +48,7 @@ Available parameters for `result_settings` are here.
- format: (string(csv|tsv), default csv)
- compression: (string(None|gz|bzip2), default None)
- header_line: (boolean(true|false), default true)
-- delimiter: (string(","|"\t"|"tab"), default ",")
+- delimiter: (string(","|"\t"), default ",")
- quote_policy: (string(ALL|MINIMAL|NONE))
- null_string: (string(""|"\N"|NULL|null), default "")
- newline: (string(CRLF|CR|LF), default CRLF)
|
Add music config to readme
Add Music config instructions | @@ -131,6 +131,8 @@ sudo ln -s /opt/arm/arm.yaml /etc/arm/
- Edit your "config" file (located at /opt/arm/arm.yaml) to determine what options you'd like to use. Pay special attention to the 'directory setup' section and make sure the 'arm' user has write access to wherever you define these directories.
+- Edit the music config file (located at /home/arm/.abcde.conf)
+
- To rip Blu-Rays after the MakeMKV trial is up you will need to purchase a license key or while MakeMKV is in BETA you can get a free key (which you will need to update from time to time) here: https://www.makemkv.com/forum2/viewtopic.php?f=5&t=1053 and create /home/arm/.MakeMKV/settings.conf with the contents:
app_Key = "insertlicensekeyhere"
|
[dagit] Fix Run page
Summary: When `stepKeys` is an empty array, we can end up in an infinite render loop (via `useEffect`) when rendering `RunWithData`.
Test Plan: View a run, verify that the page loads without a maximum call stack error.
Reviewers: bengotow, prha | @@ -182,7 +182,7 @@ const RunWithData: React.FunctionComponent<RunWithDataProps> = ({
}, [runtimeGraph, selectionQuery]);
React.useEffect(() => {
- if (!stepKeys || computeLogKey) {
+ if (!stepKeys?.length || computeLogKey) {
return;
}
@@ -195,7 +195,7 @@ const RunWithData: React.FunctionComponent<RunWithDataProps> = ({
);
});
setComputeLogKey(selectedLogKey || logKeys[0]);
- } else if (!computeLogKey || !stepKeys.includes(computeLogKey)) {
+ } else if (!stepKeys.includes(computeLogKey)) {
setComputeLogKey(selectionStepKeys.length === 1 ? selectionStepKeys[0] : stepKeys[0]);
} else if (selectionStepKeys.length === 1 && computeLogKey !== selectionStepKeys[0]) {
setComputeLogKey(selectionStepKeys[0]);
|
Update graph_models.rst
Add example usage of --arrow-shape in graph_models.rst | @@ -110,7 +110,7 @@ image by using the *graph_models* command::
::
- # Create a excluding certain models
+ # Create a graph excluding certain models
$ ./manage.py graph_models -a -X Foo,Bar -o my_project_sans_foo_bar.png
::
@@ -118,6 +118,12 @@ image by using the *graph_models* command::
# Create a graph without showing its edges' labels
$ ./manage.py graph_models -a --hide-edge-labels -o my_project_sans_foo_bar.png
+::
+
+ # Create a graph with 'normal' arrow shape for relations
+ $ ./manage.py graph_models -a --arrow-shape normal -o my_project_sans_foo_bar.png
+
+
.. _GraphViz: http://www.graphviz.org/
.. _pygraphviz: https://pygraphviz.github.io/
|
dist_type --> cdf_target
dist_params --> cdf_target_params | @@ -13,9 +13,9 @@ from UQpy.Distributions import *
class SROM:
- def __init__(self, samples=None, dist_type=None, moments=None, weights_errors=None,
+ def __init__(self, samples=None, cdf_target=None, moments=None, weights_errors=None,
weights_distribution=None, weights_moments=None, weights_correlation=None,
- properties=None, dist_params=None, correlation=None):
+ properties=None, cdf_target_params=None, correlation=None):
"""
Stochastic Reduced Order Model(SROM) provide a low-dimensional, discrete approximation of a given random
quantity.
@@ -32,11 +32,11 @@ class SROM:
:param samples: A list of samples corresponding to each random variables
:type samples: list
- :param dist_type: A list of Cumulative distribution functions of random variables
- :type dist_type: list str or list function
+ :param cdf_target: A list of Cumulative distribution functions of random variables
+ :type cdf_target: list str or list function
- :param dist_params: Parameters of distribution
- :type dist_params: list
+ :param cdf_target_params: Parameters of distribution
+ :type cdf_target_params: list
:param moments: A list containing first and second order moment about origin of all random variables
:type moments: list
@@ -93,14 +93,14 @@ class SROM:
self.samples = np.array(samples)
self.correlation = np.array(correlation)
- self.dist_type = dist_type
+ self.cdf_target = cdf_target
self.moments = np.array(moments)
self.weights_errors = weights_errors
self.weights_distribution = weights_distribution
self.weights_moments = weights_moments
self.weights_correlation = weights_correlation
self.properties = properties
- self.dist_params = dist_params
+ self.cdf_target_params = cdf_target_params
self.dimension = self.samples.shape[1]
self.nsamples = self.samples.shape[0]
self.init_srom()
@@ -160,8 +160,8 @@ class SROM:
p_ = optimize.minimize(f, np.zeros(self.nsamples),
args=(self.samples, self.weights_distribution, self.weights_moments,
- self.weights_correlation, self.dist_type, self.nsamples, self.dimension,
- self.moments, self.weights_errors, self.dist_params, self.properties,
+ self.weights_correlation, self.cdf_target, self.nsamples, self.dimension,
+ self.moments, self.weights_errors, self.cdf_target_params, self.properties,
self.correlation),
constraints=cons, method='SLSQP')
@@ -169,10 +169,10 @@ class SROM:
def init_srom(self):
- if self.dist_type is None:
+ if self.cdf_target is None:
raise NotImplementedError("Exit code: Distribution not defined.")
- self.dist_type = cdf(self.dist_type)
+ self.cdf_target = cdf(self.cdf_target)
# Check samples
if self.samples is None:
@@ -241,10 +241,10 @@ class SROM:
raise NotImplementedError("Size of 'weights for correlation' is not correct")
# Check cdf_type
- if len(self.dist_type) == 1:
- self.dist_type = self.dist_type * self.dimension
- self.dist_params = [self.dist_params] * self.dimension
- elif len(self.dist_type) != self.dimension:
+ if len(self.cdf_target) == 1:
+ self.cdf_target = self.cdf_target * self.dimension
+ self.cdf_target_params = [self.cdf_target_params] * self.dimension
+ elif len(self.cdf_target) != self.dimension:
raise NotImplementedError("Size of cdf_type should be 1 or equal to dimension")
|
Remove note about py27
we don't want to use .clear() as references to that list will keep their old data | @@ -293,8 +293,7 @@ class Query:
data = self.results.get('data')
self.subed += len(data)
- self.uniq.clear()
- # no list.clear() in py27
+ self.uniq = {}
self.results['data'] = []
return data
|
Handle return value from multiple check suites
Handles the return status of multiple check suites, returning non-zero
if any of the check suites did not pass and/or if unhandled
compliance-checker errors. Possible fix for | @@ -48,6 +48,7 @@ class ComplianceChecker(object):
@returns If the tests failed (based on the criteria)
"""
+ all_groups = []
cs = CheckSuite()
# using OrderedDict is important here to preserve the order
# of multiple datasets which may be passed in
@@ -87,7 +88,8 @@ class ComplianceChecker(object):
for out_fmt in output_format:
if out_fmt == 'text':
if output_filename == '-':
- groups = cls.stdout_output(cs, score_dict, verbose, limit)
+ all_groups.append(cls.stdout_output(cs, score_dict,
+ verbose, limit))
# need to redirect output from stdout since print functions are
# presently used to generate the standard report output
else:
@@ -96,29 +98,30 @@ class ComplianceChecker(object):
output_filename = '{}.txt'.format(os.path.splitext(output_filename)[0])
with io.open(output_filename, 'w', encoding='utf-8') as f:
with stdout_redirector(f):
- groups = cls.stdout_output(cs, score_dict, verbose,
- limit)
+ all_groups.append(cls.stdout_output(cs, score_dict,
+ verbose, limit))
elif out_fmt == 'html':
# Update file name if needed
if len(output_format) > 1 and output_filename != '-':
output_filename = '{}.html'.format(os.path.splitext(output_filename)[0])
- groups = cls.html_output(cs, score_dict, output_filename, ds_loc,
- limit)
+ all_groups.append(cls.html_output(cs, score_dict, output_filename, ds_loc,
+ limit))
elif out_fmt == 'json' or 'json_new':
# Update file name if needed
if len(output_format) > 1 and output_filename != '-':
output_filename = '{}.json'.format(os.path.splitext(output_filename)[0])
- groups = cls.json_output(cs, score_dict, output_filename, ds_loc,
- limit, out_fmt)
+ all_groups.append(cls.json_output(cs, score_dict, output_filename, ds_loc,
+ limit, out_fmt))
else:
raise TypeError('Invalid format %s' % out_fmt)
errors_occurred = cls.check_errors(score_groups, verbose)
- return cs.passtree(groups, limit), errors_occurred
+ return (all(cs.passtree(groups, limit) for groups in all_groups),
+ errors_occurred)
@classmethod
def stdout_output(cls, cs, score_dict, verbose, limit):
|
Catch NamespaceNotFoundError so we can give more useful error
Also removed some code that wasn't being used. | @@ -6,7 +6,11 @@ from robot import run as robot_run
from robot import pythonpathsetter
from robot.testdoc import testdoc
-from cumulusci.core.exceptions import RobotTestFailure, TaskOptionsError
+from cumulusci.core.exceptions import (
+ RobotTestFailure,
+ TaskOptionsError,
+ NamespaceNotFoundError,
+)
from cumulusci.core.tasks import BaseTask
from cumulusci.core.utils import process_bool_arg
from cumulusci.core.utils import process_list_arg
@@ -105,13 +109,15 @@ class Robot(BaseSalesforceTask):
# get_namespace will potentially download sources that have
# yet to be downloaded. We'll then add them to PYTHONPATH
- # before running. I can't just add it to sys.path because pabot
- # processes won't see it.
- options["pythonpath"] = []
+ # before running, though we have to do it one way for pabot
+ # and another for robot.
source_paths = {}
for source in self.options["sources"]:
+ try:
source_config = self.project_config.get_namespace(source)
source_paths[source] = source_config.repo_root
+ except NamespaceNotFoundError:
+ raise TaskOptionsError(f"robot source '{source}' could not be found")
if self.options["processes"] > 1:
cmd = [
|
make a simpler to avoid file leaks
Appending the complete command can | @@ -458,11 +458,12 @@ def main(argv):
min_size=options.min_size)
with h5py.File(output_name, mode="r+") as h5f:
- # append the convert command to the creator attribute, for NeXus files
- creator = h5f.attrs.get("creator", b"").decode()
- convert_command = " ".join(argv)
- if convert_command not in creator:
+ # append "silx convert" to the creator attribute, for NeXus files
+ previous_creator = h5f.attrs.get("creator", b"").decode()
+ creator = "silx convert (v%s)" % silx.version
+ # only if it not already there
+ if creator not in previous_creator:
h5f.attrs["creator"] = \
- numpy.string_(creator + "; convert command: %s" % " ".join(argv))
+ numpy.string_(previous_creator + "; " + creator)
return 0
|
[varLib] Allow sparse masters when merging hinting
Part of
Part of | @@ -374,7 +374,7 @@ def _merge_TTHinting(font, model, master_ttfs, tolerance=0.5):
all_pgms = [
m["glyf"][name].program
for m in master_ttfs
- if hasattr(m["glyf"][name], "program")
+ if name in m['glyf'] and hasattr(m["glyf"][name], "program")
]
if not any(all_pgms):
continue
@@ -385,6 +385,7 @@ def _merge_TTHinting(font, model, master_ttfs, tolerance=0.5):
font_pgm = Program()
if any(pgm != font_pgm for pgm in all_pgms if pgm):
log.warning("Masters have incompatible glyph programs in glyph '%s', hinting is discarded." % name)
+ # TODO Only drop hinting from this glyph.
_remove_TTHinting(font)
return
|
Make USER_DIR available outside function context,
avoid throwing an error when os.getenv("USER") is None,
e.g. under Android | @@ -35,15 +35,17 @@ def env(name):
return os.environ.get( name, '' )
def get_home_dir():
- """Return the user's home directory"""
+ """Return the user's home directory and name"""
if OS != "Windows":
# set home directory from user name for Mac and Linux when started as user or
# sudo user
- USERNAME = os.getenv('SUDO_USER') or os.getenv('USER')
- home_dir = os.path.expanduser('~'+USERNAME)
+ user_name = os.getenv('SUDO_USER') or os.getenv('USER')
+ if user_name is None:
+ user_name = ""
+ home_dir = os.path.expanduser('~'+user_name)
else:
# same for windows
- USERNAME = os.getenv('USER')
+ user_name = os.getenv('USER')
# home_dir = os.path.expanduser(os.getenv('USERPROFILE'))
home_dir = env( 'USERPROFILE' )
if not valid(home_dir):
@@ -56,9 +58,9 @@ def get_home_dir():
home_dir += '\\'
if not valid(home_dir) :
home_dir = 'C:\\'
- return home_dir
+ return home_dir, user_name
-HOME_DIR = get_home_dir()
+HOME_DIR, USER_NAME = get_home_dir()
#------------------------------------------------------------------------------
TEMP_DIR = tempfile.gettempdir()
|
Removing an erroneous comment
Left a code-comment in a change by accident, updating to remove it. | @@ -7,7 +7,7 @@ set -o xtrace
if [ "$DISTRO_NAME" == "ubuntu" ] && [ "$DIB_RELEASE" == "trusty" ]; then
echo deb http://archive.ubuntu.com/ubuntu trusty-backports main restricted universe multiverse > /etc/apt/sources.list.d/backports.list
elif [ "$DISTRO_NAME" == "centos7" ]; then
- case $ARCH in ## Change this to an environment variable from DIB?!
+ case $ARCH in
x86_64|amd64)
BASEURL=http://mirror.centos.org/\$contentdir/\$releasever/paas/\$basearch/openshift-origin/
;;
|
Added custom jvp for np.linalg.det
For faster gradient computation for determinants, added closed-form expressing for Jacobian-vector product. Still needs a test. | @@ -86,9 +86,11 @@ def slogdet(a):
@_wraps(onp.linalg.det)
+@custom_transforms
def det(a):
sign, logdet = slogdet(a)
return sign * np.exp(logdet)
+defjvp(det, lambda g, ans, x: np.trace(np.dot(g, np.linalg.inv(x)))*ans)
@_wraps(onp.linalg.eig)
|
Using edge weight in `AddLaplacianEigenvectorPE` transform
According to issue | @@ -69,6 +69,7 @@ class AddLaplacianEigenvectorPE(BaseTransform):
num_nodes = data.num_nodes
edge_index, edge_weight = get_laplacian(
data.edge_index,
+ data.edge_weight,
normalization='sym',
num_nodes=num_nodes,
)
|
fix: Now it's possible to init empty scheduler
(without any parameters). | @@ -66,7 +66,7 @@ class Scheduler:
_logger_basename = "atlas.scheduler"
parameters = GLOBAL_PARAMETERS # interfacing the global parameters. TODO: Support for multiple schedulers
- def __init__(self, tasks, maintainer_tasks=None,
+ def __init__(self, tasks=None, maintainer_tasks=None,
startup_tasks=None, shutdown_tasks=None,
shut_condition=None,
min_sleep=0.1, max_sleep=600,
@@ -75,13 +75,17 @@ class Scheduler:
"""[summary]
Arguments:
- tasks {[type]} -- [description]
+ tasks {List[Task|str]} -- List of tasks (or task names) that are run on normal sequence
+ maintainer_tasks {List[Task|str]} -- List of tasks (or task names) that are run as maintainers
+ startup_tasks {List[Task|str]} -- List of tasks (or task names) that are run on start up
+ shutdown_tasks {List[Task|str]} -- List of tasks (or task names) that are run on shut down
Keyword Arguments:
maintain_cond {Condition} -- Condition to kick maintaining on (default: {None})
shut_cond {[type]} -- Condition to shut down scheduler (default: {None})
"""
- self.tasks = tasks
+ # TODO: Accept tasks, maintainer_tasks etc. also as strings (names of the tasks)
+ self.tasks = [] if tasks is None else tasks
self.maintainer_tasks = [] if maintainer_tasks is None else maintainer_tasks
self.startup_tasks = startup_tasks
self.shutdown_tasks = shutdown_tasks
|
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.