message: string, length 13-484 characters
diff: string, length 38-4.63k characters
Ignore all lines of subsequent hunks until last one is found Git version 2.11.1+ introduced extra lines into the subsequent hunk sections for incremental blame output. The documentation notes that parsers of this output should ignore all lines between the start and end for robust parsing.
@@ -713,11 +713,14 @@ class Repo(object): committed_date=int(props[b'committer-time'])) commits[hexsha] = c else: - # Discard the next line (it's a filename end tag) + # Discard all lines until we find "filename" which is + # guaranteed to be the last line + while True: line = next(stream) tag, value = line.split(b' ', 1) - assert tag == b'filename', 'Unexpected git blame output' + if tag == b'filename': orig_filename = value + break yield BlameEntry(commits[hexsha], range(lineno, lineno + num_lines),
Fix alerter test with correct format Removes the hostname of the monitor if it's local; this was always the intended behaviour (although the test didn't properly reflect that), and the behaviour was fixed in an earlier commit, caught by linting.
@@ -495,13 +495,12 @@ class TestMessageBuilding(unittest.TestCase): ), textwrap.dedent( """ - Monitor test on {hostname} failed! + Monitor test failed! Failed at: {expected_time} (down 0+00:00:00) Virtual failure count: 1 Additional info: This monitor always fails. Description: A monitor which always fails. """.format( - hostname=util.short_hostname(), expected_time=self.expected_time_string, ) ), @@ -517,14 +516,13 @@ class TestMessageBuilding(unittest.TestCase): ), textwrap.dedent( """ - Monitor test on {hostname} failed! + Monitor test failed! Failed at: {expected_time} (down 0+00:00:00) Virtual failure count: 1 Additional info: This monitor always fails. Description: A monitor which always fails. Documentation: whoops """.format( - hostname=util.short_hostname(), expected_time=self.expected_time_string, ) ), @@ -541,12 +539,11 @@ class TestMessageBuilding(unittest.TestCase): ), textwrap.dedent( """ - Monitor winning on {hostname} succeeded! + Monitor winning succeeded! Recovered at: {expected_time} (was down for 0+00:00:00) Additional info: Description: (Monitor did not write an auto-biography.) """.format( # noqa: W291 - hostname=util.short_hostname(), expected_time=self.expected_time_string, ) ),
Lexical envs: comment Get_Internal.Get_Elements TN:
@@ -492,6 +492,12 @@ package body Langkit_Support.Lexical_Env is use Internal_Envs; function Get_Elements (Env : Lexical_Env) return Boolean; + -- Lookup for matching elements in Env's internal map and append them to + -- Local_Results. Return whether we found some. + + ------------------ + -- Get_Elements -- + ------------------ function Get_Elements (Env : Lexical_Env) return Boolean is C : Cursor := Internal_Envs.No_Element;
Fix owner signing its own removal Closes
@@ -103,11 +103,13 @@ class SafeMultisigTransactionListView(ListAPIView): ethereum_client = EthereumClientProvider() safe = Safe(address, ethereum_client=ethereum_client) - # Check operation type matches condition (hash_approved -> confirmation, nonce -> execution) - if not safe.retrieve_is_owner(sender): + # Check owners and old owners, owner might be removed but that tx can still be signed by that owner + if (not safe.retrieve_is_owner(sender) and + not safe.retrieve_is_owner(sender, block_identifier=ethereum_client.current_block_number - 100)): return Response(status=status.HTTP_422_UNPROCESSABLE_ENTITY, data='User is not an owner') else: + # Check operation type matches condition (hash_approved -> confirmation, nonce -> execution) if not (safe.retrieve_is_hash_approved(sender, safe_tx_hash) or safe.retrieve_nonce() > data['nonce']): return Response(status=status.HTTP_422_UNPROCESSABLE_ENTITY,
Update how-to-run-CEA-optimization.rst: change to the right graph
@@ -22,11 +22,11 @@ The results from the optimization is a collection of pareto-optimum solutions. Optimization Variables ---------------------- Beside the objectives, the main outputs from the optimization is the energy supply system configurations. -Each perato-optimum solution implies a unique energy supply system configuration. A configuration is combination of +Each pareto-optimum solution implies a unique energy supply system configuration. A configuration is combination of energy supply technologies and sizes. All possible configurations that is incorporated in the CEA is presented in this figure: -.. image:: flowchart_thermal_electric_network_planning.png +.. image:: supply_system_superstructure.png :align: center Supply technology selection and sizing
Extract generic onChanged invocation logic to _callOnChanged and call it from both PropertyStorage and alias property code Fixes
@@ -522,10 +522,8 @@ PropertyStoragePrototype.set = function(object, name, newValue, defaultValue, ca this.callOnChanged(object, name, newValue, oldValue) } -PropertyStoragePrototype.callOnChanged = function(object, name, value) { +var _callOnChanged = function(object, name, value, handlers) { var protoCallbacks = object['__changed__' + name] - var handlers = this.onChanged - var hasProtoCallbacks = protoCallbacks !== undefined var hasHandlers = handlers !== undefined @@ -541,6 +539,10 @@ PropertyStoragePrototype.callOnChanged = function(object, name, value) { handlers.forEach(invoker) } +PropertyStoragePrototype.callOnChanged = function(object, name, value) { + _callOnChanged(object, name, value, this.onChanged) +} + PropertyStoragePrototype.removeOnChanged = function(callback) { var handlers = this.onChanged var idx = handlers.indexOf(callback) @@ -704,6 +706,8 @@ exports.addAliasProperty = function(object, name, getObject, srcProperty) { var storage = object.__properties[name] if (storage !== undefined) storage.callOnChanged(object, name, value) + else + _callOnChanged(object, name, value) //call prototype handlers }) Object.defineProperty(object, name, {
Updated filter_profiles to handle arrays in values. key_values might now contain arrays; useful for filtering on 'families'.
@@ -79,11 +79,11 @@ def fullmatch(regex, string, flags=0): return None -def validate_value_by_regexes(value, in_list): +def validate_value_by_regexes(values, in_list): """Validates in any regex from list match entered value. Args: - value (str): String where regexes is checked. + values (str|list): String where regexes is checked. in_list (list): List with regexes. Returns: @@ -102,11 +102,15 @@ def validate_value_by_regexes(value, in_list): # If value is not set and in list has specific values then resolve value # as not matching. - if not value: + if not values: return -1 + if isinstance(values, str): + values = [values] + regexes = compile_list_of_regexes(in_list) for regex in regexes: + for value in values: if hasattr(regex, "fullmatch"): result = regex.fullmatch(value) else: @@ -136,7 +140,8 @@ def filter_profiles(profiles_data, key_values, keys_order=None, logger=None): Args: profiles_data (list): Profile definitions as dictionaries. - key_values (dict): Mapping of Key <-> Value. Key is checked if is + key_values (dict): Mapping of Key <-> Value|[Value]. + Key is checked if is available in profile and if Value is matching it's values. keys_order (list, tuple): Order of keys from `key_values` which matters only when multiple profiles have same score. @@ -181,12 +186,12 @@ def filter_profiles(profiles_data, key_values, keys_order=None, logger=None): profile_scores = [] for key in keys_order: - value = key_values[key] - match = validate_value_by_regexes(value, profile.get(key)) + values = key_values[key] + match = validate_value_by_regexes(values, profile.get(key)) if match == -1: profile_value = profile.get(key) or [] logger.debug( - "\"{}\" not found in \"{}\": {}".format(value, key, + "\"{}\" not found in \"{}\": {}".format(values, key, profile_value) ) profile_points = -1
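A minimal runnable sketch of the str-or-list normalization used above; the matches_any helper and the sample patterns are illustrative, not taken from the original module.

import re

def matches_any(values, patterns):
    # Accept a single string or a list of strings, as filter_profiles now does.
    if isinstance(values, str):
        values = [values]
    regexes = [re.compile(p) for p in patterns]
    # Consider it a match if any regex fully matches any of the values.
    return any(r.fullmatch(v) for r in regexes for v in values)

print(matches_any("render", ["re.*"]))          # True
print(matches_any(["model", "rig"], ["re.*"]))  # False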
docs(customization): update cookiecutter template url commitizen_cz_template has moved from Lee-W to commitizen-tools
@@ -14,10 +14,10 @@ Check an [example](convcomms) on how to configure `BaseCommitizen`. You can also automate the steps above through [cookiecutter](https://cookiecutter.readthedocs.io/en/1.7.0/). ```sh -cookiecutter gh:Lee-W/commitizen_cz_template +cookiecutter gh:commitizen-tools/commitizen_cz_template ``` -See [Lee-W/commitizen_cz_template](https://github.com/Lee-W/commitizen_cz_template) for detail. +See [commitizen_cz_template](https://github.com/commitizen-tools/commitizen_cz_template) for detail. ### Custom commit rules
TEST: include test_granger_fails_on_zero_lag issue Include test that the appropriate exception is raised when the wrong list of arguments is passed
@@ -481,6 +481,13 @@ class TestGrangerCausality(object): with pytest.raises(ValueError, match="x contains NaN"): grangercausalitytests(x, 2) + def test_granger_fails_on_zero_lag(self, reset_randomstate): + x = np.random.rand(1000, 2) + with pytest.raises( + ValueError, + match="maxlag must be a non-empty list containing only positive integers"): + grangercausalitytests(x, [0, 1, 2]) + class TestKPSS: """
Add base class & test launcher for web file upload Throw web.webcore:upload:test on your commandline, and you get an /upload_test URL where you can upload a file. The base class just logs the filename, size, and MD5 hash, but you can easily subclass it to do something more interesting.
@@ -660,6 +660,71 @@ class InternalContentHandler (SplitRequestHandler): self.wfile.write(r) +class FileUploadHandler (SplitRequestHandler): + """ + A default page to say hi from POX. + """ + def do_GET (self): + """Serve a GET request.""" + self.send_form(True) + + def do_HEAD (self): + """Serve a HEAD request.""" + self.send_form(False) + + def send_form (self, is_get = False, msg = None): + r = "<html><head><title>POX</title></head>\n" + r += "<body>\n<h1>POX File Upload</h1>\n" + if msg: + r += msg + r += "\n<hr />\n" + r += "<form method='POST' enctype='multipart/form-data' action='?'>\n" + r += "File to upload: <input type='file' name='upload'>\n" + r += "<input type='submit' value='Upload!' /></form>\n" + r += "</body></html>\n" + + self.send_response(200) + self.send_header("Content-type", "text/html") + self.send_header("Content-Length", str(len(r))) + self.end_headers() + if is_get: + self.wfile.write(r) + + def do_POST (self): + mime,params = cgi.parse_header(self.headers.getheader('content-type')) + if mime != 'multipart/form-data': + self.send_error(400, "Expected form data") + return + #query = cgi.parse_multipart(self.rfile, params) + #data = query.get("upload") + data = cgi.FieldStorage( fp = self.rfile, headers = self.headers, environ={ 'REQUEST_METHOD':'POST' } ) + if not data or "upload" not in data: + self.send_error(400, "Expected upload data") + return + uploadfield = data["upload"] + + msg = self.on_upload(uploadfield.filename, uploadfield.file) + + self.send_form(True, msg=msg) + + def on_upload (self, filename, datafile): + data = datafile.read() + import hashlib + h = hashlib.md5() + h.update(data) + hc = h.hexdigest() + msg = "Received file '%s'. bytes:%s md5:%s" % (filename, len(data), hc) + log.warn(msg) + return msg + + +def upload_test (): + """ + Launch a file upload test + """ + core.WebServer.set_handler("/upload_test", FileUploadHandler) + + def launch (address='', port=8000, static=False, ssl_server_key=None, ssl_server_cert=None, ssl_client_certs=None): def expand (f):
Remove dashboard_frontend_vip from the ceph mgr template The purpose of this change is to just remove the dashboard frontend vip parameter that is now computed on tripleo-ansible according to the haproxy frontend network defined for the ceph dashboard service. Depends-On:
@@ -83,7 +83,6 @@ resources: dashboard_rgw_api_scheme: {get_param: [EndpointMap, CephRgwInternal, protocol]} dashboard_rgw_api_no_ssl_verify: false dashboard_port: {get_param: CephDashboardPort} - dashboard_frontend_vip: {get_param: [EndpointMap, CephDashboardInternal, host]} dashboard_admin_user_ro: {get_param: CephDashboardAdminRO} outputs:
Make the ScopeExtractorProcessor usable for the Primary Identifier This patch adds support to use the ScopeExtractorProcessor on the Primary Identifier which is, in contrast to the other values, a string. Closes
@@ -31,6 +31,8 @@ class ScopeExtractorProcessor(BaseProcessor): values = attributes.get(attribute, []) if not values: raise AttributeProcessorWarning("Cannot apply scope_extractor to {}, it has no values".format(attribute)) + if not isinstance(values, list): + values = [values] if not any('@' in val for val in values): raise AttributeProcessorWarning("Cannot apply scope_extractor to {}, it's values are not scoped".format(attribute)) for value in values:
improve unit tests for lru_cache This patch restructures and adds unit tests for types.lru_cache.
from nutils.testing import * import nutils.types import inspect, pickle, itertools, ctypes, stringly, tempfile, io, os -import numpy, weakref +import numpy, weakref, contextlib class apply_annotations(TestCase): @@ -1028,46 +1028,82 @@ class lru_cache(TestCase): super().setUp() self.func.cache.clear() + class obj(nutils.types.Immutable): + 'weak referencable object' + @nutils.types.lru_cache(maxsize=2) def func(self, *args): self.called = True + return self.obj() + @contextlib.contextmanager def assertCached(self, *args): self.called = False - self.func(*args) + yield self.assertFalse(self.called) + @contextlib.contextmanager def assertNotCached(self, *args): self.called = False - self.func(*args) + yield self.assertTrue(self.called) def test_lru(self): - self.assertNotCached(1) - self.assertNotCached(2) - self.assertCached(1) - self.assertCached(2) - self.assertNotCached(3) # drops 1 - self.assertNotCached(1) - self.assertCached(3) + with self.assertNotCached(): + self.func(1) + with self.assertNotCached(): + self.func(2) + with self.assertCached(): + self.func(1) + with self.assertCached(): + self.func(2) + with self.assertNotCached(): + self.func(3) # drops 1 + with self.assertNotCached(): + self.func(1) + with self.assertCached(): + self.func(3) + + def test_array_identification(self): + a = numpy.array([1,2,3,4]) + a.flags.writeable = False + with self.assertNotCached(): + self.func(a[1:][:-1]) + with self.assertCached(): + self.func(a[:-1][1:]) - def test_array(self): + def test_destruction_arrays(self): a = numpy.array([1,2,3,4]) a.flags.writeable = False - self.assertNotCached(a[1:][:-1]) - self.assertCached(a[:-1][1:]) + b = numpy.array([5,6,7,8]) + b.flags.writeable = False + with self.assertNotCached(): + ret_ = weakref.ref(self.func(a, b)) + with self.assertCached(): + self.assertIs(ret_(), self.func(a, b)) + del a + self.assertIs(ret_(), None) - def test_callback(self): + def test_destruction_array_obj(self): a = numpy.array([1,2,3,4]) a.flags.writeable = False - class dummy: pass - b = dummy() - r = weakref.ref(b) - self.assertNotCached(a, b) + b = self.obj() + b_ = weakref.ref(b) + with self.assertNotCached(): + ret_ = weakref.ref(self.func(a, b)) del b - self.assertIsNot(r(), None) + self.assertIsNot(b_(), None) + self.assertIsNot(ret_(), None) del a - self.assertIs(r(), None) + self.assertIs(b_(), None) + self.assertIs(ret_(), None) + + def test_mutable(self): + a = numpy.array([1,2,3,4]) + with self.assertNotCached(): + self.func(a) + with self.assertNotCached(): + self.func(a) class hashable_function(TestCase):
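The restructuring above turns the cached/not-cached assertions into context managers so the call under test runs inside the with block. A minimal standalone sketch of that pattern, with an illustrative Tracker class standing in for the test case:

import contextlib

class Tracker:
    def __init__(self):
        self.called = False

    @contextlib.contextmanager
    def assert_called(self, expected):
        # Reset, let the caller run the code under test, then check afterwards.
        self.called = False
        yield
        assert self.called is expected

t = Tracker()
with t.assert_called(True):
    t.called = True   # simulates a cache miss invoking the wrapped function
with t.assert_called(False):
    pass              # simulates a cache hit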
add note about need to install ffmpeg/libav Was messing for hours with error messages, and trying to figure out what was wrong just because I didn't notice the warning about ffmpeg/libav.
@@ -12,6 +12,7 @@ If you have OS X and `Homebrew`_ you can install with: :: brew install svtplay-dl + Make sure you notice that you need to run `brew install ffmpeg` or `brew install libav` afterwards, if you don't already have one of these packages. Debian and Ubuntu ~~~~~~~~~~~~~~~~~
Pin dependencies Use "compatible release" version specifiers for the dnaio and xopen dependencies. Not using the ">=" operator should avoid surprises when dependencies are updated. See Close
@@ -102,7 +102,7 @@ setup( package_dir={'': 'src'}, packages=find_packages('src'), entry_points={'console_scripts': ['cutadapt = cutadapt.__main__:main']}, - install_requires=['dnaio>=0.3', 'xopen>=0.7.3'], + install_requires=['dnaio~=0.3.0', 'xopen~=0.8.1'], extras_require={ 'dev': ['Cython', 'pytest', 'pytest-timeout', 'sphinx', 'sphinx_issues'], },
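For context, a small sketch of what the "compatible release" operator permits, using the packaging library (assumed to be available; it is not part of the patch):

from packaging.specifiers import SpecifierSet

# "~=0.3.0" is shorthand for ">=0.3.0, ==0.3.*": patch releases allowed, minor version pinned.
spec = SpecifierSet("~=0.3.0")
print("0.3.5" in spec)  # True  - patch update, accepted
print("0.4.0" in spec)  # False - minor update, rejected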
revert: change to pip caching let's revisit this optimization in a different PR, since there is no explicit requirements.txt in the project as such. We will need a slightly different approach with this. Authored-by: Vinit Kumar
@@ -99,7 +99,6 @@ jobs: uses: actions/setup-python@v3 with: python-version: ${{ matrix.python }} - cache: 'pip' - name: Install dependencies run: | python -m pip install --upgrade pip @@ -147,7 +146,6 @@ jobs: uses: actions/setup-python@v3 with: python-version: ${{ matrix.python }} - cache: 'pip' - name: Install dependencies run: | python -m pip install --upgrade pip @@ -191,7 +189,6 @@ jobs: uses: actions/setup-python@v3 with: python-version: ${{ matrix.python }} - cache: 'pip' - name: Install dependencies run: | python -m pip install --upgrade pip @@ -233,7 +230,6 @@ jobs: uses: actions/setup-python@v3 with: python-version: ${{ matrix.python }} - cache: 'pip' - name: Install dependencies run: | python -m pip install --upgrade pip @@ -280,7 +276,6 @@ jobs: uses: actions/setup-python@v3 with: python-version: ${{ matrix.python }} - cache: 'pip' - name: Install dependencies run: | python -m pip install --upgrade pip @@ -331,7 +326,6 @@ jobs: uses: actions/setup-python@v3 with: python-version: ${{ matrix.python }} - cache: 'pip' - name: Install dependencies run: | python -m pip install --upgrade pip
Update bpim2zero.py Support for pins PL2 and PL4
@@ -17,6 +17,49 @@ UART2_TX = pin.PA0 PA3 = pin.PA3 UART2_CTS = pin.PA3 PA10 = pin.PA10 +PA12 = pin.PA12 +SDA = pin.PA12 +PA11 = pin.PA11 +SCL = pin.PA11 +PA6 = pin.PA6 +PWM1 = pin.PA6 +PA1 = pin.PA1 +UART2_RX = pin.PA1 +PA0 = pin.PA0 +UART2_TX = pin.PA0 +PA3 = pin.PA3 +UART2_CTS = pin.PA3 +PA7 = pin.PA7 +PA8 = pin.PA8 +PA9 = pin.PA9 +PA10 = pin.PA10 +PA17 = pin.PA17 +PA18 = pin.PA18 +PA19 = pin.PA19 +PA20 = pin.PA20 +PA21 = pin.PA21 +PC0 = pin.PC0 +PC1 = pin.PC1 +PC2 = pin.PC2 +PC3 = pin.PC3 +PC4 = pin.PC4 +PC7 = pin.PC7 + +PA13 = pin.PA13 +SPI1_CS = pin.PA13 +PA14 = pin.PA14 +SPI1_CLK = pin.PA14 +PA2 = pin.PA2 +UART2_RTS = pin.PA2 +PA18 = pin.PA18 +TWI1_SCK = pin.PA18 + +PL2 = pin.PL2 +PL4 = pin.PL4 + +SCLK = pin.PA14 +MOSI = pin.PA15 +MISO = pin.PA16 PA13 = pin.PA13 SPI1_CS = pin.PA13
Updated Docs: fixed broken link
@@ -207,7 +207,7 @@ For libraries it is not necessary to commit the lock file. ### Installing dependencies only -The current project is installed in [editable](https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs) mode by default. +The current project is installed in [editable](https://pip.pypa.io/en/stable/cli/pip_install/#install-editable) mode by default. If you want to install the dependencies only, run the `install` command with the `--no-root` flag:
Trivial follow up to addition of last modified in container listings Trivial fixes in follow up to [1]: give containers unique timestamps in listing test, remove accidental line duplication, add missing last modified time assertion. [1] Related-Change:
@@ -1106,8 +1106,8 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'}) req.get_response(self.controller) - put_timestamp = normalize_timestamp(0) for c in range(5): + put_timestamp = normalize_timestamp(c + 1) req = Request.blank( '/sda1/p/a/c%d' % c, environ={'REQUEST_METHOD': 'PUT'}, @@ -1121,22 +1121,21 @@ class TestAccountController(unittest.TestCase): environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 200) - timestamp_str = Timestamp(put_timestamp).isoformat expected = [{'count': 2, 'bytes': 3, 'name': 'c0', - 'last_modified': timestamp_str}, + 'last_modified': Timestamp('1').isoformat}, {'count': 2, 'bytes': 3, 'name': 'c1', - 'last_modified': timestamp_str}, + 'last_modified': Timestamp('2').isoformat}, {'count': 2, 'bytes': 3, 'name': 'c2', - 'last_modified': timestamp_str}] + 'last_modified': Timestamp('3').isoformat}] self.assertEqual(json.loads(resp.body), expected) req = Request.blank('/sda1/p/a?limit=3&marker=c2&format=json', environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 200) expected = [{'count': 2, 'bytes': 3, 'name': 'c3', - 'last_modified': timestamp_str}, + 'last_modified': Timestamp('4').isoformat}, {'count': 2, 'bytes': 3, 'name': 'c4', - 'last_modified': timestamp_str}] + 'last_modified': Timestamp('5').isoformat}] self.assertEqual(json.loads(resp.body), expected) def test_GET_limit_marker_xml(self): @@ -1181,7 +1180,6 @@ class TestAccountController(unittest.TestCase): self.assertEqual(sorted([n.nodeName for n in container]), ['bytes', 'count', 'last_modified', 'name']) node = [n for n in container if n.nodeName == 'name'][0] - node = [n for n in container if n.nodeName == 'name'][0] self.assertEqual(node.firstChild.nodeValue, 'c2') node = [n for n in container if n.nodeName == 'count'][0] self.assertEqual(node.firstChild.nodeValue, '2') @@ -1223,6 +1221,9 @@ class TestAccountController(unittest.TestCase): self.assertEqual(node.firstChild.nodeValue, '2') node = [n for n in container if n.nodeName == 'bytes'][0] self.assertEqual(node.firstChild.nodeValue, '3') + node = [n for n in container if n.nodeName == 'last_modified'][0] + self.assertEqual(node.firstChild.nodeValue, + Timestamp('5').isoformat) def test_GET_accept_wildcard(self): req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
Add explicit links to definition terms This is useful if we want to explain e.g. what a "sample" is. I haven't looked to see if it messes up formatting, but I hope not.
@@ -28,12 +28,16 @@ Definitions To begin, here are definitions of some key ideas encountered later. +.. _sec_data_model_definitions_tree: + tree A "gene tree", i.e., the genealogical tree describing how a collection of genomes (usually at the tips of the tree) are related to each other at some chromosomal location. See :ref:`sec_nodes_or_individuals` for discussion of what a "genome" is. +.. _sec_data_model_definitions_tree_sequence: + tree sequence A "succinct tree sequence" (or tree sequence, for brevity) is an efficient encoding of a sequence of correlated trees, such as one encounters looking @@ -41,6 +45,8 @@ tree sequence structure shared by adjacent trees, (essentially) storing only what differs between them. +.. _sec_data_model_definitions_node: + node Each branching point in each tree is associated with a particular genome in a particular ancestor, called "nodes". Since each node represents a @@ -48,6 +54,8 @@ node which determines the height of any branching points it is associated with. See :ref:`sec_nodes_or_individuals` for discussion of what a "node" is. +.. _sec_data_model_definitions_individual: + individual In certain situations we are interested in how nodes (representing individual homologous genomes) are grouped together into individuals @@ -56,6 +64,8 @@ individual individual rather than duplicate this information on the constituent nodes. See :ref:`sec_nodes_or_individuals` for more discussion on this point. +.. _sec_data_model_definitions_sample: + sample The focal nodes of a tree sequence, usually thought of as those that we have obtained data from. The specification of these affects various @@ -66,18 +76,24 @@ sample for information on how the sample status a node is encoded in the ``flags`` column.) +.. _sec_data_model_definitions_edge: + edge The topology of a tree sequence is defined by a set of **edges**. Each edge is a tuple ``(left, right, parent, child)``, which records a parent-child relationship among a pair of nodes on the on the half-open interval of chromosome ``[left, right)``. +.. _sec_data_model_definitions_site: + site Tree sequences can define the mutational state of nodes as well as their topological relationships. A **site** is thought of as some position along the genome at which variation occurs. Each site is associated with a unique position and ancestral state. +.. _sec_data_model_definitions_mutation: + mutation A mutation records the change of state at a particular site 'above' a particular node (more precisely, along the branch between the node @@ -89,15 +105,23 @@ mutation back or recurrent mutations, a mutation must also specify its 'parent' mutation. +.. _sec_data_model_definitions_migration: + migration An event at which a parent and child node were born in different populations. +.. _sec_data_model_definitions_population: + population A grouping of nodes, e.g., by sampling location. +.. _sec_data_model_definitions_provenance: + provenance An entry recording the origin and history of the data encoded in a tree sequence. +.. _sec_data_model_definitions_ID: + ID In the set of interconnected tables that we define here, we refer throughout to the IDs of particular entities. The ID of an @@ -106,7 +130,9 @@ ID refer to node with ID zero, this corresponds to the node defined by the first row in the node table. -Sequence length +.. _sec_data_model_definitions_sequence_length: + +sequence length This value defines the coordinate space in which the edges and site positions are defined. 
This is most often assumed to be equal to the largest ``right`` coordinate in the edge table, but there are situations in which
gaussian_blur_function realized the gaussian blur line "if len(img_gblur)==3" still wasn't covered, and it was probably supposed to read as "if len(np.shape(img_gblur)) == 3" to check whether the blurred image is in grayscale or color
import cv2 import os +import numpy as np from plantcv.plantcv import print_image from plantcv.plantcv import plot_image from plantcv.plantcv import params @@ -32,7 +33,7 @@ def gaussian_blur(img, ksize, sigmax=0, sigmay=None): if params.debug == 'print': print_image(img_gblur, os.path.join(params.debug_outdir, str(params.device) + '_gaussian_blur.png')) elif params.debug == 'plot': - if len(img_gblur) == 3: + if len(np.shape(img_gblur)) == 3: plot_image(img_gblur) else: plot_image(img_gblur, cmap='gray')
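A short runnable illustration of why the original check was wrong: len() of a NumPy array is its first dimension, not its number of dimensions (the array shapes here are arbitrary examples):

import numpy as np

gray = np.zeros((480, 640), dtype=np.uint8)       # grayscale image: 2 dimensions
color = np.zeros((480, 640, 3), dtype=np.uint8)   # BGR image: 3 dimensions

print(len(gray), len(color))                      # 480 480 - row count, not ndim
print(len(np.shape(gray)), len(np.shape(color)))  # 2 3     - what the fixed check tests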
Make release work with new black settings Summary: quotes Test Plan: none Reviewers: nate, dgibson
@@ -106,7 +106,7 @@ def set_version_info(self, new_version, dry_run=True): """ assert isinstance(new_version, six.string_types) - output = "__version__ = '{}'\n".format(new_version) + output = 'version = "{}"\n'.format(new_version) version_file = self.version_file_path
expressions: make tautological assignments shedding actually work TN:
@@ -818,21 +818,20 @@ class ResolvedExpression(object): ' not).'.format(self) ) - result = self._render_pre() + pre = self._render_pre() + expr = str(self._render_expr()) # Some expressions build their result directly inside the result # variable, and thus their _render_pre() method will only return the # name of the result variable. In such cases, there is no need to # add a tautological assignment (X:=X), which would hamper generated # code reading anyway. - if self.result_var and result != str(self.result_var.name): + if self.result_var and expr != str(self.result_var.name): return '{}\n{} := {};'.format( - result, - self.result_var.name.camel_with_underscores, - self._render_expr() + pre, self.result_var.name.camel_with_underscores, expr, ) else: - return result + return pre def render_expr(self): """
Syntax: Update constant.numeric completions This commit updates the constant completions for sublime-syntax development according to the new scope naming guidelines. see:
@@ -14,10 +14,20 @@ DATA = """ constant numeric integer + binary + octal + decimal + hexadecimal + other float - hex + binary octal + decimal + hexadecimal + other complex + real + imaginary character escape language
Do not raise ValueError on invalid time range If an invalid time range is provided when requesting entities, ignore it instead of raising ValueError. That is to unify the behaviour with other filters, e.g. invalid translation statuses.
@@ -2182,8 +2182,6 @@ class Entity(DirtyFieldsMixin, models.Model): if re.match('^[0-9]{12}-[0-9]{12}$', time): start, end = utils.parse_time_interval(time) pre_filters.append(Entity.objects.between_time_interval(locale, start, end)) - else: - raise ValueError(time) if author: pre_filters.append(Entity.objects.authored_by(locale, author.split(',')))
Add NameError to exception in avahi_announce beacon Fixes If you have dbus installed, but not avahi, you will get a NameError in the dbus.Interface call. We need to catch this situation and not stacktrace when the beacon gets loaded.
@@ -32,7 +32,7 @@ try: GROUP = dbus.Interface(BUS.get_object(avahi.DBUS_NAME, SERVER.EntryGroupNew()), avahi.DBUS_INTERFACE_ENTRY_GROUP) HAS_DBUS = True -except ImportError: +except (ImportError, NameError): HAS_DBUS = False except DBusException: HAS_DBUS = False
[bugfix][TEST] Fix broken solve_disambiguation.py test Travis still failing, because the new function is (as written in its description) supposed to return list of str, not list of Link
@@ -27,9 +27,7 @@ class TestGettingDisambigLinks(TestCase): minimum=0) page.text = '* [[Link1]]\n* [[Link2]]' newlinks = bot.get_disambiguation_links(page) - links = [ - pywikibot.Link('Link1', self.site), - pywikibot.Link('Link2', self.site)] + links = ['Link1', 'Link2'] self.assertEqual(newlinks, links) def test_get_without_templates(self): @@ -39,7 +37,7 @@ class TestGettingDisambigLinks(TestCase): minimum=0) page.text = '* [[Link1]]\n{{Disambig}}' newlinks = bot.get_disambiguation_links(page) - links = [pywikibot.Link('Link1', self.site)] + links = ['Link1'] self.assertEqual(newlinks, links)
Fix for ceph_status function name Fixes:
@@ -508,7 +508,7 @@ class HealthMonitorThread(threading.Thread): health_status = self.ceph_cluster.get_ceph_health(detail=True) if "HEALTH_ERROR" in health_status: self.ceph_cluster.health_error_status = ( - self.ceph_cluster.ceph_status() + self.ceph_cluster.get_ceph_status() )
Updated feature flag category and docs link While this might be useful for other projects it's currently being developed around ICDS's needs.
@@ -1089,9 +1089,10 @@ MESSAGE_LOG_METADATA = StaticToggle( BULK_CONDITIONAL_ALERTS = StaticToggle( 'bulk_conditional_alerts', - 'Allow bulk download and upload of conditional alerts.', - TAG_PRODUCT, + 'Allow bulk download and upload of conditional alerts', + TAG_CUSTOM, [NAMESPACE_DOMAIN], + help_link='https://confluence.dimagi.com/display/ccinternal/Allow+bulk+download+and+upload+of+conditional+alerts', ) COPY_CONDITIONAL_ALERTS = StaticToggle(
perf(graphene): replace json with orjson Accelerates the legacy leaves call; JSON parsing most likely improved by ~5%.
from collections import defaultdict from datetime import datetime import math -import json +import orjson import os import pickle import posixpath @@ -387,7 +387,7 @@ class CloudVolumeGraphene(CloudVolumePrecomputed): else: url = posixpath.join(self.meta.base_path, path, "roots") args['node_ids'] = segids - data = json.dumps(args).encode('utf8') + data = orjson.dumps(args).encode('utf8') if gzip_condition: data = compression.compress(data, method='gzip') @@ -398,7 +398,7 @@ class CloudVolumeGraphene(CloudVolumePrecomputed): if binary: return np.frombuffer(response.content, dtype=np.uint64) else: - return json.loads(response.content)['root_ids'] + return orjson.loads(response.content)['root_ids'] def _get_roots_legacy(self, segids, timestamp): args = {}
fix: createLocalStoragePersistor to createWebStoragePersistor Due to breaking change in the offline cache API in React Query 3.17.0
@@ -5,7 +5,7 @@ import { UseQueryOptions, UseQueryResult, } from "react-query"; -import { createLocalStoragePersistor } from "react-query/createLocalStoragePersistor-experimental"; +import { createWebStoragePersistor } from "react-query/createWebStoragePersistor-experimental"; import { ReactQueryDevtools } from "react-query/devtools"; import { persistQueryClient } from "react-query/persistQueryClient-experimental"; @@ -22,7 +22,8 @@ export const queryClient = new QueryClient({ }, }); -const persistor = createLocalStoragePersistor({ +const persistor = createWebStoragePersistor({ + storage: localStorage, throttleTime: 100, });
Add info to user scoreboard csv * Add info to user scoreboard csv Added user id and user email fields to the user mode scoreboard csv export as per issue * Run formatter
@@ -111,7 +111,13 @@ def dump_scoreboard_csv(): ) writer.writerow(user_row) elif is_users_mode(): - header = ["place", "user", "score"] + user_field_names + header = [ + "place", + "user name", + "user id", + "user email", + "score", + ] + user_field_names writer.writerow(header) for i, standing in enumerate(standings): @@ -122,7 +128,13 @@ def dump_scoreboard_csv(): user_field_values = [ user_field_entries.get(f_id, "") for f_id in user_field_ids ] - user_row = [i + 1, user.name, standing.score] + user_field_values + user_row = [ + i + 1, + user.name, + user.id, + user.email, + standing.score, + ] + user_field_values writer.writerow(user_row) # In Python 3 send_file requires bytes
server_events: Fix the update code for `add_emoji_by_admins_only`. `add_emoji_by_admins_only` backend setting is represented by page_param's `realm_add_emoji_by_admins_only` attribute. When this setting was changed, we were wrongly updating the `add_emoji_by_admins_only` attribute, which doesn't exist.
@@ -74,7 +74,7 @@ exports.dispatch_normal_event = function dispatch_normal_event(event) { page_params.realm_email_changes_disabled = event.value; settings_org.toggle_email_change_display(); } else if (event.op === 'update' && event.property === 'add_emoji_by_admins_only') { - page_params.add_emoji_by_admins_only = event.value; + page_params.realm_add_emoji_by_admins_only = event.value; } else if (event.op === 'update' && event.property === 'restricted_to_domain') { page_params.realm_restricted_to_domain = event.value; } else if (event.op === 'update' && event.property === 'message_retention_days') {
FIXME: Update 029-render_widget_host_view_mac.patch TODO: Restore support of transparent windows, see
@@ -2,14 +2,6 @@ diff --git a/content/browser/renderer_host/render_widget_host_view_mac.mm b/cont index faedde495761..c9d2beabd798 100644 --- a/content/browser/renderer_host/render_widget_host_view_mac.mm +++ b/content/browser/renderer_host/render_widget_host_view_mac.mm -@@ -88,6 +88,7 @@ - #include "ui/gfx/geometry/size_conversions.h" - #include "ui/gfx/scoped_ns_graphics_context_save_gstate_mac.h" - #include "ui/gl/gl_switches.h" -+#include "ui/gl/gpu_switching_manager.h" - - using content::BrowserAccessibility; - using content::BrowserAccessibilityManager; @@ -138,6 +139,11 @@ RenderWidgetHostView* GetRenderWidgetHostViewToUse( } // namespace @@ -80,14 +72,3 @@ index faedde495761..c9d2beabd798 100644 nil]); } return validAttributesForMarkedText_.get(); -@@ -3537,6 +3555,10 @@ extern NSString *NSTextInputReplacementRangeAttributeName; - } - - - (BOOL)isOpaque { -+ bool wantsTransparent = ui::GpuSwitchingManager::UseTransparent() || -+ (self.window && ![self.window isOpaque]); -+ if (wantsTransparent) -+ return NO; - return opaque_; - } -
MacOS: homebrew arm-none-eabi-gcc works again Revert "Fix gcc-arm-embedded for m1 mac (#24515)" This reverts commit
@@ -52,14 +52,9 @@ brew "zeromq" brew "protobuf" brew "protobuf-c" brew "swig" +cask "gcc-arm-embedded" EOS -# Install gcc-arm-embedded 10.3-2021.10. 11.x is broken on M1 Macs with Xcode 13.3~ -brew uninstall gcc-arm-embedded || true -curl -L https://github.com/Homebrew/homebrew-cask/raw/d407663b8017a0a062c7fc0b929faf2e16abd1ff/Casks/gcc-arm-embedded.rb > /tmp/gcc-arm-embedded.rb -brew install --cask /tmp/gcc-arm-embedded.rb -rm /tmp/gcc-arm-embedded.rb - echo "[ ] finished brew install t=$SECONDS" BREW_PREFIX=$(brew --prefix)
Changes node_modules to default to the same args for generate_sha1sum. Since we can use both prefer_offline=True and False in a single build, prefer_offline shouldn't be used as a cache key or it will confuse the cleanup script. Since yarn install (if successful) should be idempotent, this will probably be ok.
@@ -20,28 +20,35 @@ if 'TRAVIS' in os.environ: NODE_MODULES_CACHE_PATH = os.path.join(ZULIP_SRV_PATH, 'zulip-npm-cache') YARN_BIN = os.path.join(ZULIP_SRV_PATH, 'zulip-yarn/bin/yarn') -def generate_sha1sum_node_modules(yarn_args=None): - # type: (Optional[List[str]]) -> str +DEFAULT_PRODUCTION = False + +def get_yarn_args(production): + # type: (bool) -> List[str] + if production: + yarn_args = ["--prod"] + else: + yarn_args = [] + return yarn_args + +def generate_sha1sum_node_modules(production=DEFAULT_PRODUCTION): + # type: (bool) -> str sha1sum = hashlib.sha1() sha1sum.update(subprocess_text_output(['cat', 'package.json']).encode('utf8')) sha1sum.update(subprocess_text_output(['cat', 'yarn.lock']).encode('utf8')) sha1sum.update(subprocess_text_output([YARN_BIN, '--version']).encode('utf8')) sha1sum.update(subprocess_text_output(['node', '--version']).encode('utf8')) - if yarn_args is not None: + yarn_args = get_yarn_args(production=production) sha1sum.update(''.join(sorted(yarn_args)).encode('utf8')) return sha1sum.hexdigest() -def setup_node_modules(production=False, stdout=None, stderr=None, copy_modules=False, +def setup_node_modules(production=DEFAULT_PRODUCTION, stdout=None, stderr=None, copy_modules=False, prefer_offline=False): # type: (bool, Optional[IO], Optional[IO], bool, bool) -> None - if production: - yarn_args = ["--prod"] - else: - yarn_args = [] + yarn_args = get_yarn_args(production=production) if prefer_offline: yarn_args.append("--prefer-offline") - sha1sum = generate_sha1sum_node_modules(yarn_args) + sha1sum = generate_sha1sum_node_modules(production=production) target_path = os.path.join(NODE_MODULES_CACHE_PATH, sha1sum) cached_node_modules = os.path.join(target_path, 'node_modules') success_stamp = os.path.join(target_path, '.success-stamp')
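The intent of the change, as a minimal sketch: the cache key is derived only from inputs that affect the installed tree, so a flag like --prefer-offline never alters it (the cache_key function and sample inputs below are illustrative, not the project's code):

import hashlib

def cache_key(package_json: bytes, yarn_lock: bytes, production: bool) -> str:
    h = hashlib.sha1()
    h.update(package_json)
    h.update(yarn_lock)
    h.update(b"--prod" if production else b"")  # prefer-offline deliberately excluded
    return h.hexdigest()

base = cache_key(b"{}", b"# lock", production=False)
print(base == cache_key(b"{}", b"# lock", production=False))  # True  - same inputs, same key
print(base == cache_key(b"{}", b"# lock", production=True))   # False - --prod changes the tree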
MAINT: previous patch not complete [CHANGED] need to specify the identifier as a keyword argument
@@ -266,15 +266,16 @@ class Composable(ComposableType): def __call__(self, val, *args, **kwargs): # initial invocation always transfers call() to first composable # element to get input for self + refobj = kwargs.get("identifier", val) if not val: return val if self.checkpointable: - job_done = self.job_done(val) + job_done = self.job_done(refobj) if job_done and self.output: - result = self._load_checkpoint(val) + result = self._load_checkpoint(refobj) elif job_done: - result = self._make_output_identifier(val) + result = self._make_output_identifier(refobj) if job_done: return result
Caffe2: fix error C2398 and syntax error with Visual Studio 2015 Summary: Similar fix to [pull #7024](https://github.com/pytorch/pytorch/pull/7024). Pull Request resolved:
@@ -249,13 +249,13 @@ ImageInputOp<Context>::ImageInputOp( // hard-coded PCA eigenvectors and eigenvalues, based on RBG channel order color_lighting_eigvecs_.push_back( - std::vector<float>{-144.7125, 183.396, 102.2295}); + std::vector<float>{-144.7125f, 183.396f, 102.2295f}); color_lighting_eigvecs_.push_back( - std::vector<float>{-148.104, -1.1475, -207.57}); + std::vector<float>{-148.104f, -1.1475f, -207.57f}); color_lighting_eigvecs_.push_back( - std::vector<float>{-148.818, -177.174, 107.1765}); + std::vector<float>{-148.818f, -177.174f, 107.1765f}); - color_lighting_eigvals_ = std::vector<float>{0.2175, 0.0188, 0.0045}; + color_lighting_eigvals_ = std::vector<float>{0.2175f, 0.0188f, 0.0045f}; CAFFE_ENFORCE_GT(batch_size_, 0, "Batch size should be nonnegative."); if (use_caffe_datum_) { @@ -466,7 +466,7 @@ bool ImageInputOp<Context>::GetImageAndLabelAndInfoFromDBValue( CV_8UC1, const_cast<char*>(datum.data().data())), color_ ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE); - if (src.rows == 0 or src.cols == 0) { + if (src.rows == 0 || src.cols == 0) { num_decode_errors_in_batch_++; src = cv::Mat::zeros(cv::Size(224, 224), CV_8UC3); } @@ -541,7 +541,7 @@ bool ImageInputOp<Context>::GetImageAndLabelAndInfoFromDBValue( CV_8UC1, const_cast<char*>(encoded_image_str.data())), color_ ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE); - if (src.rows == 0 or src.cols == 0) { + if (src.rows == 0 || src.cols == 0) { num_decode_errors_in_batch_++; src = cv::Mat::zeros(cv::Size(224, 224), CV_8UC3); }
llvm, component: Do not memoize binary function. There already is a cache that preserves initialized instances based on function name.
@@ -924,7 +924,6 @@ class Component(object, metaclass=ComponentsMeta): _deepcopy_shared_keys = frozenset([ 'init_args', '_Component__llvm_function', - '_Component__llvm_bin_function', ]) class _CompilationData(ParametersBase): @@ -1167,21 +1166,17 @@ class Component(object, metaclass=ComponentsMeta): # MODIFIED 12/4/18 END self.__llvm_function = None - self.__llvm_bin_function = None self._compilation_data = self._CompilationData(owner=self) @property def _llvm_function(self): if self.__llvm_function is None: self.__llvm_function = self._gen_llvm_function() - self.__llvm_bin_function = None return self.__llvm_function @property def _llvmBinFunction(self): - if self.__llvm_bin_function is None: - self.__llvm_bin_function = pnlvm.LLVMBinaryFunction.get(self._llvm_function.name) - return self.__llvm_bin_function + return pnlvm.LLVMBinaryFunction.get(self._llvm_function.name) def _gen_llvm_function(self, extra_args=[]): llvm_func = None @@ -1228,7 +1223,6 @@ class Component(object, metaclass=ComponentsMeta): fun = get_deepcopy_with_shared_Components(self._deepcopy_shared_keys) newone = fun(self, memo) newone.__dict__['_Component__llvm_function'] = None - newone.__dict__['_Component__llvm_bin_function'] = None if newone.parameters is not newone.class_parameters: # may be in DEFERRED INIT, so parameters/defaults belongs to class
refactor(rez-pip): use dist info metadata attributes Use metadata attributes to retrieve core data such as the name, version and summary of the requested package. Relates
@@ -303,13 +303,12 @@ def pip_install_package(source_name, pip_version=None, python_version=None, variant_reqs.append("python-%s" % py_ver) - name, _ = parse_name_and_version(distribution.name_and_version) - name = distribution.name[0:len(name)].replace("-", "_") + name = metadata.name with make_package(name, packages_path, make_root=make_root) as pkg: - pkg.version = distribution.version - if distribution.metadata.summary: - pkg.description = distribution.metadata.summary + pkg.version = metadata.version + if metadata.summary: + pkg.description = metadata.summary pkg.variants = [variant_reqs] if requirements:
asc: Add retries and a timeout to asc_send Fixes:
@@ -109,8 +109,10 @@ bool asc_can_send(asc_dev_t *asc) bool asc_send(asc_dev_t *asc, const struct asc_message *msg) { - if (!asc_can_send(asc)) + if (poll32(asc->base + ASC_MBOX_A2I_CONTROL, ASC_MBOX_CONTROL_FULL, 0, 200000)) { + printf("asc: A2I mailbox full for 200ms. Is the ASC stuck?"); return false; + } dma_wmb(); write64(asc->base + ASC_MBOX_A2I_SEND0, msg->msg0);
ci: Install and run specific version of PostgreSQL ref:
@@ -8,6 +8,15 @@ addons: mariadb: 10.3 postgresql: 12.4 firefox: latest + apt: + packages: + - postgresql-12 + - postgresql-client-12 + +env: + global: + - PGPORT=5433 + - PGUSER=travis services: - xvfb @@ -25,7 +34,6 @@ cache: # https://docs.cypress.io/guides/guides/continuous-integration.html#Caching - ~/.cache - matrix: include: - name: "Python 3.7 MariaDB"
Release version 4.0.0.dev0 Breaking changes: dimod 0.10.x dwave-cloud-client 0.9.x
# See the License for the specific language governing permissions and # limitations under the License. -__version__ = '3.5.0' +__version__ = '4.0.0.dev0' __author__ = 'D-Wave Systems Inc.' __authoremail__ = '[email protected]' __description__ = 'Software development kit for open source D-Wave tools'
fix warnings from baybikes pipeline Test Plan: bk Reviewers: alangenfeld
mode_defs=MODES, preset_defs=WEATHER_INGEST_PRESETS + TRIP_INGEST_PRESETS + TRAINING_PRESETS, ) def generate_training_set_and_train_model(): - return train_daily_bike_supply_model(weather_etl(), trip_etl()) + train_daily_bike_supply_model(weather_etl(), trip_etl()) @pipeline( mode_defs=MODES, preset_defs=WEATHER_INGEST_PRESETS, ) def daily_weather_pipeline(): - return weather_etl() + weather_etl()
start putting debug_prints to use in the trace output $ PYPYLOG=loading-linklet,jit-log-opt,jit-summary:output-file pycket
@@ -7,6 +7,8 @@ from pycket.expand import JsonLoader from pycket.util import console_log, LinkletPerf, linklet_perf, PerfRegion from pycket.prims.correlated import syntax_primitives +from rpython.rlib.debug import debug_start, debug_stop, debug_print + def locate_linklet(file_name): import os from pycket.error import SchemeException @@ -63,13 +65,21 @@ def load_bootstrap_linklets(pycketconfig, debug=False): def load_inst_linklet_json(json_file_name, pycketconfig, debug=False, set_version=False): from pycket.env import w_version + debug_start("loading-linklet") + debug_print("loading and instantiating : %s" % json_file_name) + console_log("Loading linklet from %s" % json_file_name) linkl, sys_config = W_Linklet.load_linklet(json_file_name, JsonLoader(), set_version) + debug_print("DONE with loading : %s" % json_file_name) + console_log("Instantiating %s ...." % json_file_name) + debug_print("Instantiating %s ...." % json_file_name) instantiate_linklet = get_primitive("instantiate-linklet") linkl_instance = instantiate_linklet.call_interpret([linkl, w_null, w_false, w_false], pycketconfig) - console_log("DONE.") + debug_print("DONE Instantiating %s ...." % json_file_name) + debug_stop("loading-linklet") + console_log("DONE with the %s." % json_file_name) return linkl_instance, sys_config def set_path(kind_str, path_str):
DOC: Example for scipy.sparse.linalg.inv Added a small example to the docstring of scipy.sparse.linalg.inv
@@ -51,6 +51,22 @@ def inv(A): to be non-sparse, it will likely be faster to convert `A` to dense and use scipy.linalg.inv. + Examples + -------- + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import inv + >>> A = csc_matrix([[1., 0.], [1., 2.]]) + >>> Ainv = inv(A) + >>> Ainv + <2x2 sparse matrix of type '<type 'numpy.float64'>' + with 3 stored elements in Compressed Sparse Column format> + >>> A.dot(Ainv) + <2x2 sparse matrix of type '<type 'numpy.float64'>' + with 2 stored elements in Compressed Sparse Column format> + >>> A.dot(Ainv).todense() + matrix([[ 1., 0.], + [ 0., 1.]]) + .. versionadded:: 0.12.0 """
Fix 1 line and 3 lines between functions Fixes pylint complaints from jenkins: 17:29:08 salt/cloud/clouds/ec2.py:3740: [E8302(expected-2-blank-lines-found-0), ] PEP8 E302: expected 2 blank lines, found 0 17:29:08 salt/cloud/clouds/ec2.py:3768: [E8303(too-many-blank-lines-3), ] PEP8 E303: too many blank lines (3)
@@ -3716,7 +3716,6 @@ def disable_detailed_monitoring(name, call=None): Enable/disable detailed monitoring on a node CLI Example: - ''' if call != 'action': raise SaltCloudSystemExit( @@ -3737,6 +3736,7 @@ def disable_detailed_monitoring(name, call=None): return show_detailed_monitoring(name=name, instance_id=instance_id, call='action') + def enable_detailed_monitoring(name, call=None): ''' Enable/disable detailed monitoring on a node @@ -3764,7 +3764,6 @@ def enable_detailed_monitoring(name, call=None): return show_detailed_monitoring(name=name, instance_id=instance_id, call='action') - def show_delvol_on_destroy(name, kwargs=None, call=None): ''' Do not delete all/specified EBS volumes upon instance termination
fix epo playbook substr
@@ -211,7 +211,7 @@ tasks: - "10" scriptarguments: left: ${mcafee.latestDAT} - right: ${mcafee.epoDAT=val.substr(1,val.indexOf('.'))} + right: ${mcafee.epoDAT=val.substr(0,val.indexOf('.'))} results: - AreValuesEqual view: |-
setup.py: update the list of packages to install For (no-tn-check)
@@ -22,7 +22,11 @@ setup( url='https://www.adacore.com', description='A Python framework to generate language parsers', requires=['Mako', 'coverage', 'PyYAML', 'enum', 'enum34', 'funcy'], - packages=['langkit', 'langkit.utils', 'langkit.expressions'], + packages=['langkit', + 'langkit.expressions', + 'langkit.gdb', + 'langkit.stylechecks', + 'langkit.utils'], package_data={'langkit': [ 'support/*.adb', 'support/*.ads', 'support/*.gpr', 'templates/*.mako', 'templates/*/*.mako'
Fix broken references in docs The ExtractionPipelineRunsAPI class was moved, but the references to it in the docs were not.
@@ -1076,11 +1076,11 @@ Extraction pipeline runs ^^^^^^^^^^^^^^^^^^^^^^^^ List runs for an extraction pipeline ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automethod:: cognite.client._api.extractionpipelineruns.ExtractionPipelineRunsAPI.list +.. automethod:: cognite.client._api.extractionpipelines.ExtractionPipelineRunsAPI.list Report new runs ~~~~~~~~~~~~~~~ -.. automethod:: cognite.client._api.extractionpipelineruns.ExtractionPipelineRunsAPI.create +.. automethod:: cognite.client._api.extractionpipelines.ExtractionPipelineRunsAPI.create Data classes
Alpine: Also exclude "libstdc++" on Alpine * Similar was done for Anaconda already. They bring their own stuff, and the goal is apparently only to run on Alpine if compiled there.
@@ -27,7 +27,7 @@ from nuitka.PythonFlavors import isAnacondaPython from nuitka.Tracing import inclusion_logger from nuitka.utils.Execution import executeProcess, withEnvironmentPathAdded from nuitka.utils.SharedLibraries import getSharedLibraryRPATH -from nuitka.utils.Utils import isPosixWindows +from nuitka.utils.Utils import isAlpineLinux, isPosixWindows from .DllDependenciesCommon import getLdLibraryPath @@ -205,7 +205,8 @@ _linux_dll_ignore_list = [ "libdrm.so", ] -if isAnacondaPython(): +if isAnacondaPython() or isAlpineLinux(): # Anaconda has these with e.g. torchvision, and insists on them being very new, # so they have to be included. + # Alpine linux does not include `libstdc++.so` by default. _linux_dll_ignore_list.remove("libstdc++.so")
[doc] Show pagegenerators options with -help; also remove unused NL constant
@@ -19,6 +19,8 @@ write him in German and English. Command line options: +&params; + -always Skip the GUI validation -setcat: Set the category of the copied image @@ -51,9 +53,7 @@ deletion):: python pwb.py imagecopy.py -page:Image:<imagename> -delete -See pagegenerators.py for more ways to get a list of images. By default the bot works on your home wiki (set in user-config) - """ # Based on upload.py by: # (C) Rob W.W. Hooft, Andre Engels 2003-2007 @@ -93,7 +93,11 @@ except ImportError as _tk_error: Tkinter = _tk_error Tkdialog = object -NL = '' +# This is required for the text that is shown when you run this script +# with the parameter -help. +docuReplacements = { + '&params;': pagegenerators.parameterHelp +} nowCommonsTemplate = { '_default': u'{{NowCommons|%s}}',
Fix MediaBlock SVG icon size in the rich text editor Fixes
line-height: 1; padding: $controls-spacing * 2 $controls-spacing * 3; pointer-events: none; + + .icon { + @include svg-icon(); + } } &__icon {
Switch to Pecan by default Use the pecan framework by default. Implements: blueprint wsgi-pecan-switch
@@ -117,11 +117,11 @@ core_opts = [ cfg.BoolOpt('vlan_transparent', default=False, help=_('If True, then allow plugins that support it to ' 'create VLAN transparent networks.')), - cfg.StrOpt('web_framework', default='legacy', + cfg.StrOpt('web_framework', default='pecan', choices=('legacy', 'pecan'), help=_("This will choose the web framework in which to run " - "the Neutron API server. 'pecan' is a new experimental " - "rewrite of the API server.")), + "the Neutron API server. 'pecan' is a new " + "rewrite of the API routing components.")), cfg.IntOpt('global_physnet_mtu', default=constants.DEFAULT_NETWORK_MTU, deprecated_name='segment_mtu', deprecated_group='ml2', help=_('MTU of the underlying physical network. Neutron uses '
Add command to run `recreate_pdf_for_precompiled_or_uploaded_letter` We already had the `replay-create-pdf-for-templated-letter` command. This adds a new command, `recreate-pdf-for-precompiled-or-uploaded-letter` which does the same thing but for non-templated letters.
@@ -19,7 +19,10 @@ from sqlalchemy.orm.exc import NoResultFound from app import db from app.aws import s3 -from app.celery.letters_pdf_tasks import get_pdf_for_templated_letter +from app.celery.letters_pdf_tasks import ( + get_pdf_for_templated_letter, + resanitise_pdf, +) from app.celery.reporting_tasks import ( create_nightly_notification_status_for_day, ) @@ -279,6 +282,14 @@ def replay_create_pdf_for_templated_letter(notification_id): get_pdf_for_templated_letter.apply_async([str(notification_id)], queue=QueueNames.CREATE_LETTERS_PDF) +@notify_command(name='recreate-pdf-for-precompiled-or-uploaded-letter') [email protected]('-n', '--notification_id', type=click.UUID, required=True, + help="Notification ID of the precompiled or uploaded letter") +def recreate_pdf_for_precompiled_or_uploaded_letter(notification_id): + print(f"Call resanitise_pdf task for notification: {notification_id}") + resanitise_pdf.apply_async([str(notification_id)], queue=QueueNames.LETTERS) + + @notify_command(name='replay-service-callbacks') @click.option('-f', '--file_name', required=True, help="""Full path of the file to upload, file is a contains client references of
Create the artifacts directory in case any test utilities wish to use it, harmless if they do not
@@ -31,6 +31,11 @@ function setup { rm *.log || true + if [ -n "$GITHUB_ACTIONS" ]; then + # create the artifacts dir with container-write ownership + install -dm0755 -o cchq -g cchq ./artifacts + fi + pip-sync requirements/test-requirements.txt pip check # make sure there are no incompatibilities in test-requirements.txt python_preheat # preheat the python libs
Fix non-removed interior axis labels and diagonal legends in PairGrid These were oversights from
@@ -1278,6 +1278,22 @@ class PairGrid(Grid): self._legend_data = {} # Make the plot look nice + for ax in axes[:-1, :].flat: + if ax is None: + continue + for label in ax.get_xticklabels(): + label.set_visible(False) + ax.xaxis.offsetText.set_visible(False) + ax.xaxis.label.set_visible(False) + + for ax in axes[:, 1:].flat: + if ax is None: + continue + for label in ax.get_yticklabels(): + label.set_visible(False) + ax.yaxis.offsetText.set_visible(False) + ax.yaxis.label.set_visible(False) + self._tight_layout_rect = [.01, .01, .99, .99] self._tight_layout_pad = layout_pad self._despine = despine @@ -1436,6 +1452,7 @@ class PairGrid(Grid): plot_kwargs.setdefault("hue_order", self._hue_order) plot_kwargs.setdefault("palette", self._orig_palette) func(x=vector, **plot_kwargs) + ax.legend_ = None self._add_axis_labels() return self
Missing Double Quotes in the Creating a Sawtooth Network Instructions Double Quotes are missing in "Creating a Sawtooth Network", step 1. This line: `sawtooth.poet.report_public_key_pem=$(cat /etc/sawtooth/simulator_rk_pub.pem)` Should be: `sawtooth.poet.report_public_key_pem="$(cat /etc/sawtooth/simulator_rk_pub.pem)"` Otherwise, running `sawset proposal create -k` fails with the following error: `sawset: error: unrecognised argument`
@@ -320,7 +320,7 @@ in :doc:`ubuntu`. $ sawset proposal create -k /etc/sawtooth/keys/validator.priv \ -o config.batch \ sawtooth.consensus.algorithm=poet \ - sawtooth.poet.report_public_key_pem=$(cat /etc/sawtooth/simulator_rk_pub.pem) \ + sawtooth.poet.report_public_key_pem="$(cat /etc/sawtooth/simulator_rk_pub.pem)" \ sawtooth.poet.valid_enclave_measurements=$(poet enclave measurement) \ sawtooth.poet.valid_enclave_basenames=$(poet enclave basename)
TEMPFIX autoscaler provision issue in China region [issue](https://github.com/kubernetes/autoscaler/issues/3276) [temporary fix](https://github.com/kubernetes/autoscaler/issues/3076) is to set aws-use-static-instance-list = true
@@ -23,6 +23,8 @@ Conditions: NeedsStaticList: !Or - !Equals [!Ref 'AWS::Region', 'af-south-1'] - !Equals [!Ref 'AWS::Region', 'eu-south-1'] + - !Equals [!Ref 'AWS::Region', 'cn-north-1'] + - !Equals [!Ref 'AWS::Region', 'cn-northwest-1'] Mappings: Config: Prefix: { Value: 'eks-quickstart' }
[Keyvault] Fix deprecated/removed method in keyvault certificate download `base64.encodestring()` has been a deprecated alias for `base64.encodebytes()` since Python 3.1, and has now been removed in Python 3.9. This replaces the removed/deprecated method with the one it was aliased to. Fixes
@@ -1570,7 +1570,7 @@ def download_certificate(client, file_path, vault_base_url=None, certificate_nam f.write(cert) else: import base64 - encoded = base64.encodestring(cert) # pylint:disable=deprecated-method + encoded = base64.encodebytes(cert) if isinstance(encoded, bytes): encoded = encoded.decode("utf-8") encoded = '-----BEGIN CERTIFICATE-----\n' + encoded + '-----END CERTIFICATE-----\n'
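A one-line illustration of the replacement (the certificate bytes are a placeholder):

import base64

cert = b"example certificate bytes"                 # placeholder for the downloaded cert
encoded = base64.encodebytes(cert).decode("utf-8")  # encodestring() is gone in Python 3.9+
print(encoded)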
Remove repetitive command and simplify code Remove the line that calls calculate_pressure_matrix_numerical() without checking the argument first. In calculate_pressure_matrix_numerical(), replace the loop with a numpy operation to improve efficiency.
@@ -278,7 +278,6 @@ class FluidFlow: self.geometry_description() self.analytical_pressure_matrix_available = False self.numerical_pressure_matrix_available = False - self.calculate_pressure_matrix_numerical() if immediately_calculate_pressure_matrix_numerically: self.calculate_pressure_matrix_numerical() @@ -592,13 +591,9 @@ class FluidFlow: c1, c2, c0w = self.calculate_coefficients() M, f = self.mounting_matrix(c1, c2, c0w) P = self.resolves_matrix(M, f) - for i in range(self.nz): - for j in range(self.ntheta): - k = j * self.nz + i - if P[k] < 0: - self.p_mat_numerical[i, j] = 0 - else: - self.p_mat_numerical[i, j] = P[k] + self.p_mat_numerical = np.clip( + P.reshape((self.ntheta, self.nz)), a_min=0, a_max=None + ).T self.numerical_pressure_matrix_available = True return self.p_mat_numerical
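A small sketch checking that the vectorized expression matches the removed loop; the dimensions and the flat `P` vector are invented for the example:

```python
import numpy as np

nz, ntheta = 3, 4
P = np.arange(nz * ntheta, dtype=float) - 5.0  # flat solver output with some negative entries

# Loop version removed by the patch: p[i, j] = P[j * nz + i], clamped at zero.
p_loop = np.empty((nz, ntheta))
for i in range(nz):
    for j in range(ntheta):
        p_loop[i, j] = max(P[j * nz + i], 0.0)

# Vectorized replacement: reshape to (ntheta, nz), clip negatives, transpose.
p_vec = np.clip(P.reshape((ntheta, nz)), a_min=0, a_max=None).T

assert np.array_equal(p_loop, p_vec)
print(p_vec)
```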
Adding _run_command_interactive For commands that wait for user input
@@ -67,6 +67,16 @@ class Recipe(abc.ABC): output, error = process.communicate() return output, error + @staticmethod + def _run_command_interactive(command): + """Runs the given interactive command that waits for user input and returns any output and error""" + process = subprocess.Popen( + command.split(), shell=True, stderr=subprocess.PIPE + ) + output, error = process.communicate() + return output, error + + @staticmethod def _get_project_id(): get_project_command = "gcloud config list --format value(core.project)"
[GDPR] Fix omission RE code jam system info Full text: Rectified an omission in the old privacy policy, where the system information you provide as part of a code jam signup wasn't mentioned.
<td>A factor in code jam team match-ups</td> <td>Administrative staff</td> </tr> + + <tr class="thick-bottom-border"> + <td>System Information</td> + <td class="uk-table-shrink">Code jam signup</td> + <td>Used to verify that you have a working environment</td> + <td>Administrative staff</td> + </tr> </tbody> </table> </p> <ul class="uk-list uk-list-divider"> + <li> + <h4>June 5th, 2018</h4> + <p> + Rectified an omission in the old privacy policy, where the system information you provide + as part of a code jam signup wasn't mentioned. + </p> + </li> <li> <h4>May 31st, 2018</h4> <p>
Update installing.rst I would default to installing vaex as user, not as root. Modified docs accordingly.
@@ -14,7 +14,7 @@ The vaex program (with this you cannot do any Python programming) is available f See the next section how to get it. For using vaex as a library, install vaex using pip or conda. * **Standalone version**: download ( `osx <//vaex.astro.rug.nl/program/vaex-1.0.0-beta.4-osx.zip>`_ | `linux <//vaex.astro.rug.nl/program/vaex-1.0.0-beta.4-linux.tar.gz>`_ ) - * **Python package**: ``pip install --pre vaex`` (no root? use ``pip install --pre --user vaex``) + * **Python package**: ``pip install --pre --user vaex`` (for system-wide install, use ``pip install --pre vaex``) * **Anaconda users**: ``conda install -c conda-forge vaex``
Update GB Source is ENTSO-E like before
] ], "capacity": { - "biomass": 4237, - "coal": 6780, - "gas": 38274, + "biomass": 4528, + "coal": 5241, + "gas": 39822, "geothermal": 0, - "hydro": 1882, - "hydro storage": 4052, - "nuclear": 8209, + "hydro": 1919, + "hydro storage": 4309, + "nuclear": 8256, "oil": 0, - "solar": 13276, - "unknown": 6155, - "wind": 23200 + "solar": 13378, + "unknown": 4708, + "wind": 25097 }, "contributors": [ "https://github.com/corradio",
Update zotero.py This change enables compatibility with python3 (which is broken because dict.keys() returns an iterable and not a list in python3). Also using sets instead of lists ensures that the order of the keys does not matter.
@@ -902,7 +902,7 @@ class Zotero(object): 'filename']) template = template | set(self.temp_keys) for pos, item in enumerate(items): - if item.keys() == [u'links', u'library', u'version', u'meta', u'key', u'data']: + if set(item) == set([u'links', u'library', u'version', u'meta', u'key', u'data']): # we have an item that was retrieved from the API item = item['data'] to_check = set(i for i in list(item.keys()))
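A minimal illustration of why the set comparison is needed; the dict contents are placeholders:

```python
# On Python 3, dict.keys() is a view object, so comparing it to a list is
# always False even when the keys match; comparing sets works on both
# Python 2 and 3 and ignores key order.
item = {"links": 1, "library": 2, "version": 3, "meta": 4, "key": 5, "data": 6}

print(item.keys() == ["links", "library", "version", "meta", "key", "data"])  # False on Python 3
print(set(item) == {"links", "library", "version", "meta", "key", "data"})    # True
```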
Adjust pop-up docs position Shift the pop-up docs to the right position depending on the responsive breakpoint
@@ -106,10 +106,18 @@ header { .filter-input .popover { top: 27px; + left: 43px; display: block; max-width: none; opacity: 0.9; + @media (max-width: @screen-xs-max) { + + top: 16px; + left: 29px; + right: 2px; + } + .popover-content { max-height: 500px; overflow-y: auto;
fix: Whitelist bootup log for SGI XFS verify_boot_error_fail_warnings test fails on Mariner due to an SGI XFS log message displayed on bootup. Add the log to the whitelist as it does not indicate any failure.
@@ -203,6 +203,7 @@ class AzureImageStandard(TestSuite): re.compile(r"(.*was skipped because of a failed condition check.*)$", re.M), re.compile(r"^(.*GRUB failed boot detection.*)$", re.M), re.compile(r"^(.*nofail.*)$", re.M), + re.compile(r"^(.*SGI XFS with ACLs, security attributes, realtime, verbose warnings, quota, no debug enabled.*)$", re.M) ] @TestCaseMetadata(
gear-menu: Inherit body color. Instead of overriding the default link color with grey, just inherit the body color.
@@ -1745,6 +1745,10 @@ blockquote p { box-shadow: 0px 0px 5px hsla(0, 0%, 0%, 0.2); } +.nav .dropdown-menu a { + color: inherit; +} + .nav .dropdown-menu:after { position: absolute; width: 0px;
Update coproxamol.json Added a comment in the SQL
], "numerator_from": "{hscic}.normalised_prescribing_standard ", "numerator_where": [ - "(bnf_code LIKE '0407010Q0%') " + "(", + "bnf_code LIKE '0407010Q0%' -- Co-Proxamol (Dextroprop HCl/Paracet) (brand and generic) \n", + ")" ], "denominator_columns": [ "SUM(total_list_size / 1000.0) AS denominator, "
Fix Makefile: install frontend dependencies from locked file Previously it was overwriting the lockfile and installing the newest versions.
@@ -54,7 +54,7 @@ schema : ## node_modules : install front-end dependencies using Yarn node_modules : package.json - yarn install + yarn install --frozen-lockfile touch node_modules ## git_version : store details about the current commit and tree state.
[AIR] Fix `TensorflowTrainer` docstring example You can't run the code, because we don't provide an argument to train_loop_per_worker.
@@ -85,25 +85,23 @@ class TensorflowTrainer(DataParallelTrainer): Example: - .. code-block:: python + .. testcode:: import tensorflow as tf import ray from ray.air import session, Checkpoint - from ray.train.tensorflow import TensorflowTrainer from ray.air.config import ScalingConfig - - input_size = 1 + from ray.train.tensorflow import TensorflowTrainer def build_model(): # toy neural network : 1-layer return tf.keras.Sequential( [tf.keras.layers.Dense( - 1, activation="linear", input_shape=(input_size,))] + 1, activation="linear", input_shape=(1,))] ) - def train_loop_for_worker(config): + def train_loop_per_worker(config): dataset_shard = session.get_dataset_shard("train") strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() with strategy.scope(): @@ -111,12 +109,12 @@ class TensorflowTrainer(DataParallelTrainer): model.compile( optimizer="Adam", loss="mean_squared_error", metrics=["mse"]) - for epoch in range(config["num_epochs"]): tf_dataset = dataset_shard.to_tf( feature_columns="x", label_columns="y", batch_size=1 ) + for epoch in range(config["num_epochs"]): model.fit(tf_dataset) # You can also use ray.air.integrations.keras.Callback # for reporting and checkpointing instead of reporting manually. @@ -127,13 +125,20 @@ class TensorflowTrainer(DataParallelTrainer): ), ) - train_dataset = ray.data.from_items( - [{"x": x, "y": x + 1} for x in range(32)]) - trainer = TensorflowTrainer(scaling_config=ScalingConfig(num_workers=3), + train_dataset = ray.data.from_items([{"x": x, "y": x + 1} for x in range(32)]) + trainer = TensorflowTrainer( + train_loop_per_worker=train_loop_per_worker, + scaling_config=ScalingConfig(num_workers=3), datasets={"train": train_dataset}, - train_loop_config={"num_epochs": 2}) + train_loop_config={"num_epochs": 2}, + ) result = trainer.fit() + .. testoutput:: + :hide: + :options: +ELLIPSIS + + ... Args: train_loop_per_worker: The training function to execute.
`!user` command says if user is "Verified" Previously, `!user` said whether the user was "Pending", whereas "Verified" is the boolean opposite.
@@ -229,9 +229,9 @@ class Information(Cog): if on_server: joined = time_since(user.joined_at, max_units=3) roles = ", ".join(role.mention for role in user.roles[1:]) - membership = {"Joined": joined, "Pending": user.pending, "Roles": roles or None} + membership = {"Joined": joined, "Verified": not user.pending, "Roles": roles or None} if not is_mod_channel(ctx.channel): - membership.pop("Pending") + membership.pop("Verified") membership = textwrap.dedent("\n".join([f"{key}: {value}" for key, value in membership.items()])) else:
settings: Change heading of modal used to edit user info. This commit changes the heading of modal used by admins to edit user info to "Manage user".
@@ -586,7 +586,7 @@ function handle_human_form(tbody, status_field) { } dialog_widget.launch({ - html_heading: $t_html({defaultMessage: "Change user info and roles"}), + html_heading: $t_html({defaultMessage: "Manage user"}), html_body, on_click: submit_user_details, post_render: set_role_dropdown_and_fields_user_pills,
Retry on 500 in HTTPClient.request The official Discord client retries on 500, so in the worst case we're no worse than the official client, which seriously outnumbers us.
@@ -193,8 +193,8 @@ class HTTPClient: continue - # we've received a 502, unconditional retry - if r.status == 502 and tries <= 5: + # we've received a 500 or 502, unconditional retry + if r.status in {500, 502} and tries <= 5: yield from asyncio.sleep(1 + tries * 2, loop=self.loop) continue
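A hedged sketch of the retry pattern in isolation, with the same `1 + tries * 2` backoff; `fake_request()` is an invented stand-in for the real HTTP call:

```python
import asyncio
import random

async def fake_request():
    """Invented stand-in for the real HTTP call; returns a status code."""
    return random.choice([200, 500, 502])

async def request_with_retry():
    for tries in range(5):
        status = await fake_request()
        # Retry server errors unconditionally, backing off a little more each
        # time, mirroring the `1 + tries * 2` sleep in the patch above.
        if status in {500, 502}:
            await asyncio.sleep(1 + tries * 2)
            continue
        return status
    raise RuntimeError("request kept failing")

print(asyncio.run(request_with_retry()))
```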
Remove unnecessary constraints The sets `NON_FIXED_INVESTFLOWS` and `FIXED_INVESTFLOWS` and their corresponding constraints are removed because they seem to be unnecessary in the `NonConvexInvestFlow` class, and removing them should reduce the computation time.
@@ -466,16 +466,6 @@ class NonConvexInvestFlowBlock(SimpleBlock): ] ) - # Investment-related sets similar to the - # <class 'oemof.solph.flows.InvestmentFlow'> class. - self.FIXED_INVESTFLOWS = Set( - initialize=[(g[0], g[1]) for g in group if g[2].fix[0] is not None] - ) - - self.NON_FIXED_INVESTFLOWS = Set( - initialize=[(g[0], g[1]) for g in group if g[2].fix[0] is None] - ) - # New nonconvex-investment-related set defines in the # <class 'oemof.solph.flows.NonconvexInvestFlow'> class. self.NON_CONVEX_INVEST_FLOWS = Set( @@ -722,21 +712,6 @@ class NonConvexInvestFlowBlock(SimpleBlock): self.NON_CONVEX_INVEST_FLOWS, rule=_max_invest_rule ) - def _investflow_fixed_rule(block, i, o, t): - """Rule definition of constraint to fix flow variable - of investment flow to (normed) actual value - """ - expr = m.flow[i, o, t] == ( - (m.flows[i, o].investment.existing + self.invest[i, o]) - * m.flows[i, o].fix[t] - ) - - return expr - - self.fixed = Constraint( - self.FIXED_INVESTFLOWS, m.TIMESTEPS, rule=_investflow_fixed_rule - ) - # New nonconvex-investment-related constraints defined in the # <class 'oemof.solph.flows.NonConvexInvestFlow'> class.
Update coords.py Updated failures to be more informative, allowing more intelligent error catches.
@@ -191,6 +191,10 @@ def scale_units(out_unit, in_unit): if in_unit.lower() in accepted_units[kk]: in_key = kk + if out_key is None and in_key is None: + raise ValueError(''.join(['Cannot scale {:s} and '.format(in_unit) + '{:s}, unknown units'.format(out_unit)])) + if out_key is None: raise ValueError('Unknown output unit {:}'.format(out_unit))
Inject scheme into compared urls This injects a scheme into some compared urls, as urlsplit in some versions will fail to split properly if the scheme is absent.
@@ -909,6 +909,8 @@ class TestGeneratePresignedPost(unittest.TestCase): class TestGenerateDBAuthToken(BaseSignerTest): + maxDiff = None + def setUp(self): self.session = botocore.session.get_session() self.client = self.session.create_client( @@ -935,7 +937,11 @@ class TestGenerateDBAuthToken(BaseSignerTest): 'us-east-1%2Frds-db%2Faws4_request&X-Amz-Signature' '=d1138cdbc0ca63eec012ec0fc6c2267e03642168f5884a7795320d4c18374c61' ) - self.assert_url_equal(result, expected_result) + + # A scheme needs to be appended to the beginning or urlsplit may fail + # on certain systems. + self.assert_url_equal( + 'https://' + result, 'https://' + expected_result) def test_custom_region(self): hostname = 'host.us-east-1.rds.amazonaws.com'
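A short illustration of the underlying `urlsplit` behaviour; the URL is a made-up example in the same shape as the RDS auth token:

```python
from urllib.parse import urlsplit

url = "host.us-east-1.rds.amazonaws.com:3306/?Action=connect&DBUser=someone"

# How the schemeless string is split varies across Python versions, which is
# why the test prepends 'https://' to both URLs before comparing them.
print(urlsplit(url))
print(urlsplit("https://" + url))
# The second call reliably yields netloc='host.us-east-1.rds.amazonaws.com:3306'
# and query='Action=connect&DBUser=someone'.
```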
Cleanup holiday.py Guess who didn't run tox again
@@ -19,10 +19,9 @@ class Holiday: Returns true if the holiday is celebrated today ''' now = datetime.now() - return self.date.month == now.month and \ - self.date.day == now.day + return self.date.month == now.month and self.date.day == now.day [email protected]_schedule('cron', hour=14, minute=45, timezone='Australia/Brisbane') [email protected]_schedule('cron', hour=9, timezone='Australia/Brisbane') def holiday() -> None: ''' Posts a random celebratory day on #general from @@ -30,8 +29,7 @@ def holiday() -> None: ''' channel = bot.channels.get("general") - now = datetime.now().strftime("%d %b").lstrip('0') - holiday = get_holiday(now) + holiday = get_holiday() if holiday is None: return @@ -40,7 +38,7 @@ def holiday() -> None: channel=channel.id, timestamp=message['ts']) -def get_holiday(day:str) -> Holiday: +def get_holiday() -> Holiday: ''' Gets the holiday for a given day. If there are multiple holidays, choose a random one.
ASTNodeType, StructType, _EnumType: remove nullexpr() override TN:
@@ -1453,6 +1453,7 @@ class StructType(BaseStructType): name, location, doc, is_ptr=False, null_allowed=True, + nullexpr=(names.Name('No') + name).camel_with_underscores, is_ada_record=True, # A compile pass will tag all StructType subclasses that are @@ -1487,13 +1488,6 @@ class StructType(BaseStructType): CompiledTypeMetaclass.root_grammar_class.entity(), ) - def nullexpr(self): - """ - Return a value that can be considered as "null" for this struct type. - :rtype: str - """ - return (names.Name('No') + self.name).camel_with_underscores - def c_inc_ref(self, capi): """ Name of the C API function to inc-ref structure value. @@ -1594,8 +1588,8 @@ class ASTNodeType(BaseStructType): name, location, doc, is_ptr=True, null_allowed=True, is_ada_record=False, is_list_type=is_list, should_emit_array_type=not is_root, - exposed=True, is_refcounted=False, py_nullexpr='None', - element_type=element_type + exposed=True, is_refcounted=False, nullexpr=null_constant(), + py_nullexpr='None', element_type=element_type ) self._base = base @@ -1698,15 +1692,6 @@ class ASTNodeType(BaseStructType): result = self._repr_name or self.name.camel return result - def nullexpr(self): - """ - Return a value that can be considered as "null" for this AST node type. - It indicates the absence of AST node. - - :rtype: str - """ - return null_constant() - def is_builtin(self): """ Some AST nodes are considered "built-in", which means that either no @@ -2248,7 +2233,7 @@ class _EnumType(CompiledType): """ super(_EnumType, self).__init__( name, location, doc, - is_ptr=False + is_ptr=False, nullexpr='Uninitialized' ) self.alternatives = alternatives self.suffix = suffix @@ -2283,9 +2268,6 @@ class _EnumType(CompiledType): def name(self): return self._name + names.Name('Type') - def nullexpr(self): - return 'Uninitialized' - def c_type(self, c_api_settings): return CAPIType(c_api_settings, self.base_name())
PR to fix test_pods_are_not_oomkilled_while_running_ios CI run Updated the fio runtime to be 2 minutes less than the get_fio_result() timeout value.
@@ -66,7 +66,7 @@ class TestPodAreNotOomkilledWhileRunningIO(E2ETest): log.info(f"Running FIO to fill PVC size: {io_size_mb}") self.pod_obj.run_io( - 'fs', size=io_size_mb, io_direction='write', runtime=600 + 'fs', size=io_size_mb, io_direction='write', runtime=480 ) log.info("Waiting for IO results")
Randomize xla port Summary: fixes Pull Request resolved:
@@ -213,7 +213,9 @@ test_custom_script_ops() { test_xla() { export XLA_USE_XRT=1 XRT_DEVICE_MAP="CPU:0;/job:localservice/replica:0/task:0/device:XLA_CPU:0" - export XRT_WORKERS="localservice:0;grpc://localhost:40934" + # Issue #30717: randomize the port of XLA/gRPC workers is listening on to reduce flaky tests. + XLA_PORT=`shuf -i 40701-40999 -n 1` + export XRT_WORKERS="localservice:0;grpc://localhost:$XLA_PORT" pushd xla echo "Running Python Tests" ./test/run_tests.sh
Reduce size of search filter calendar image The current image feels a little oversized
@@ -37,7 +37,7 @@ Use ``before:`` to find posts before a specified date and ``after:`` to find pos - Searching ``website on: 2018-09-01`` will return messages that contain the keyword ``website`` that were posted on September 1, 2018. .. image:: ../../images/calendar2.png - + :width: 300 px Quotation Marks ^^^^^^^^^^^^^^^^^
Remove invalid assumption from FITS test case... It was incorrect to assume that the HDU data field would always be padded to 2880 bytes, so this check has been removed from the tests.
@@ -90,7 +90,6 @@ def test_embed_asdf_in_fits_file(tmpdir, backwards_compat): assert asdf_hdu.data.tostring().strip().endswith(b'...') else: assert isinstance(asdf_hdu, fits_embed._AsdfHDU) - assert len(asdf_hdu.data) % 2880 == 0 with fits_embed.AsdfInFits.open(hdulist2) as ff2: assert_tree_match(tree, ff2.tree) @@ -116,7 +115,6 @@ def test_embed_asdf_in_fits_file_anonymous_extensions(tmpdir): asdf_hdu = hdulist['ASDF'] assert isinstance(asdf_hdu, fits_embed._AsdfHDU) assert asdf_hdu.data.tostring().startswith(b'#ASDF') - assert len(asdf_hdu.data) % 2880 == 0 with fits_embed.AsdfInFits.open(hdulist) as ff2: assert_tree_match(asdf_in_fits.tree, ff2.tree)
Allow one extra 5s period in scale-in test Sometimes the scale-in takes one 5-second period longer than usual (because the idle time is 5 seconds minus a tiny amount) and so the workers have not timed out by the time the final assert fires.
@@ -72,7 +72,7 @@ def test_scale_out(): assert dfk.executors['htex_local'].outstanding == 0, "Expected 0 outstanding tasks after future completion" logger.info("waiting a while for scale down") - time.sleep(20) + time.sleep(25) logger.info("asserting 2 managers remain") assert len(dfk.executors['htex_local'].connected_managers) == 2, "Expected 2 managers when no tasks, lower bound by min_blocks"
docs: Add links towards GitHub guide in installation instructions. This adds two extra links: one in the overall requirements, and one before Step 2: Get Zulip Code. Fixes
@@ -11,6 +11,7 @@ all related services will run. Contents: * [Requirements](#requirements) +* [Step 0: Set up Git & GitHub](#step-0-set-up-git-github) * [Step 1: Install Prerequisites](#step-1-install-prerequisites) * [Step 2: Get Zulip code](#step-2-get-zulip-code) * [Step 3: Start the development environment](#step-3-start-the-development-environment) @@ -44,7 +45,7 @@ connection throughout the entire installation processes. (See [Specifying a proxy](#specifying-a-proxy) if you need a proxy to access the internet.) -- **All**: 2GB available RAM, Active broadband internet connection. +- **All**: 2GB available RAM, Active broadband internet connection, [GitHub account][set-up-git]. - **macOS**: macOS (10.11 El Capitan or 10.12 Sierra recommended), Git, [VirtualBox][vbox-dl], [Vagrant][vagrant-dl-macos]. - **Ubuntu**: 14.04 64-bit or 16.04 64-bit, Git, [Vagrant][vagrant-dl-deb], lxc. @@ -57,6 +58,14 @@ proxy](#specifying-a-proxy) if you need a proxy to access the internet.) Don't see your system listed above? See [Advanced setup][install-advanced] for details about installing for other Linux and UNIX platforms. +### Step 0: Set up Git & GitHub + +Follow our [Git Guide][set-up-git] in order to install Git and set up a GitHub account. + +If you haven't already created an ssh key and added it to your GitHub account, +you should do that now by following [these +instructions](https://help.github.com/articles/generating-an-ssh-key/). + ### Step 1: Install Prerequisites Jump to: @@ -251,10 +260,6 @@ Now you are ready for [Step 2: Get Zulip Code.](#step-2-get-zulip-code) ### Step 2: Get Zulip Code -If you haven't already created an ssh key and added it to your GitHub account, -you should do that now by following [these -instructions](https://help.github.com/articles/generating-an-ssh-key/). - 1. In your browser, visit <https://github.com/zulip/zulip> and click the `fork` button. You will need to be logged in to GitHub to do this. @@ -1004,3 +1009,4 @@ for the IP address that means any IP address can connect to your development ser [rtd-dev-remote]: dev-remote.html [git-bash]: https://git-for-windows.github.io/ [bash-admin-setup]: https://superuser.com/questions/1002262/run-applications-as-administrator-by-default-in-windows-10 +[set-up-git]: git-guide.html#set-up-git
Add stack descendants to status stream Mark stacks with M for master and N for nested
@@ -28,7 +28,14 @@ class TerminalPrinter: def _print_stack_tree(stack, buffer): padding_1 = " " buffer.append( - "{}{}stack {} {}".format(padding_1, "\u250f ", "\u24c5", stack.name) + "{}{}stack {} {}".format(padding_1, "\u250f ", "\u24c2", stack.name) + ) + if stack.descendants(): + for nested_stack in stack.descendants(): + buffer.append( + "{}{}stack {} {}".format( + padding_1, "\u2523 ", "\u24c3", nested_stack.name + ) ) buffer.append("{}{} region: {}".format(padding_1, "\u2523", stack.region_name)) buffer.append(
tox: Fix container purge jobs On containerized CI jobs the playbook executed is purge-cluster.yml, but it should be purge-docker-cluster.yml
@@ -239,10 +239,10 @@ setenv= INVENTORY = {env:_INVENTORY:hosts} container: CONTAINER_DIR = /container container: PLAYBOOK = site-docker.yml.sample + container: PURGE_PLAYBOOK = purge-docker-cluster.yml storage_inventory: COPY_ADMIN_KEY = True podman: PLAYBOOK = site-docker.yml.sample non_container: PLAYBOOK = site.yml.sample - container-purge_cluster: PURGE_PLAYBOOK = purge-docker-cluster.yml shrink_mon: MON_TO_KILL = mon2 shrink_osd: COPY_ADMIN_KEY = True
`maestral set-dir` now takes the new path as argument ... instead of an option
@@ -460,7 +460,7 @@ def notify(config_name: str, yes: bool, running: bool): @main.command() @with_config_opt [email protected]("--new-path", "-p", type=click.Path(writable=True), default=None) [email protected]("new_path", required=False, type=click.Path(writable=True)) def set_dir(config_name: str, new_path: str, running: bool): """Change the location of your Dropbox folder.""" @@ -695,8 +695,7 @@ def clear(config_name: str, running: bool): @log.command() [email protected]('level_name', required=False, - type=click.Choice(['DEBUG', 'INFO', 'WARNING', 'ERROR'])) [email protected]('level_name', required=False, type=click.Choice(['DEBUG', 'INFO', 'WARNING', 'ERROR'])) @with_config_opt def level(config_name: str, level_name: str, running: bool): """Gets or sets the log level. Changes will take effect after restart."""
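A minimal, self-contained sketch of the option-to-argument change using a hypothetical `set-dir` command; the names and paths are illustrative, not maestral's actual CLI:

```python
import click

@click.command(name="set-dir")
@click.argument("new_path", required=False, type=click.Path(writable=True))
def set_dir(new_path):
    """Takes the new folder as a positional argument, e.g. `set-dir ~/Dropbox`,
    instead of the old `--new-path/-p` option."""
    click.echo(f"new path: {new_path}")

if __name__ == "__main__":
    set_dir()
```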
Don't be verbose when testing ansible An experiment to see if this alleviates
@@ -12,7 +12,7 @@ curl https://bootstrap.pypa.io/get-pip.py | python sed -i 's/^# *\(en_US.UTF-8\)/\1/' /etc/locale.gen && locale-gen # Run the playbook -/usr/local/bin/ansible-playbook -v travis.yml +/usr/local/bin/ansible-playbook travis.yml # Do minimal database-connection test su -c 'SKIP_NPM_BUILD=1 /openprescribing/venv/bin/python /openprescribing/openprescribing/manage.py test frontend.tests.test_models.SearchBookmarkTestCase' vagrant
Update common/chromium/disable_user_gesture_requirement_for_beforeunload_dialogs.patc The Great Blink mv for source files, part 2
-diff --git a/third_party/WebKit/Source/core/dom/Document.cpp b/third_party/WebKit/Source/core/dom/Document.cpp +diff --git a/third_party/blink/renderer/core/dom/document.cc b/third_party/blink/renderer/core/dom/document.cc index a39067db8c52..4a0d69dc3fb5 100644 ---- a/third_party/WebKit/Source/core/dom/Document.cpp -+++ b/third_party/WebKit/Source/core/dom/Document.cpp +--- a/third_party/blink/renderer/core/dom/document.cc ++++ b/third_party/blink/renderer/core/dom/document.cc @@ -3239,7 +3239,9 @@ bool Document::DispatchBeforeUnloadEvent(ChromeClient& chrome_client, "Blocked attempt to show a 'beforeunload' confirmation panel for a " "frame that never had a user gesture since its load. "
Fix qualitative file creation name Fix the qualitative file name so that it does not contain ':'. This allows the file to be exported under this filename without errors
@@ -288,7 +288,7 @@ class DialogImportSurvey(QtWidgets.QDialog): case_text_list = [] if self.fields_type[field] == "qualitative": self.fields[field] - # create one text file combining each row, prefix [case identfier] to each row. + # create one text file combining each row, prefix [case identifier] to each row. pos0 = 0 pos1 = 0 fulltext = "" @@ -300,8 +300,10 @@ class DialogImportSurvey(QtWidgets.QDialog): pos1 = len(fulltext) - 2 case_text = [self.settings['codername'], now_date, "", pos0, pos1, name_and_caseids[row][1]] case_text_list.append(case_text) - # add the current time to the file name to prevent sqlite Integrity Error - cur.execute(source_sql, (self.fields[field] +"_" + str(now_date), fulltext, "", self.settings['codername'], now_date)) + # add the current time to the file name to ensure uniqueness and to + # prevent sqlite Integrity Error. Do not use now_date which contains colons + now = str(datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S")) + cur.execute(source_sql, (self.fields[field] +"_" + now, fulltext, "", self.settings['codername'], now_date)) self.settings['conn'].commit() cur.execute("select last_insert_rowid()") fid = cur.fetchone()[0]
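A tiny sketch of the filename-safe timestamp trick; the field name is a placeholder:

```python
from datetime import datetime

# Colons in "%H:%M:%S" are not legal in filenames on some platforms and broke
# the export, so the file name uses dashes while the stored date keeps colons.
file_suffix = datetime.now().strftime("%Y-%m-%d %H-%M-%S")
stored_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print("survey_field_" + file_suffix)  # safe to use as a source name / filename
print(stored_date)                    # fine for the date column
```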
fix escalator lock HG-- branch : feature/dcs
@@ -17,11 +17,11 @@ from noc.core.scheduler.scheduler import Scheduler class EscalatorService(Service): name = "escalator" + leader_lock_name = "escalator" def __init__(self): super(EscalatorService, self).__init__() self.scheduler = None - leader_lock_name = "escalator" @tornado.gen.coroutine def on_activate(self):
INSTALL.md: break the big blocks of code, and remove the space Remove the first space in the block of code as it is not necessary. This ``` $ python --version ``` Becomes this ``` $ python --version ``` Also break the big block of code into individual blocks.
@@ -26,11 +26,12 @@ Could not connect to daemon. Are you sure it's running? macOS users will need to install [xcode command line tools](https://developer.xamarin.com/guides/testcloud/calabash/configuring/osx/install-xcode-command-line-tools/) and [homebrew](http://brew.sh/). These environment variables also need to be set: -1. PYTHONUNBUFFERED=1 -2. EVENT_NOKQUEUE=1 +``` +PYTHONUNBUFFERED=1 +EVENT_NOKQUEUE=1 +``` Remaining dependencies can then be installed by running: - ``` brew install python protobuf ``` @@ -40,7 +41,6 @@ Assistance installing Python3: https://docs.python-guide.org/starting/install3/o ### Linux On Ubuntu (we recommend 18.04 or 20.04), install the following: - ``` sudo add-apt-repository ppa:deadsnakes/ppa sudo apt-get update @@ -59,50 +59,66 @@ If you're running another Linux distro, install the equivalent of the above pack ### Linux/Mac -To install on Linux/Mac: - - ``` Clone the repository: +``` $ git clone https://github.com/lbryio/lbry-sdk.git $ cd lbry-sdk +``` Create a Python virtual environment for lbry-sdk: +``` $ python3.7 -m venv lbry-venv +``` - Activating lbry-sdk virtual environment: +Activate virtual environment: +``` $ source lbry-venv/bin/activate +``` - Make sure you're on Python 3.7+ (as the default Python in virtual environment): +Make sure you're on Python 3.7+ as default in the virtual environment: +``` $ python --version +``` Install packages: +``` $ make install +``` If you are on Linux and using PyCharm, generates initial configs: +``` $ make idea ``` To verify your installation, `which lbrynet` should return a path inside of the `lbry-venv` folder. +``` +(lbry-venv) $ which lbrynet +/opt/lbry-sdk/lbry-venv/bin/lbrynet +``` To exit the virtual environment simply use the command `deactivate`. ### Windows -To install on Windows: - - ``` Clone the repository: +``` > git clone https://github.com/lbryio/lbry-sdk.git > cd lbry-sdk +``` Create a Python virtual environment for lbry-sdk: +``` > python -m venv lbry-venv +``` - Activating lbry-sdk virtual environment: +Activate virtual environment: +``` > lbry-venv\Scripts\activate +``` Install packages: +``` > pip install -e . ``` @@ -115,12 +131,10 @@ The easiest way to start it is using docker with: ```bash make elastic-docker ``` -Alternative installation methods are available [at Elasticsearch website](https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html). - +Alternative installation methods are available [at Elasticsearch website](https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html). To run the unit and integration tests from the repo directory: - ``` python -m unittest discover tests.unit python -m unittest discover tests.integration
Explicitly set HardwareParams in test_auto_scheduler_sketch_generation. * This test depended on the number of CPU cores available, and failed when cores < 4.
""" Test sketch generation. """ +import sys import tvm import tvm.testing + +import pytest + from tvm import te, auto_scheduler from tvm.auto_scheduler import _ffi_api from tvm.auto_scheduler.loop_state import Stage @@ -39,7 +43,14 @@ from test_auto_scheduler_common import ( def generate_sketches( workload_func, args, target, print_for_debug=False, init_search_callbacks=None ): - task = auto_scheduler.SearchTask(func=workload_func, args=args, target=target) + # NOTE: test_cpu_matmul_sketch and test_cpu_max_pool2d_sketch assume 4 cores to trigger all + # possible sketch generations. + task = auto_scheduler.SearchTask( + func=workload_func, + args=args, + target=target, + hardware_params=auto_scheduler.HardwareParams(num_cores=4, target=target), + ) policy = auto_scheduler.SketchPolicy( task, verbose=0, init_search_callbacks=init_search_callbacks ) @@ -440,18 +451,4 @@ def test_cuda_zero_rank_sketch(): if __name__ == "__main__": - test_cpu_matmul_sketch() - test_cpu_conv2d_bn_relu_sketch() - test_cpu_max_pool2d_sketch() - test_cpu_min_sketch() - test_cpu_softmax_sketch() - test_cpu_conv2d_winograd_sketch() - test_cpu_zero_rank_sketch() - test_cpu_custom_sketch() - test_cuda_matmul_sketch() - test_cuda_conv2d_bn_relu_sketch() - test_cuda_max_pool2d_sketch() - test_cuda_min_sketch() - test_cuda_softmax_sketch() - test_cuda_conv2d_winograd_sketch() - test_cuda_zero_rank_sketch() + sys.exit(pytest.main([__file__] + sys.argv[1:]))
ENH Added point_size option for voronoi_plot_2d * Added point_size option for voronoi_plot_2d * Changed default point_size to None
@@ -136,6 +136,9 @@ def voronoi_plot_2d(vor, ax=None, **kw): Specifies the line width for polygon boundaries line_alpha: float, optional Specifies the line alpha for polygon boundaries + point_size: float, optional + Specifies the size of points + Returns ------- @@ -157,7 +160,8 @@ def voronoi_plot_2d(vor, ax=None, **kw): raise ValueError("Voronoi diagram is not 2-D") if kw.get('show_points', True): - ax.plot(vor.points[:,0], vor.points[:,1], '.') + point_size = kw.get('point_size', None) + ax.plot(vor.points[:,0], vor.points[:,1], '.', markersize=point_size) if kw.get('show_vertices', True): ax.plot(vor.vertices[:,0], vor.vertices[:,1], 'o')
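Assuming a SciPy release that includes this keyword, usage would look roughly like this; the random points are placeholders:

```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import Voronoi, voronoi_plot_2d

points = np.random.rand(20, 2)
vor = Voronoi(points)
# point_size is forwarded to the marker size of the input points;
# the default of None keeps matplotlib's usual size.
voronoi_plot_2d(vor, show_vertices=False, point_size=10)
plt.show()
```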
Fix NameError for typing context manager. Fixes
@@ -51,7 +51,7 @@ class Typing: await asyncio.sleep(5) def __enter__(self): - self.task = create_task(self.do_typing(), loop=self.loop) + self.task = asyncio.ensure_future(self.do_typing(), loop=self.loop) self.task.add_done_callback(_typing_done_callback) return self
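A self-contained sketch of the background-task-plus-done-callback pattern using `asyncio.ensure_future`, which exists under that name (unlike the bare `create_task` that raised the NameError); the coroutine and callback names are illustrative, not discord.py's actual code:

```python
import asyncio

def _typing_done_callback(task):
    # Ignore cancellation when the context manager exits and cancels the task.
    if task.cancelled():
        return
    task.exception()  # retrieve any real error so it is not logged as unhandled

async def do_typing():
    while True:
        await asyncio.sleep(5)

async def main():
    task = asyncio.ensure_future(do_typing())
    task.add_done_callback(_typing_done_callback)
    await asyncio.sleep(0.1)
    task.cancel()

asyncio.run(main())
```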
Fixed tests Fixed the name of SARSALambda (was SARSALambdaDiscrete)
@@ -114,7 +114,7 @@ def test_sarsa(): def test_sarsa_lambda_discrete(): - alg = SARSALambdaDiscrete(pi, mdp.info, Parameter(.1), .9) + alg = SARSALambda(pi, mdp.info, Parameter(.1), .9) alg.Q.table = np.arange(np.prod(mdp.info.size)).reshape( mdp.info.size).astype(np.float)
[commands] update sys.modules in load_extension again introduced a regression: loading a module that is not in a package does not add it to sys.modules. Updating sys.modules is required after all.
@@ -590,19 +590,23 @@ class BotBase(GroupMixin): def _load_from_module_spec(self, spec, key): # precondition: key not in self.__extensions lib = importlib.util.module_from_spec(spec) + sys.modules[key] = lib try: spec.loader.exec_module(lib) except Exception as e: + del sys.modules[key] raise errors.ExtensionFailed(key, e) from e try: setup = getattr(lib, 'setup') except AttributeError: + del sys.modules[key] raise errors.NoEntryPointError(key) try: setup(self) except Exception as e: + del sys.modules[key] self._remove_module_references(lib.__name__) self._call_module_finalizers(lib, key) raise errors.ExtensionFailed(key, e) from e @@ -635,7 +639,7 @@ class BotBase(GroupMixin): NoEntryPointError The extension does not have a setup function. ExtensionFailed - The extension setup function had an execution error. + The extension or its setup function had an execution error. """ if name in self.__extensions:
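In isolation, the spec-loading pattern with the sys.modules bookkeeping looks roughly like this; a hedged sketch with simplified error handling, not discord.py's actual implementation:

```python
import importlib.util
import sys

def load_from_path(name, path):
    """Load a module from a file, registering it in sys.modules before
    executing it (so it is visible to the import machinery during its own
    execution, as the importlib docs recommend) and rolling the registration
    back if execution fails."""
    spec = importlib.util.spec_from_file_location(name, path)
    lib = importlib.util.module_from_spec(spec)
    sys.modules[name] = lib
    try:
        spec.loader.exec_module(lib)
    except Exception:
        del sys.modules[name]
        raise
    return lib
```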
Turn uidl() into an overload. The previous change was still right. Unique among `poplib` functions, `uidl()` returns a long response when called without arguments, but just a bytes string when called with a `which` argument.
@@ -4,7 +4,7 @@ from mypy_extensions import NoReturn import socket import ssl import sys -from typing import Any, BinaryIO, Dict, List, Optional, Pattern, Text, Tuple +from typing import Any, BinaryIO, Dict, List, Optional, overload, Pattern, Text, Tuple _LongResp = Tuple[bytes, List[bytes], int] @@ -49,7 +49,12 @@ class POP3: else: def apop(self, user: Text, password: Text) -> bytes: ... def top(self, which: Any, howmuch: int) -> _LongResp: ... - def uidl(self, which: Optional[Any] = ...) -> _LongResp: ... + + @overload + def uidl(self) -> _LongResp: ... + @overload + def uidl(self, which: Any) -> bytes: ... + if sys.version_info >= (3, 5): def utf8(self) -> bytes: ... if sys.version_info >= (3, 4):
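A compact, runnable sketch of the `@overload` pattern outside a stub file; the class name and dummy return values are placeholders (real .pyi stubs end each overload with `...` and omit the implementation):

```python
from typing import Any, List, Tuple, overload

_LongResp = Tuple[bytes, List[bytes], int]

class POP3Sketch:
    @overload
    def uidl(self) -> _LongResp: ...
    @overload
    def uidl(self, which: Any) -> bytes: ...

    def uidl(self, which: Any = None):
        # Outside a stub, a single real implementation must follow the
        # overload declarations; type checkers only look at the overloads.
        if which is None:
            return (b"+OK", [b"1 abc"], 12)
        return b"+OK 1 abc"
```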