message: string (lengths 13-484)
diff: string (lengths 38-4.63k)
new FixedTCS, ElastodynamicsBasicTCS in new sfepy/solvers/ts_controllers.py update solver_table
@@ -13,8 +13,8 @@ solver_files = [name for name in solver_files solver_table = load_classes(solver_files, [AutoFallbackSolver, LinearSolver, NonlinearSolver, - TimeSteppingSolver, EigenvalueSolver, - QuadraticEVPSolver, + TimeStepController, TimeSteppingSolver, + EigenvalueSolver, QuadraticEVPSolver, OptimizationSolver], package_name='sfepy.solvers')
Fix test tags Some test tags were accidentally changed in
@@ -71,7 +71,7 @@ py_test_module_list( "test_protobuf_compatibility.py" ], size = "medium", - tags = ["exclusive", "client_tests", "team:serverless"], + tags = ["exclusive", "medium_size_python_tests_a_to_j", "team:core"], deps = ["//:ray_lib", ":conftest"], ) @@ -80,7 +80,7 @@ py_test( srcs = ["test_joblib.py"], data = ["mnist_784_100_samples.pkl"], size = "medium", - tags = ["exclusive", "client_tests", "team:serverless"], + tags = ["exclusive", "medium_size_python_tests_a_to_j", "team:core"], deps = ["//:ray_lib", ":conftest"], )
drop unnecessary soname check on OpenBSD The changelog for the 4.1 release says "No longer automatically disable setting SONAME on shared libraries on OpenBSD." This bit was left out.
@@ -57,13 +57,6 @@ obj = env.SharedObject('bar', 'foo.c') Default(env.Library(target='foo', source=obj)) """) -test.write('SConstructBaz', """ -env = Environment() -env['SHLIBVERSION'] = '1.0.0' -obj = env.SharedObject('baz', 'foo.c') -Default(env.SharedLibrary(target='baz', source=obj)) -""") - test.write('foo.c', r""" #include <stdio.h> @@ -288,12 +281,6 @@ main(int argc, char *argv[]) test.run(program = test.workpath('progbar'), stdout = "f4.c\nprogbar.c\n") -if sys.platform.startswith('openbsd'): - # Make sure we don't link libraries with -Wl,-soname on OpenBSD. - test.run(arguments = '-f SConstructBaz') - for line in test.stdout().split('\n'): - test.fail_test(line.find('-Wl,-soname=libbaz.so') != -1) - test.pass_test() # Local Variables:
Fix ordering for user nodes This separates the nodes fix from **Purpose** Fix sorting nodes in the dashboard by modified date **Changes** Sort nodes by last_logged instead of modified **Ticket**
@@ -281,7 +281,7 @@ class UserNodes(JSONAPIBaseView, generics.ListAPIView, UserMixin, NodesFilterMix view_category = 'users' view_name = 'user-nodes' - ordering = ('-modified',) + ordering = ('-last_logged',) # overrides NodesFilterMixin def get_default_queryset(self):
Make parallel scheduler index function more efficient _index_of_txn_in_schedule is called frequently, and it was building out an entire list of transaction ids just to find the index of one of them. It's faster to use a lazy enumeration.
@@ -774,8 +774,11 @@ class ParallelScheduler(Scheduler): for prior in self._batches[:index_of_batch_in_schedule]: number_of_txns_in_prior_batches += len(prior.transactions) - 1 - txn_ids_in_order = [t.header_signature for t in batch.transactions] - txn_index = txn_ids_in_order.index(txn_id) + txn_index, _ = next( + (i, t) + for i, t in enumerate(batch.transactions) + if t.header_signature == txn_id) + return number_of_txns_in_prior_batches + txn_index def _can_fail_fast(self, txn_id):
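For illustration only, a minimal standalone sketch of the pattern described above, using a hypothetical Txn record rather than Sawtooth's actual transaction objects: building a full list just to call .index() materializes every id, while a generator with enumerate() stops at the first match.

from collections import namedtuple

Txn = namedtuple("Txn", ["header_signature"])
transactions = [Txn("sig-%d" % i) for i in range(100000)]
txn_id = "sig-3"

# Eager version: builds the whole list of ids before searching.
eager_index = [t.header_signature for t in transactions].index(txn_id)

# Lazy version: enumerate() is consumed only until the first match.
lazy_index, _ = next(
    (i, t) for i, t in enumerate(transactions)
    if t.header_signature == txn_id)

assert eager_index == lazy_index == 3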
Update README.md Changed ordering
@@ -36,11 +36,11 @@ If you wish to contribute, please start by reading the [contribution guidelines] **Some external projects built using our API:** * [Searchable collection](https://too-many-incidents.netlify.app/) of incidents and related evidence -* [List](https://policebrutality.netlify.app/) of incidents that you can filter by city and state +* [Android app](https://github.com/amardeshbd/android-police-brutality-incidents) to view incidents sorted by location +* [List of incidents](https://policebrutality.netlify.app/) that you can filter by city and state * [Timeline](https://bread.codes/PoliceBrutality/) of police brutality events * [Dashboard](https://datastudio.google.com/s/oFSSsjw2kAY) with stats at the city and state levels * [Android app](https://github.com/andrewsnyder328/2020PoliceBrutalityApk/) to visually view posts -* [Android app](https://github.com/amardeshbd/android-police-brutality-incidents) to view all incidents by location _(open-source)_ ## Background
Telegram notification bug fix Notifications were being sent, but without the 'return True' at the end of the sendToTelegram method, the encounter cache was being cleaned up (since notified = False at monocle/notification.py, class Notifier), which in turn prompted multiple subsequent notifications of past encounters (because they were being treated as new).
@@ -395,6 +395,7 @@ class Notification: self.log.error('Error {} from Telegram: {}', e.code, e.message) return False self.log.info('Sent a Telegram notification about {}.', self.name) + return True except (ClientError, DisconnectedError) as e: err = e.__cause__ or e self.log.error('{} during Telegram notification.', err.__class__.__name__)
removed self.skipTest in _test_outout method. Fix
@@ -87,9 +87,8 @@ class IOHandlerTest(unittest.TestCase): source = StringIO(text_type(self._value)) self.iohandler.stream = source - self.skipTest('Memory object not implemented') - self.assertEqual(stream_val, self.iohandler.memory_object, - 'Memory object obtained') + #self.assertEqual(stream_val, self.iohandler.memory_object, + # 'Memory object obtained') def test_data(self): """Test data input IOHandler"""
Lexical envs: reword comments for Resolve/Get_Env's Info formals TN:
@@ -254,9 +254,8 @@ package Langkit_Support.Lexical_Env is No_Env_Getter : constant Env_Getter := (False, Null_Lexical_Env); procedure Resolve (Self : in out Env_Getter; Info : Entity_Info); - -- Resolve the reference for this env getter. If Info is passed, then it - -- corresponds to the entity info that should be associated to the node of - -- the env getter. + -- Resolve the reference for this env getter. Info is forwarded to the + -- resolver callback. function Simple_Env_Getter (E : Lexical_Env) return Env_Getter; -- Create a static Env_Getter (i.e. pointer to environment) @@ -268,9 +267,8 @@ package Langkit_Support.Lexical_Env is function Get_Env (Self : in out Env_Getter; Info : Entity_Info) return Lexical_Env; - -- Return the environment associated to the Self env getter. If Info is - -- passed, then it corresponds to the entity info that should be - -- associated to the node of the env getter. + -- Return the environment associated to the Self env getter. If Self is + -- dynamic, Info is forwarded to the resolver callback. function Equivalent (L, R : Env_Getter) return Boolean; -- If at least one of L and R is a dynamic env getter, raise a
css: Remove redundant declaration from night mode CSS. This declaration already exists in the default CSS. This declaration was present when the edit history modal was first given a night mode (then called "dark mode") style in November 2017 in It also existed in the default CSS at that time.
@@ -491,7 +491,6 @@ on a dark background, and don't change the dark labels dark either. */ .highlight_text_deleted { color: hsl(0, 90%, 67%); background-color: hsla(7, 54%, 62%, 0.38); - text-decoration: line-through; } } }
Fix enabled log_exchange issue If log_exchange is enabled, it fails to load the v2 app. This patch fixes the issue by initializing the debug middleware after the setup_app method, as the 'Debug' middleware expects global_conf to be of type dict rather than 'pecan.conf', which doesn't have a copy method. Closes-Bug:
@@ -55,10 +55,6 @@ def setup_app(pecan_config=None, extra_hooks=None): guess_content_type_from_ext=False ) - # WSGI middleware for debugging - if CONF.log_exchange: - app = debug.Debug.factory(pecan_config)(app) - # WSGI middleware for Keystone auth # NOTE(sbauza): ACLs are always active unless for unittesting where # enable_acl could be set to False @@ -78,4 +74,7 @@ def make_app(): } # NOTE(sbauza): Fill Pecan config and call modules' path app.setup_app() app = pecan.load_app(config) + # WSGI middleware for debugging + if CONF.log_exchange: + app = debug.Debug.factory(config)(app) return app
chore(deps): Bump rq from 1.8.1 to 1.10.1 Exciting improvements to rq! Cleanup & fixes for bugs that made us scratch our heads in prod. changelog:
@@ -56,7 +56,7 @@ redis~=3.5.3 requests-oauthlib~=1.3.0 requests~=2.25.1 RestrictedPython~=5.1 -rq~=1.8.0 +rq~=1.10.1 rsa>=4.1 # not directly required, pinned by Snyk to avoid a vulnerability schedule~=1.1.0 semantic-version~=2.8.5
Reduce available angles in supremacy examples This collection of angles reflects what we actually calibrate on the Xmons. Review:
@@ -48,7 +48,7 @@ def _make_random_single_qubit_op_layer( device: google.XmonDevice, randint: Callable[[int, int], int]) -> Iterable[cirq.Operation]: for q in device.qubits: - angle = randint(0, 7) / 4 + angle = randint(0, 3) / 2 axis = randint(0, 7) / 4 if angle: yield google.ExpWGate(half_turns=angle, axis_half_turns=axis).on(q)
fix: Use ImportError instead of ModuleNotFoundError ModuleNotFoundError is only available in Python 3.6+
@@ -106,7 +106,7 @@ def print_by_server(doctype, name, print_format=None, doc=None, no_letterhead=0) print_settings = frappe.get_doc("Print Settings") try: import cups - except ModuleNotFoundError: + except ImportError: frappe.throw("You need to install pycups to use this feature!") return try:
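As a side note, a minimal sketch (standard library only, no Frappe or pycups specifics) of why catching ImportError is the compatible choice: ModuleNotFoundError only exists in Python 3.6+, and where it exists it is a subclass of ImportError, so the broader except clause covers both old and new interpreters.

import sys

try:
    import cups  # optional dependency; may well be absent
except ImportError:
    # On Python >= 3.6 a missing module raises ModuleNotFoundError,
    # which is a subclass of ImportError, so this branch still runs.
    cups = None

if sys.version_info >= (3, 6):
    assert issubclass(ModuleNotFoundError, ImportError)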
Bulk operation bug fix Removed hardcoded test data
@@ -856,12 +856,12 @@ frappe.views.ListView = class ListView extends frappe.views.BaseList { && field_doc.fieldtype !== 'Read Only' && !field_doc.hidden && !field_doc.read_only; }; - const has_editable_fields = () => { + const has_editable_fields = (doctype) => { return frappe.meta.get_docfields(doctype).some(field_doc => is_field_editable(field_doc)); }; - const has_submit_permission = () => { - return frappe.perm.has_perm('Sales Order', 0, 'submit'); + const has_submit_permission = (doctype) => { + return frappe.perm.has_perm(doctype, 0, 'submit'); }; // utility @@ -954,7 +954,7 @@ frappe.views.ListView = class ListView extends frappe.views.BaseList { } // Bulk submit - if (frappe.model.is_submittable(doctype) && has_submit_permission()) { + if (frappe.model.is_submittable(doctype) && has_submit_permission(doctype)) { actions_menu_items.push(bulk_submit()); } @@ -964,7 +964,7 @@ frappe.views.ListView = class ListView extends frappe.views.BaseList { } // bulk edit - if (has_editable_fields()) { + if (has_editable_fields(doctype)) { actions_menu_items.push(bulk_edit()); }
fill default_variants explicitly No need to override the method
@@ -31,7 +31,7 @@ class BatchMovCreator(TrayPublishCreator): def __init__(self, project_settings, *args, **kwargs): super(BatchMovCreator, self).__init__(project_settings, *args, **kwargs) - self._default_variants = (project_settings["traypublisher"] + self.default_variants = (project_settings["traypublisher"] ["BatchMovCreator"] ["default_variants"]) @@ -152,9 +152,6 @@ class BatchMovCreator(TrayPublishCreator): return task_name - def get_default_variants(self): - return self._default_variants - def get_instance_attr_defs(self): return []
Pin setuptools to 59.6.0. Versions later than that are arbitrarily adding an additional local path, throwing all of our tools off. This is meant to be a temporary fix until we completely remove the need to use pip to install any packages.
@@ -64,7 +64,7 @@ RUN python3 -u /tmp/wrapper_scripts/apt.py update-install-clean -q -y git python RUN python3 -u /tmp/wrapper_scripts/apt.py update-install-clean -q -y python3-pip @# colcon-core.package_identification.python needs at least setuptools 30.3.0 @# pytest-rerunfailures enables usage of --retest-until-pass -RUN pip3 install -U setuptools pytest-rerunfailures +RUN pip3 install -U setuptools==59.6.0 pytest-rerunfailures @[end if]@ RUN python3 -u /tmp/wrapper_scripts/apt.py update-install-clean -q -y ccache
mark custom primary keys as experimental for now I'll revert this change once custom primary keys are supported fully in Piccolo admin
@@ -24,12 +24,13 @@ For a full list of columns, see :ref:`ColumnTypes`. ------------------------------------------------------------------------------- Primary Key ---------------- +----------- -You can specify your ``PrimaryKey`` with any column type by passing ``primary_key`` to the ``Column``. +Piccolo tables are automatically given a primary key column called ``id``, +which is an auto incrementing integer. -It is used to uniquely identify a row, and is referenced by ``ForeignKey`` -columns on other tables. +There is currently experimental support for specifying a custom primary key +column. For example: .. code-block:: python @@ -42,22 +43,6 @@ columns on other tables. id = UUID(primary_key=True) name = Varchar(length=100) -If you don't specify a ``PrimaryKey``, the table is automatically given a ``PrimaryKey`` column called ``id``, which -is an auto incrementing integer. - -This is equivalent to: - -.. code-block:: python - - # tables.py - from piccolo.table import Table - from piccolo.columns import Serial, Varchar - - - class Band(Table): - id = Serial(primary_key=True) - name = Varchar(length=100) - ------------------------------------------------------------------------------- Tablename
Update base image for cloud builds I'd like to see if this can speed up the build
# To push a new image, run 'gcloud builds submit --project=cloud-eng-council --tag gcr.io/cloud-eng-council/make .' # from this directory. -FROM debian +FROM python:3.8-slim # install core tools -RUN apt-get update && apt-get install -y build-essential python python-pip python3 python3-pip +RUN apt-get update && apt-get install -y build-essential # install yapf RUN pip install yapf
Generate urdf files out of source. Generate urdf files to $CMAKE_CURRENT_BINARY_DIR rather than $CMAKE_CURRENT_SOURCE_DIR
@@ -16,4 +16,4 @@ install(DIRECTORY meshes install(DIRECTORY launch DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}) -xacro_add_files(models/soccerbot.xacro OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/models/soccerbot.urdf TARGET media_files) \ No newline at end of file +xacro_add_files(models/soccerbot.xacro OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/models/soccerbot.urdf TARGET media_files)
csv_export: Exclude A1 for LD This is as per CTC
@@ -67,7 +67,7 @@ start_rows = { } # CTC Configs -# LD : ctc_sets_mandatory +# LD : ctc_sets_mandatory - A1-4K # RA: ctc_sets_mandatory + ctc_sets_optional # AI: ctc_sets_mandatory_ai + ctc_sets_optional # AS: A1 with Downsampling @@ -81,6 +81,7 @@ ctc_sets_mandatory = [ "aomctc-b2-syn"] ctc_sets_mandatory_ai = ctc_sets_mandatory + \ ["aomctc-f1-hires", "aomctc-f2-midres"] +ctc_sets_mandatory_ld = [x for x in ctc_sets_mandatory if x != 'aomctc-a1-4k'] ctc_sets_optional = ["aomctc-g1-hdr-4k", "aomctc-g2-hdr-2k", "aomctc-e-nonpristine"] @@ -207,13 +208,15 @@ def return_ctc_set_list(run_info, config): elif config == 'av2-ra-st' or config == 'av2-ra': run_set_list = ctc_sets_mandatory + ctc_sets_optional elif config == 'av2-ld': - run_set_list = ctc_sets_mandatory + run_set_list = ctc_sets_mandatory_ld else: run_set_list = [run_info['task']] elif 'aomctc-mandatory' in set_name and ('av2' in config or 'vvc' in config): if config in ['av2-ra-st', 'av2-ra', 'vvc-vtm', 'vvc-vtm-ra', 'vvc-vtm-ra-ctc', 'vvc-vtm-as-ctc', 'vvc-vtm-ra-st', 'vvc-vtm-ld']: run_set_list = ctc_sets_mandatory + elif config in ['av2-ld', 'vvc-ra-ld']: + run_set_list = ctc_sets_mandatory_ld elif config in ['av2-ai', 'vvc-vtm-ai']: run_set_list = ctc_sets_mandatory_ai else:
SConstruct : Allow doc scripts to disable target caching We don't yet have a good mechanism to pass dynamic targets from doc generation scripts to scons.
@@ -1462,6 +1462,8 @@ def locateDocs( docRoot, env ) : if targets: command = env.Command( targets, sourceFile, generateDocs ) env.Depends( command, "build" ) + if line.startswith( "# UndeclaredBuildTargets" ) : + env.NoCache( command ) # Force the commands to run serially, in case the doc generation # has been run in parallel. Otherwise we can get overlapping # screengrabs from the commands that launch Gaffer UIs.
Update exercises/concept/currency-exchange/.docs/instructions.md Clearer instructions
@@ -64,7 +64,8 @@ Create the `exchangeable_value()` function, taking `budget`, `exchange_rate`, `s Parameter `spread` is the *percentage taken* as an exchange fee. If `1.00 EUR == 1.20 USD` and the *spread* is `10`, the actual exchange will be: `1.00 EUR == 1.32 USD`. -This function should return the maximum available value after *exchange rate* and the *denomination*. +This function should return the maximum value of the new currency after calculating the *exchange rate* plus the *spread*. +Remember that the currency *denomination* is a whole number, and cannot be sub-divided. **Note:** Returned value should be `int` type.
add pinholeSize = illumination.imageSize() Prints kind of a matrix every time the imageSize() function is called in the for loop. A bit annoying. Is there a way to turn it off?
@@ -12,7 +12,7 @@ pinholeModifier = {1/3:[], 1:[], 3:[]} # Dictionary having a pinhole factor with for pinhole in pinholeModifier: finalRays = pinholeModifier[pinhole] - pinholeSize = 0.009374*pinhole # Ideal size of the pinhole times a certain factor + #pinholeSize = 0.009374*pinhole # Ideal size of the pinhole times a certain factor positions = [1000, 800, 500, 300, 150, 100, 50, 25, 0, -25, -50, -100, -150, -300, -500, -800, -1000] # list of all relative positions from the ideal focal spot position in nm print(".") @@ -36,6 +36,7 @@ for pinhole in pinholeModifier: illumination.append(Space(d=90)) illumination.append(Lens(f=50)) illumination.append(Space(d=50)) + pinholeSize = illumination.imageSize()*pinhole # Ideal size of the pinhole times a certain factor illumination.append(Aperture(diameter=pinholeSize)) outputRays = illumination.traceManyThrough(inputRays, progress=False) # Counts how many rays makes it through the pinhole
Update ursnif.txt + trail, - dup
@@ -1360,9 +1360,12 @@ llohumas.today # Reference: https://twitter.com/reecdeep/status/1201448424064856064 -laodonaln.xyz newsitalybiz.club +# Reference: https://twitter.com/sugimu_sec/status/1201505212814569472 + +redxyzred.xyz + # Generic trails /%20%20%20%20.php
MAINT: Add more descriptive error message for phase one simplex. Phase one of the simplex method sometimes fails to converge to a basic feasible solution if the pseudo-objective function is very slightly greater than the set tolerance. The new message states this, the tolerance, and the pseudo-objective function value explicitly.
@@ -789,9 +789,22 @@ def _linprog_simplex(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, else: # Failure to find a feasible starting point status = 2 + message = ( + "Phase 1 of the simplex method failed to find a feasible " + "solution. The pseudo-objective function evaluates to {0:.1e} " + "which exceeds the required tolerance of {1} for a solution to be " + "considered 'close enough' to zero be considered a basic feasible " + "solution. " + "Consider increasing the tolerance to be greater than {0:.1e}. " + "If this tolerance is unnacceptably large the problem may be " + "infeasible.".format(abs(T[-1, -1]), tol) + ) + if status != 0: + if not message: message = messages[status] + if disp: print(message) return OptimizeResult(x=np.nan, fun=-T[-1, -1], nit=nit1,
ebuild.ebd: eapi7: system root dir is represented by the empty string As clarified in a recent PMS update.
@@ -205,17 +205,17 @@ class ebd(object): def set_path_vars(env, pkg, domain): # XXX: note this is just EAPI 3 and EAPI 7 compatibility; not full prefix, soon.. trailing_slash = pkg.eapi.options.trailing_slash - env['ROOT'] = domain.root.rstrip(trailing_slash) + trailing_slash + env['ROOT'] = domain.root.rstrip(os.sep) + trailing_slash env['PKGCORE_PREFIX_SUPPORT'] = 'false' if pkg.eapi.options.prefix_capable: env['EPREFIX'] = domain.prefix.rstrip(os.sep) env['EROOT'] = ( - pjoin(env['ROOT'].rstrip(trailing_slash), env['EPREFIX']) + - trailing_slash) + pjoin(env['ROOT'].rstrip(trailing_slash), env['EPREFIX']) + + trailing_slash) env['PKGCORE_PREFIX_SUPPORT'] = 'true' if pkg.eapi.options.has_sysroot: - env['SYSROOT'] = env['ROOT'].rstrip(os.sep) + env['SYSROOT'] = env['ROOT'] env['ESYSROOT'] = pjoin(env['SYSROOT'], env['EPREFIX']) env['BROOT'] = env['EPREFIX']
Let histogram rows in JSON report be dicts, not lists This will allow us to add more information to each row in a backwards-compatible manner if we want to.
Routines for printing a report. """ import sys +from dataclasses import dataclass from io import StringIO import textwrap from collections import Counter, defaultdict @@ -209,11 +210,10 @@ class Statistics: else: eranges = None base_stats = AdjacentBaseStatistics(end_statistics.adjacent_bases) - trimmed_lengths = [ - OneLine((length, count, error_counts)) - for (length, count, _, _, error_counts) - in histogram_rows(end_statistics, n, gc_content) - ] + trimmed_lengths = { + row.length: OneLine({"counts": row.error_counts}) + for row in histogram_rows(end_statistics, n, gc_content) + } ends.append({ "error_rate": end_statistics.max_error_rate, "error_lengths": OneLine(eranges), @@ -361,30 +361,38 @@ def histogram(end_statistics: EndStatistics, n: int, gc_content: float) -> str: print("length", "count", "expect", "max.err", "error counts", sep="\t", file=sio) for row in histogram_rows(end_statistics, n, gc_content): - length, count, expect, max_err, error_counts = row print( - length, - count, - f"{expect:.1F}", - max_err, - " ".join(str(e) for e in error_counts), + row.length, + row.count, + f"{row.expect:.1F}", + row.max_err, + " ".join(str(e) for e in row.error_counts), sep="\t", file=sio, ) return sio.getvalue() + "\n" +@dataclass +class HistogramRow: + """One row in the "trimmed lengths" histogram""" + + length: int + count: int + expect: float + max_err: int + error_counts: List[int] + + def histogram_rows( end_statistics: EndStatistics, n: int, gc_content: float, -) -> Iterator[Tuple[int, int, float, int, List[int]]]: +) -> Iterator[HistogramRow]: """ - Yield tuples (length, count, expect, max_err, error_counts) + Yield histogram rows Include the no. of reads expected to be trimmed by chance (assuming a uniform distribution of nucleotides in the reads). - adapter_statistics -- EndStatistics object - adapter_length -- adapter length n -- total no. of reads. """ d = end_statistics.lengths @@ -398,14 +406,14 @@ def histogram_rows( count = d[length] max_errors = max(errors[length].keys()) error_counts = [errors[length][e] for e in range(max_errors + 1)] - t = ( - length, - count, - expect, - int(end_statistics.max_error_rate * min(length, end_statistics.effective_length)), - error_counts, + row = HistogramRow( + length=length, + count=count, + expect=expect, + max_err=int(end_statistics.max_error_rate * min(length, end_statistics.effective_length)), + error_counts=error_counts, ) - yield t + yield row class AdjacentBaseStatistics:
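A small illustration of the compatibility argument above (the JSON snippets are hypothetical, not cutadapt's actual report schema): consumers that read named keys keep working when a new key is added later, whereas code that unpacks positional lists breaks.

import json

old_row = {"counts": [120, 3]}
new_row = {"counts": [120, 3], "expect": 1.5}  # field added in a later version

for raw in (json.dumps(old_row), json.dumps(new_row)):
    row = json.loads(raw)
    print(row["counts"])  # key-based access ignores unknown extra fields

# With a list layout, e.g. [120, 3] -> [120, 3, 1.5], a consumer written as
#   count, errors = row
# raises ValueError as soon as the new positional field appears.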
Add a sitelink with a summary in interwikidata.py The automatic summary "Updated item" isn't very useful.
@@ -186,7 +186,8 @@ class IWBot(ExistingPageBot, SingleSiteBot): item.title(asLink=True)) return False output('Adding link to %s' % item.title()) - item.setSitelink(self.current_page) + item.setSitelink(self.current_page, summary='Added %s' % ( + self.current_page.title(asLink=True, insite=item.site))) return item def try_to_merge(self, item):
Enable the intra-op parallelism for layer norm Summary: Pull Request resolved: We would like to enable the intra-op parallelism for layer norm. This will be mapped to the parallel performance win for the BERT/RoBERTa model. Test Plan: buck test mode/dev-nosan //caffe2/test:nn -- "LayerNorm"
@@ -34,7 +34,8 @@ void LayerNormKernelImplInternal( const T c = T(1) / static_cast<T>(N); const bool gamma_null = gamma_data == nullptr; const bool beta_null = beta_data == nullptr; - for (int64_t i = 0; i < M; ++i) { + at::parallel_for(0, M, 1, [&](int64_t start, int64_t end) { + for (int64_t i = start; i < end; ++i) { const T* X_ptr = X_data + i * N; T* Y_ptr = Y_data + i * N; T mean_val = T(0); @@ -56,6 +57,7 @@ void LayerNormKernelImplInternal( mean_data[i] = mean_val; rstd_data[i] = rstd_val; } + }); } void LayerNormKernelImpl(
gigasecond: Refactor test names Renames the tests according to the canonical test data and stores the test version.
-from datetime import datetime import unittest +from datetime import datetime + from gigasecond import add_gigasecond +# test cases adapted from `x-common//canonical-data.json` @ version: 1.0.0 + class GigasecondTest(unittest.TestCase): - def test_1(self): + def test_date_only_specification_of_time(self): self.assertEqual( add_gigasecond(datetime(2011, 4, 25)), datetime(2043, 1, 1, 1, 46, 40)) - def test_2(self): + def test_another_date_only_specification_of_time(self): self.assertEqual( add_gigasecond(datetime(1977, 6, 13)), datetime(2009, 2, 19, 1, 46, 40)) - def test_3(self): + def test_one_more_date_only_specification_of_time(self): self.assertEqual( add_gigasecond(datetime(1959, 7, 19)), datetime(1991, 3, 27, 1, 46, 40)) - def test_4(self): + def test_full_time_specified(self): self.assertEqual( add_gigasecond(datetime(2015, 1, 24, 22, 0, 0)), datetime(2046, 10, 2, 23, 46, 40)) - def test_5(self): + def test_full_time_with_day_roll_over(self): self.assertEqual( add_gigasecond(datetime(2015, 1, 24, 23, 59, 59)), datetime(2046, 10, 3, 1, 46, 39))
[Doc] Fix docstring of aten::Sort A small patch to
@@ -314,15 +314,11 @@ IdArray NonZero(NDArray array); * is always in int64. * * \param array Input array. - * \param num_bits The number of bits used by the range of values in the array, - * or 0 to use all bits of the type. This is currently only used when sort - * arrays on the GPU. - * \param num_bits The number of bits used in key comparison. The bits are - * right aligned. For example, setting `num_bits` to 8 means using bits from - * `sizeof(IdType) * 8 - num_bits` (inclusive) to `sizeof(IdType) * 8` - * (exclusive). Setting it to a small value could speed up the sorting if the - * underlying sorting algorithm is radix sort (e.g., on GPU). Setting it to - * value of zero, uses full number of bits of the type (sizeof(IdType)*8). + * \param num_bits The number of bits used in key comparison. For example, if the data type + * of the input array is int32_t and `num_bits = 8`, it only uses bits in index + * range [0, 8) for sorting. Setting it to a small value could + * speed up the sorting if the underlying sorting algorithm is radix sort (e.g., on GPU). + * Setting it to zero (default value) means using all the bits for comparison. * On CPU, it currently has no effect. * \return A pair of arrays: sorted values and sorted index to the original position. */
Fixed small problem with tests: indent error; local error with "travis" keyword
@@ -61,7 +61,7 @@ class Test_MeasurementControl(unittest.TestCase): self.assertEqual(dat['value_units'], ['mV', 'mV']) @unittest.skipIf( - "TRAVIS" in os.environ or os.environ["TRAVIS"] == "true", + "TRAVIS" in os.environ, "Skipping this test on Travis CI.") def test_data_location(self): sweep_pts = np.linspace(0, 10, 30)
Workaround for newer GEOS versions. See also
@@ -1459,6 +1459,10 @@ class Basemap(object): # convert polygons to line segments poly = _geoslib.LineString(poly.boundary) else: + # this is a workaround to avoid + # GEOS_ERROR: CGAlgorithmsDD::orientationIndex encountered NaN/Inf numbers + b[np.isposinf(b)] = 1e20 + b[np.isneginf(b)] = -1e20 poly = Shape(b) # this is a workaround to avoid # "GEOS_ERROR: TopologyException:
Tell pyup to ignore the Python2-conditional dependencies See comments on PR 778: In short, pyup is trying to update dependencies that are active only under Python 2 to versions that no longer support Python 2.
# -e . asn1crypto==0.24.0 -astroid==1.6.5 ; python_version < "3.0" +astroid==1.6.5 ; python_version < "3.0" # pyup: ignore astroid==2.0.4 ; python_version >= "3.0" backports.functools-lru-cache==1.5 bandit==1.4.0 @@ -28,7 +28,7 @@ pluggy==0.7.1 py==1.5.4 pycparser==2.18 pylint==2.1.1 ; python_version >= "3.0" -pylint==1.9.3 ; python_version < "3.0" +pylint==1.9.3 ; python_version < "3.0" # pyup: ignore pynacl==1.2.1 pyyaml==3.13 securesystemslib[crypto,pynacl]==0.11.2
Dev environment should have jupyterlab; mock is not required anymore
@@ -36,7 +36,7 @@ A pull request for which you do not need to contact us in advance is the additio Most of Jupytext's code is written in Python. To develop the Python part of Jupytext, you should clone Jupytext, then create a dedicated Python env: ``` cd jupytext -conda create -n jupytext-dev python=3.6 notebook mock pyyaml +conda create -n jupytext-dev jupyterlab pyyaml conda activate jupytext-dev pip install -r requirements*.txt ```
DOC: add formula and documentation improvements for scipy.special.chndtr and its inverses [ci skip]
@@ -1840,19 +1840,30 @@ add_newdoc("chdtriv", """) add_newdoc("chndtr", - """ + r""" chndtr(x, df, nc, out=None) Non-central chi square cumulative distribution function + The cumulative distribution function is given by: + + .. math:: + + P(\chi^{\prime 2} \vert \nu, \lambda) =\sum_{j=0}^{\infty} + e^{-\lambda /2} + \frac{(\lambda /2)^j}{j!} P(\chi^{\prime 2} \vert \nu + 2j), + + where :math:`\nu > 0` is the degrees of freedom (``df``) and + :math:`\lambda \geq 0` is the non-centrality parameter (``nc``). + Parameters ---------- x : array_like - Upper bound of the integral + Upper bound of the integral; must satisfy ``x >= 0`` df : array_like - Degrees of freedom + Degrees of freedom; must satisfy ``df > 0`` nc : array_like - Non-centrality parameter + Non-centrality parameter; must satisfy ``nc >= 0`` out : ndarray, optional Optional output array for the function results @@ -1873,14 +1884,17 @@ add_newdoc("chndtrix", Inverse to `chndtr` vs `x` + Calculated using a search to find a value for `x` that produces the + desired value of `p`. + Parameters ---------- p : array_like - Probability + Probability; must satisfy ``0 <= p < 1`` df : array_like - Degrees of freedom + Degrees of freedom; must satisfy ``df > 0`` nc : array_like - Non-centrality parameter + Non-centrality parameter; must satisfy ``nc >= 0`` out : ndarray, optional Optional output array for the function results @@ -1903,14 +1917,17 @@ add_newdoc("chndtridf", Inverse to `chndtr` vs `df` + Calculated using a search to find a value for `df` that produces the + desired value of `p`. + Parameters ---------- x : array_like - Upper bound of the integral + Upper bound of the integral; must satisfy ``x >= 0`` p : array_like - Probability + Probability; must satisfy ``0 <= p < 1`` nc : array_like - Non-centrality parameter + Non-centrality parameter; must satisfy ``nc >= 0`` out : ndarray, optional Optional output array for the function results @@ -1931,15 +1948,17 @@ add_newdoc("chndtrinc", Inverse to `chndtr` vs `nc` + Calculated using a search to find a value for `df` that produces the + desired value of `p`. Parameters ---------- x : array_like - Upper bound of the integral + Upper bound of the integral; must satisfy ``x >= 0`` df : array_like - Degrees of freedom + Degrees of freedom; must satisfy ``df > 0`` p : array_like - Probability + Probability; must satisfy ``0 <= p < 1`` out : ndarray, optional Optional output array for the function results
Add NoNodeError to get_brokers & get_topics Catch exceptions for commands run against an empty cluster
@@ -101,8 +101,13 @@ class ZK: :rtype : dict of brokers """ + try: broker_ids = self.get_children("/brokers/ids") - + except NoNodeError: + _log.error( + "cluster is empty." + ) + return {} # Return broker-ids only if names_only: return {int(b_id): None for b_id in broker_ids} @@ -227,9 +232,16 @@ class ZK: accessing the zookeeper twice. If just partition-replica information is required fetch_partition_state should be set to False. """ + try: topic_ids = [topic_name] if topic_name else self.get_children( "/brokers/topics", ) + except NoNodeError: + _log.error( + "Cluster is empty." + ) + return {} + if names_only: return topic_ids topics_data = {}
Update test-notebooks.sh Skipping io_examples.ipynb because it requires user download
@@ -8,7 +8,7 @@ LIBCUDF_KERNEL_CACHE_PATH=${WORKSPACE}/.jitcache # Add notebooks that should be skipped here # (space-separated list of filenames without paths) -SKIPNBS="sdr_wfm_demod.ipynb sdr_integration.ipynb" +SKIPNBS="sdr_wfm_demod.ipynb sdr_integration.ipynb io_examples.ipynb" ## Check env env
Propagate instance docstr in getitem (jagged) Probably need to do the same for many other __getitem__
@@ -515,6 +515,7 @@ class JaggedArray(awkward.array.base.AwkwardArrayWithContent): out = cls.__new__(cls) out.__dict__.update(self.__dict__) out._content = content + out.__doc__ = content.__doc__ return out if isinstance(where, tuple) and len(where) == 0:
pmerge: toss unnecessary mutual checks for operations This should be done internally in argparse via mutual exclusion settings for all specified operations now.
@@ -391,12 +391,8 @@ def _validate(parser, namespace): if namespace.unmerge: if namespace.sets: parser.error("using sets with -C probably isn't wise, aborting") - if namespace.upgrade: - parser.error("cannot upgrade and unmerge simultaneously") if not namespace.targets: parser.error("you must provide at least one atom") - if namespace.clean: - parser.error("cannot use -C with --clean") if namespace.clean: if namespace.sets or namespace.targets:
Ensure validator keys are re-created on each run of the interop plugin Previously, using the same root dir with a set of _new_ keys would store them in the same root dir across runs and lead to loss of control over which validators were run based on the command-line arguments
@@ -59,10 +59,6 @@ from trinity.config import ( from trinity.extensibility import ( BaseMainProcessPlugin, ) -from trinity.plugins.eth2.constants import ( - VALIDATOR_KEY_DIR, -) -from eth2.beacon.tools.fixtures.loading import load_config_at_path import ssz from eth2.beacon.types.states import BeaconState @@ -75,17 +71,6 @@ from trinity.plugins.eth2.network_generator.constants import ( from trinity.plugins.builtin.network_db.plugin import TrackingBackend -class Client: - name: str - client_dir: Path - validator_keys_dir: Path - - def __init__(self, name: str, root_dir: Path) -> None: - self.name = name - self.client_dir = root_dir / name - self.validator_keys_dir = self.client_dir / VALIDATOR_KEY_DIR - - class InteropPlugin(BaseMainProcessPlugin): @property def name(self) -> str: @@ -203,10 +188,12 @@ class InteropPlugin(BaseMainProcessPlugin): keys_file = Path('eth2/beacon/scripts/quickstart_state/keygen_16_validators.yaml') keys_dir = trinity_config.trinity_root_dir / KEYS_DIR try: - keys_dir.mkdir() - except FileExistsError: + shutil.rmtree(keys_dir) + except FileNotFoundError: pass + keys_dir.mkdir() + # parse the yaml... yaml = YAML(typ="unsafe") keys = yaml.load(keys_file)
Use an in-memory database for unit tests Speeds up the tests on my machine due to less hammering on the disk (as shown in iotop). I guess the same could be achieved with "pragma synchronous = off;" and file-backed database, but using a :memory: database is easier.
@@ -18,12 +18,14 @@ from sqlalchemy.orm import sessionmaker @pytest.fixture -def db(tmp_path): +def db(): """ - Create a temporary SQLite-backed database in a temp directory, and return the Session object. + Create a temporary SQLite-backed database in memory, and return the Session object. """ - db_path = tmp_path / "db.sqlite" - engine = create_engine(f"sqlite:///{db_path}", echo=False) + from sqlalchemy.pool import StaticPool + # The elaborate arguments are needed to get multithreaded access + engine = create_engine("sqlite://", connect_args={'check_same_thread':False}, + poolclass=StaticPool, echo=False) Base.metadata.create_all(engine) Session = sessionmaker(bind=engine)
Set Github token for documentation generation in Github Action This PR sets `GITHUB_OAUTH_KEY` as an environment variable so it can generate documentation without hitting the API call limit
@@ -21,6 +21,8 @@ jobs: PYARROW_VERSION: 0.10.0 # DISPLAY=0.0 does not work in Github Actions with Python 3.5. Here we work around wtih xvfb-run PYTHON_EXECUTABLE: xvfb-run python + # Github token is required to auto-generate the release notes from Github release notes + GITHUB_OAUTH_KEY: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v2 - uses: actions/setup-java@v1 @@ -93,6 +95,8 @@ jobs: # The name of the directory '.cache' is for Travis CI. Once we remove Travis CI, # we should download Spark to a directory with a different name to prevent confusion. SPARK_CACHE_DIR: /home/runner/.cache/spark-versions + # Github token is required to auto-generate the release notes from Github release notes + GITHUB_OAUTH_KEY: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v2 - uses: actions/setup-java@v1
fix ln payments: set payto_e ln invoice correctly payto_e.lightning_invoice has to be set after the payment field is set to the node pub key, because check_text has the side effect of resetting the payto_e.lightning_invoice
@@ -1970,7 +1970,6 @@ class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger): self.show_error(_("Error parsing Lightning invoice") + f":\n{e}") return - self.payto_e.lightning_invoice = invoice pubkey = bh2u(lnaddr.pubkey.serialize()) for k,v in lnaddr.tags: if k == 'd': @@ -1980,6 +1979,7 @@ class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger): description = '' self.payto_e.setFrozen(True) self.payto_e.setText(pubkey) + self.payto_e.lightning_invoice = invoice self.message_e.setText(description) if lnaddr.get_amount_sat() is not None: self.amount_e.setAmount(lnaddr.get_amount_sat())
Remove comments within if-else block in .travis.yml This comment seems to make CI not work for some reason.
@@ -63,11 +63,9 @@ install: # one of the dependency requires Python 3.6 Conda specifically. - if [[ $TRAVIS_PYTHON_VERSION == "3.5" ]]; then pip install -r requirements-dev.txt; - # Show installed packages pip list; else conda install -c conda-forge --yes --file requirements-dev.txt; - # Show installed packages conda list; fi
Blender freeze with active material preview and viewport render. Improved material preview handling: made rpr_context a global value for PreviewEngine, which is automatically removed if it has not been used for some time (5 mins).
-import bpy +import threading +import bpy import pyrpr + from .engine import Engine from rprblender.export import object, camera, particle, world -from . import context from rprblender.utils import logging log = logging.Log(tag='PreviewEngine') +CONTEXT_LIFETIME = 300.0 # 5 minutes in seconds + + class PreviewEngine(Engine): """ Render engine for preview material, lights, environment """ TYPE = 'PREVIEW' + rpr_context = None + timer: threading.Timer = None # timer to remove rpr_context + def __init__(self, rpr_engine): super().__init__(rpr_engine) @@ -21,6 +28,28 @@ class PreviewEngine(Engine): self.render_samples = 0 self.render_update_samples = 1 + def _init_rpr_context(self, scene): + if PreviewEngine.timer: + PreviewEngine.timer.cancel() + + if not PreviewEngine.rpr_context: + log("Creating RPRContext") + PreviewEngine.rpr_context = self._RPRContext() + scene.rpr.init_rpr_context(PreviewEngine.rpr_context, is_final_engine=False) + PreviewEngine.rpr_context.scene.set_name(scene.name) + + self.rpr_context = PreviewEngine.rpr_context + + PreviewEngine.timer = threading.Timer(CONTEXT_LIFETIME, PreviewEngine._remove_rpr_context) + PreviewEngine.timer.start() + + @staticmethod + def _remove_rpr_context(): + log("Removing RPRContext") + # Here we remove only link to rpr_context instance. + # Real deletion will be applied after all links be lost. + PreviewEngine.rpr_context = None + def render(self): if not self.is_synced: return @@ -51,10 +80,9 @@ class PreviewEngine(Engine): scene = depsgraph.scene settings_scene = bpy.context.scene - settings_scene.rpr.init_rpr_context(self.rpr_context, is_final_engine=False) + self._init_rpr_context(scene) self.rpr_context.resize(scene.render.resolution_x, scene.render.resolution_y) - - self.rpr_context.scene.set_name(scene.name) + self.rpr_context.clear_scene() # export visible objects for obj in self.depsgraph_objects(depsgraph):
Clarify+test parsimony placement with unary nodes Fixes
@@ -2473,10 +2473,12 @@ class Tree: mutations)``, where ``ancestral_state`` is the allele assigned to the tree root(s) and ``mutations`` is a list of :class:`Mutation` objects, ordered as :ref:`required in a mutation table<sec_mutation_requirements>`. - For each mutation, ``node`` is the tree node at the bottom of the branch - on which the transition occurred, and ``derived_state`` is the new state - after this mutation. The ``parent`` property contains the index in the - returned list of the previous mutation on the path to root, or ``tskit.NULL`` + For each mutation, ``derived_state`` is the new state after this mutation and + ``node`` is the tree node immediately beneath the mutation (if there are unary + nodes between two branch points, hence multiple nodes above which the + mutation could be parsimoniously placed, the oldest node is used). The + ``parent`` property contains the index in the returned list of the previous + mutation on the path to root, or ``tskit.NULL`` if there are no previous mutations (see the :ref:`sec_mutation_table_definition` for more information on the concept of mutation parents). All other attributes of the :class:`Mutation` object are undefined and should not be used.
Add consensus to bind in Val Config File topic Update the bind setting to include the new consensus endpoint and show the default value, 127.0.0.1:5050.
@@ -29,20 +29,16 @@ example configuration options as necessary for your system. The ``validator.toml`` configuration file has the following options: -- ``bind`` = [ "``endpoint``", "``endpoint``" ] +- ``bind = [ "network:{endpoint}", "component:{endpoint}", "consensus:{endpoint}", ]`` - Sets the network and component endpoints. Default network bind interface: - ``tcp://127.0.0.1:8800``. Default component bind interface: - ``tcp://127.0.0.1:4004``. - - Each string has the format ``{option}:{endpoint}``, where - ``{option}`` is either ``network`` or ``component``. For example: + Sets the network, component, and consensus endpoints. The default values are: .. code-block:: none bind = [ "network:tcp://127.0.0.1:8800", - "component:tcp://127.0.0.1:4004" + "component:tcp://127.0.0.1:4004", + "consensus:tcp://127.0.0.1:5050" ] - ``peering = "{static,dynamic}"``
Update codeql-analysis.yml excluding tests from analysis
@@ -43,6 +43,8 @@ jobs: uses: github/codeql-action/init@v1 with: languages: ${{ matrix.language }} + config-file: ./.github/codeql-config.yml + queries: security-and-quality # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file.
Fix mismatched tags in HTML example <i> was closed using </b>
@@ -85,7 +85,7 @@ italic and underline: ``<b>``, ``<i>`` and ``<u>``. from prompt_toolkit import print_formatted_text, HTML print_formatted_text(HTML('<b>This is bold</b>')) - print_formatted_text(HTML('<i>This is italic</b>')) + print_formatted_text(HTML('<i>This is italic</i>')) print_formatted_text(HTML('<u>This is underlined</u>')) Further, it's possible to use tags for foreground colors:
return the list of files in the archive rather than the archive file itself.
@@ -292,7 +292,10 @@ class ExternalResources(object): root = os.path.dirname(full_path) with engine.open(full_path) as fd: fd.extractall(self.data_home) - result = [os.path.join(root, i.name) for i in fd.getmembers()] + if lodn.endswith("zip"): + result = [os.path.join(root, i) for i in fd.namelist()] + else: + result = [os.path.join(root, i) for i in fd.getnames()] return result def download_all(self, imgs=None):
Redis: remove erroneous `_redis` alias If a RedisCache instance was being accessed before the bot had created the `redis_cache` instance, the `_redis` alias was being set to None, causing AttributeErrors in lookups. See:
@@ -226,7 +226,6 @@ class RedisCache: for attribute in vars(instance).values(): if isinstance(attribute, Bot): self.bot = attribute - self._redis = self.bot.redis_session return self else: error_message = ( @@ -251,7 +250,7 @@ class RedisCache: value = self._value_to_typestring(value) log.trace(f"Setting {key} to {value}.") - await self._redis.hset(self._namespace, key, value) + await self.bot.redis_session.hset(self._namespace, key, value) async def get(self, key: RedisKeyType, default: Optional[RedisValueType] = None) -> Optional[RedisValueType]: """Get an item from the Redis cache.""" @@ -259,7 +258,7 @@ class RedisCache: key = self._key_to_typestring(key) log.trace(f"Attempting to retrieve {key}.") - value = await self._redis.hget(self._namespace, key) + value = await self.bot.redis_session.hget(self._namespace, key) if value is None: log.trace(f"Value not found, returning default value {default}") @@ -281,7 +280,7 @@ class RedisCache: key = self._key_to_typestring(key) log.trace(f"Attempting to delete {key}.") - return await self._redis.hdel(self._namespace, key) + return await self.bot.redis_session.hdel(self._namespace, key) async def contains(self, key: RedisKeyType) -> bool: """ @@ -291,7 +290,7 @@ class RedisCache: """ await self._validate_cache() key = self._key_to_typestring(key) - exists = await self._redis.hexists(self._namespace, key) + exists = await self.bot.redis_session.hexists(self._namespace, key) log.trace(f"Testing if {key} exists in the RedisCache - Result is {exists}") return exists @@ -314,7 +313,7 @@ class RedisCache: """ await self._validate_cache() items = self._dict_from_typestring( - await self._redis.hgetall(self._namespace) + await self.bot.redis_session.hgetall(self._namespace) ).items() log.trace(f"Retrieving all key/value pairs from cache, total of {len(items)} items.") @@ -323,7 +322,7 @@ class RedisCache: async def length(self) -> int: """Return the number of items in the Redis cache.""" await self._validate_cache() - number_of_items = await self._redis.hlen(self._namespace) + number_of_items = await self.bot.redis_session.hlen(self._namespace) log.trace(f"Returning length. Result is {number_of_items}.") return number_of_items @@ -335,7 +334,7 @@ class RedisCache: """Deletes the entire hash from the Redis cache.""" await self._validate_cache() log.trace("Clearing the cache of all key/value pairs.") - await self._redis.delete(self._namespace) + await self.bot.redis_session.delete(self._namespace) async def pop(self, key: RedisKeyType, default: Optional[RedisValueType] = None) -> RedisValueType: """Get the item, remove it from the cache, and provide a default if not found.""" @@ -364,7 +363,7 @@ class RedisCache: """ await self._validate_cache() log.trace(f"Updating the cache with the following items:\n{items}") - await self._redis.hmset_dict(self._namespace, self._dict_to_typestring(items)) + await self.bot.redis_session.hmset_dict(self._namespace, self._dict_to_typestring(items)) async def increment(self, key: RedisKeyType, amount: Optional[int, float] = 1) -> None: """
Update _infer_shapes_nn_mlmodel.py Map object not subscriptable: axis = list(map(int, params.axis)). In Python 3, for visualize_spec(), the 'map' object is not subscriptable, so the map object is converted to a list.
@@ -229,7 +229,7 @@ def _permute(layer, shape_dict): params = layer.permute Seq, Batch, Cin, Hin, Win = shape_dict[layer.input[0]] - axis = map(int, params.axis) + axis = list(map(int, params.axis)) dims = (Seq, Cin, Hin, Win) Seq_out = dims[axis[0]] Cout = dims[axis[1]]
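A minimal sketch of the plain-interpreter behaviour the patch works around (no Core ML specifics): in Python 3, map() returns a lazy iterator that does not support indexing, so it has to be wrapped in list() before axis[0]-style access.

params_axis = ("1", "0", "2")  # stand-in for params.axis

m = map(int, params_axis)
try:
    m[0]              # fine in Python 2, where map() returns a list
except TypeError:     # Python 3: 'map' object is not subscriptable
    pass

axis = list(map(int, params_axis))
assert axis[0] == 1   # indexing works once converted to a list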
Fix mock patching ES bulk ops Fixes pillowtop.tests.test_bulk:TestBulkDocOperations.test_process_changes_chunk_with_errors
@@ -128,7 +128,7 @@ class TestBulkDocOperations(TestCase): missing_case_ids = [uuid.uuid4().hex, uuid.uuid4().hex] changes = self._changes_from_ids(self.case_ids + missing_case_ids) - with patch('pillowtop.processors.elastic.bulk', return_value=mock_response): + with patch.object(ElasticsearchInterface, 'bulk_ops', return_value=mock_response): retry, errors = processor.process_changes_chunk(changes) self.assertEqual( set(missing_case_ids),
Update requirements.txt fix enum34 bug
@@ -26,7 +26,6 @@ django-extensions==3.1.1 django-picklefield==3.0.1 django-tastypie==0.14.3 docopt==0.6.2 -enum34==1.1.10 et-xmlfile==1.0.1 Fiona==1.8.18 future==0.18.2 @@ -90,10 +89,10 @@ requests-oauthlib==1.3.0 rollbar==0.14.7 rsa==4.7.1 scandir==1.5 -Shapely==1.7.0 +Shapely==1.7.1 simplegeneric==0.8.1 six==1.15.0 -smmap==3.0.4 +smmap==3.0.5 SQLAlchemy==1.3.1 sqlparse==0.4.1 stevedore==3.3.0
Prepare 2.6.1rc3 [ci skip-rust] [ci skip-build-wheels]
# 2.6.x Stable Releases +## 2.6.1rc3 (Sep 05, 2021) + +### Bug fixes + +* Fix UI rendering when a workunit has completed children but no running children (cherrypick of #12748) ([#12752](https://github.com/pantsbuild/pants/pull/12752)) + +* Include `.gitignore` in default `pantsd_invalidation_globs` (#12711) ([#12730](https://github.com/pantsbuild/pants/pull/12730)) + +### Documentation + +* Fix Toolchain plugin being included in generated reference docs (Cherry-pick of #12642) ([#12647](https://github.com/pantsbuild/pants/pull/12647)) + ## 2.6.1rc2 (Aug 24, 2021) ### New Features
update Yukon timezone Yukon decided to go on permanent DST and got a new tzinfo
@@ -5,7 +5,7 @@ from bs4 import BeautifulSoup import requests -timezone = 'Canada/Pacific' +timezone = 'America/Whitehorse' def fetch_production(zone_key='CA-YT', session=None, target_datetime=None, logger=None):
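A small hedged check of the stated change, using the standard-library zoneinfo module (Python 3.9+; the sample dates are arbitrary): since Yukon moved to permanent UTC-7, America/Whitehorse no longer changes offset between winter and summer, while Canada/Pacific still does.

from datetime import datetime
from zoneinfo import ZoneInfo

winter = datetime(2022, 1, 15, 12, 0)
summer = datetime(2022, 7, 15, 12, 0)

for tz in ("America/Whitehorse", "Canada/Pacific"):
    offsets = [d.replace(tzinfo=ZoneInfo(tz)).utcoffset() for d in (winter, summer)]
    print(tz, offsets)

# Expected: America/Whitehorse stays at UTC-07:00 in both months,
# Canada/Pacific is UTC-08:00 in January and UTC-07:00 in July.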
Cycles ShaderNetworkAlgo : Remove WITH_OSL ifdefs We are always building with OSL support, as a matter of principle.
@@ -176,7 +176,6 @@ ccl::ShaderNode *convertWalk( const ShaderNetwork::Parameter &outputParameter, c } else if( isOSLShader ) { -#ifdef WITH_OSL if( shaderManager && shaderManager->use_osl() ) { ccl::OSLShaderManager *manager = (ccl::OSLShaderManager*)shaderManager; @@ -185,13 +184,8 @@ ccl::ShaderNode *convertWalk( const ShaderNetwork::Parameter &outputParameter, c node = shaderGraph->add( node ); } else -#endif { -#ifdef WITH_OSL msg( Msg::Warning, "IECoreCycles::ShaderNetworkAlgo", boost::format( "Couldn't load OSL shader \"%s\" as the shading system is not set to OSL." ) % shader->getName() ); -#else - msg( Msg::Warning, "IECoreCycles::ShaderNetworkAlgo", boost::format( "Couldn't load OSL shader \"%s\" as GafferCycles wasn't compiled with OSL support." ) % shader->getName() ); -#endif return node; } }
Normalizing types in plotting * Normalizing types in plotting Sometimes we have both False and 'False', or both '1' and 1, or both '1.0' and 1.0, and it makes plots ugly. * Update plotting.py
@@ -245,6 +245,25 @@ def create_plots( df = remove_errors(df) df.loc[:, "loss"] = pd.to_numeric(df.loc[:, "loss"]) df = df.loc[:, [x for x in df.columns if not x.startswith("info/")]] + # Normalization of types. + for col in df.columns: + if col in ( + "budget", + "num_workers", + "dimension", + "useful_dimensions", + "num_blocks", + "block_dimension", + "num_objectives", + ): + df[col] = df[col].astype(float).astype(int) + elif col != "loss": + df[col] = df[col].astype(str) + df[col] = df[col].replace(r"\.[0]*$", "", regex=True) + try: + df.loc[:, col] = pd.to_numeric(df.loc[:, col]) + except: + pass if "num_objectives" in df.columns: df = df[df.num_objectives != 0] # the optimization did not even start # If we have a descriptor "instrum_str",
Bitbucket Cloud: Diffstat has more possible states Conflicts due to file deletion and file rename were not covered yet. According to my tests, conflicts due to file addition get marked as "merge conflict".
@@ -13,7 +13,11 @@ class DiffStat(BitbucketCloudBase): MODIFIED = "modified" ADDED = "added" REMOVED = "removed" + LOCAL_DELETED = "local deleted" + REMOTE_DELETED = "remote deleted" MERGE_CONFLICT = "merge conflict" + RENAME_CONFLICT = "rename conflict" + RENAME_DELETE_CONFLICT = "rename/delete conflict" SUBREPO_CONFLICT = "subrepo conflict" def __init__(self, data, *args, **kwargs): @@ -43,7 +47,14 @@ class DiffStat(BitbucketCloudBase): @property def has_conflict(self): """True if the change causes a conflict.""" - return str(self.get_data("status")) in (self.MERGE_CONFLICT, self.SUBREPO_CONFLICT) + return str(self.get_data("status")) in [ + self.MERGE_CONFLICT, + self.RENAME_CONFLICT, + self.RENAME_DELETE_CONFLICT, + self.SUBREPO_CONFLICT, + self.LOCAL_DELETED, + self.REMOTE_DELETED, + ] class CommitFile(BitbucketCloudBase):
change param of list of tensor to tensor for _make_nccl_premul_sum ref:
@@ -628,13 +628,13 @@ class DistributedFusedLAMB(torch.optim.Optimizer): ar_stream = self._ar_st[glob_chunk_id%self._num_ar_pg] ar_stream.wait_stream(torch.cuda.current_stream()) with torch.cuda.stream(ar_stream): - works[chunk_id] = torch.distributed.all_reduce(self._flat_grads_chunks[block_id][chunk_id],group=self._ar_pg[glob_chunk_id%self._num_ar_pg],async_op=True,op=_make_nccl_premul_sum((scale,))) + works[chunk_id] = torch.distributed.all_reduce(self._flat_grads_chunks[block_id][chunk_id],group=self._ar_pg[glob_chunk_id%self._num_ar_pg],async_op=True,op=_make_nccl_premul_sum(scale)) else: glob_chunk_id = block_id ar_stream = self._ar_st[glob_chunk_id%self._num_ar_pg] ar_stream.wait_stream(torch.cuda.current_stream()) with torch.cuda.stream(ar_stream): - works0 = torch.distributed.all_reduce(self._flat_grads_blocks[block_id],group=self._ar_pg[glob_chunk_id%self._num_ar_pg],async_op=True,op=_make_nccl_premul_sum((scale,))) + works0 = torch.distributed.all_reduce(self._flat_grads_blocks[block_id],group=self._ar_pg[glob_chunk_id%self._num_ar_pg],async_op=True,op=_make_nccl_premul_sum(scale)) for i in range(self._num_chunks): works[i]=works0 self._reductions_works[block_id] = works @@ -668,7 +668,7 @@ class DistributedFusedLAMB(torch.optim.Optimizer): group=self._rs_pg[glob_chunk_id%self._num_rs_pg], async_op=True, no_copy=True, - op=_make_nccl_premul_sum((scale,)), + op=_make_nccl_premul_sum(scale), ) else: works[chunk_id] = torch.distributed.reduce_scatter_tensor( @@ -676,7 +676,7 @@ class DistributedFusedLAMB(torch.optim.Optimizer): input=self._flat_grads_chunks[block_id][chunk_id], group=self._rs_pg[glob_chunk_id%self._num_rs_pg], async_op=True, - op=_make_nccl_premul_sum((scale,)), + op=_make_nccl_premul_sum(scale), ) # Reduction across nodes for each rank
Fix Draco UV export Fix Fix Fix Fix Fix
@@ -309,7 +309,7 @@ def __compress_primitive(primitive, dll, export_settings): if normals is not None: extension['attributes']['NORMAL'] = normal_id - for (k, id) in enumerate(uvs): + for (k, id) in enumerate(uv_ids): extension['attributes']['TEXCOORD_' + str(k)] = id for (k, id) in enumerate(weight_ids):
Update README.md Fix broken refs to travis.
# Django Arctic [![PyPi version](https://img.shields.io/pypi/v/django-arctic.svg)](https://pypi.python.org/pypi/django-arctic/) -[![Build Status](https://travis-ci.org/sanoma/django-arctic.svg?branch=develop)](https://travis-ci.org/sanoma/django-arctic) -[![Coverage Status](https://coveralls.io/repos/github/sanoma/django-arctic/badge.svg?branch=develop)](https://coveralls.io/github/sanoma/django-arctic) +[![Build Status](https://travis-ci.org/dpgmediamagzines/django-arctic.svg?branch=develop)](https://travis-ci.org/dpgmediamagazines/django-arctic) +[![Coverage Status](https://coveralls.io/repos/github/dpgmediamagazines/django-arctic/badge.svg?branch=develop)](https://coveralls.io/github/dpgmediamagazines/django-arctic) [![Read the Docs](https://readthedocs.org/projects/django-arctic/badge/?version=latest)](https://django-arctic.readthedocs.io/en/latest/) [![Downloads](https://pepy.tech/badge/django-arctic/month)](https://pepy.tech/project/django-arctic/month)
Allow for RNAseq analysis from GVCF level See
@@ -97,14 +97,15 @@ def rnaseq_variant_calling(samples, run_parallel): """ samples = run_parallel("run_rnaseq_variant_calling", samples) variantcaller = dd.get_variantcaller(to_single_data(samples[0])) - if variantcaller and ("gatk-haplotype" in variantcaller): + jointcaller = dd.get_jointcaller(to_single_data(samples[0])) + if jointcaller and 'gatk-haplotype-joint' in jointcaller: out = [] for d in joint.square_off(samples, run_parallel): out.extend([[to_single_data(xs)] for xs in multi.split_variants_by_sample(to_single_data(d))]) samples = out - if variantcaller: + if variantcaller or jointcaller: samples = run_parallel("run_rnaseq_ann_filter", samples) - if variantcaller and ("gatk-haplotype" in variantcaller): + if jointcaller and 'gatk-haplotype-joint' in jointcaller: out = [] for data in (to_single_data(xs) for xs in samples): if "variants" not in data: @@ -148,8 +149,8 @@ def run_rnaseq_ann_filter(data): ann_file = population.run_vcfanno(dd.get_vrn_file(data), data) if ann_file: data = dd.set_vrn_file(data, ann_file) - variantcaller = dd.get_variantcaller(data) - if variantcaller and ("gatk-haplotype" in variantcaller): + jointcaller = dd.get_jointcaller(data) + if jointcaller and 'gatk-haplotype-joint' in jointcaller: filter_file = variation.gatk_filter_rnaseq(dd.get_vrn_file(data), data) data = dd.set_vrn_file(data, filter_file) # remove variants close to splice junctions
Issue XarrayDataCube tests: only test scipy engine against itself ref: conda-forge/staged-recipes#15717
@@ -169,6 +169,9 @@ def _roundtrips() -> Iterator[_SaveLoadRoundTrip]: netcdf_engines = _get_netcdf_engines() assert len(netcdf_engines) > 0 for e1, e2 in itertools.product(netcdf_engines, netcdf_engines): + if (e1 == "scipy") != (e2 == "scipy"): + # Only test scipy engine against itself + continue yield pytest.param( _SaveLoadRoundTrip( format="netcdf", save_kwargs={"engine": e1}, load_kwargs={"engine": e2}
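A quick way to see what the new skip condition keeps: applied to a typical set of netCDF engine names (illustrative; the real list comes from _get_netcdf_engines()), the filter drops every mixed scipy pairing and keeps scipy only against itself. A small sketch:

import itertools

engines = ["netcdf4", "h5netcdf", "scipy"]  # illustrative engine names
kept = [
    (e1, e2)
    for e1, e2 in itertools.product(engines, engines)
    # same condition as the diff, inverted: keep pairs where both or neither is scipy
    if (e1 == "scipy") == (e2 == "scipy")
]
# kept == [('netcdf4', 'netcdf4'), ('netcdf4', 'h5netcdf'),
#          ('h5netcdf', 'netcdf4'), ('h5netcdf', 'h5netcdf'),
#          ('scipy', 'scipy')]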
Switch the new extra values into arrays. Otherwise the commented-out example blocks were always inserted, because the string value was never empty.
@@ -79,16 +79,16 @@ volumeClaimTemplate: persistence: annotations: {} -extraVolumes: | +extraVolumes: [] # - name: extras # emptyDir: {} -extraVolumeMounts: | +extraVolumeMounts: [] # - name: extras # mountPath: /usr/share/extras # readOnly: true -extraInitContainers: | +extraInitContainers: [] # - name: do-something # image: busybox # command: ['do', 'something']
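The reasoning behind the change, restated as a Python truthiness analogy (a minimal sketch; the chart itself evaluates this in Go templates, and the exact old string value depends on how the block scalar was indented): a `|` default yields a string, and any non-empty string passes an `if .Values.extraVolumes` guard, whereas `[]` does not.

# Values as the template might see them (illustrative only).
old_default = "# - name: extras\n#   emptyDir: {}\n"  # block-scalar string still holding the comments
new_default = []                                       # genuinely empty

bool(old_default)  # True  -> the guard fired and the commented examples were rendered
bool(new_default)  # False -> the optional section is skipped unless real values are supplied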
Bugfix in constraints manager. Appears for multi-still refinement if some experiments have been removed for not having enough reflections. Then the number of parameters to constrain won't match the total number of parameters, and the wrong set of parameters is constrained.
@@ -229,7 +229,7 @@ def build_constraint(self, constraint_scope, parameterisation, model_type): ) for j in p.get_experiment_ids(): if j in constraint_scope.id: - prefixes.append(model_type + "{0}".format(i + 1)) + prefixes.append(model_type + "{0}".format(j + 1)) break # ignore model name prefixes
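To see why the enumerate index is the wrong thing to use, here is a small hypothetical walk-through (the names and numbers are made up, not dials' real objects): once an experiment has been dropped, position i in the parameterisation list no longer matches the experiment id j, so a prefix built from i + 1 points at the wrong model.

# Hypothetical: experiment 1 was removed for having too few reflections,
# so the surviving parameterisations cover experiment ids [0, 2, 3].
parameterisation_ids = [[0], [2], [3]]   # get_experiment_ids() per parameterisation
constrained_ids = {2}                    # experiments named in the constraint scope
model_type = "Crystal"                   # illustrative model name

for i, exp_ids in enumerate(parameterisation_ids):
    for j in exp_ids:
        if j in constrained_ids:
            print(model_type + "{0}".format(i + 1))  # "Crystal2" -- wrong model
            print(model_type + "{0}".format(j + 1))  # "Crystal3" -- the intended one
            break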
update doc for multinomial Summary: Update documentation to raise awareness of the fix in Thanks matteorr for pointing this out! Pull Request resolved:
@@ -3110,8 +3110,10 @@ If replacement is ``True``, samples are drawn with replacement. If not, they are drawn without replacement, which means that when a sample index is drawn for a row, it cannot be drawn again for that row. -This implies the constraint that :attr:`num_samples` must be lower than -:attr:`input` length (or number of columns of :attr:`input` if it is a matrix). +.. note:: + When drawn without replacement, :attr:`num_samples` must be lower than + number of non-zero elements in :attr:`input` (or the min number of non-zero + elements in each row of :attr:`input` if it is a matrix). Args: input (Tensor): the input tensor containing probabilities @@ -3122,8 +3124,11 @@ Args: Example:: >>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float) # create a tensor of weights - >>> torch.multinomial(weights, 4) - tensor([ 1, 2, 0, 0]) + >>> torch.multinomial(weights, 2) + tensor([1, 2]) + >>> torch.multinomial(weights, 4) # ERROR! + RuntimeError: invalid argument 2: invalid multinomial distribution (with replacement=False, + not enough non-negative category to sample) at ../aten/src/TH/generic/THTensorRandom.cpp:320 >>> torch.multinomial(weights, 4, replacement=True) tensor([ 2, 1, 1, 1]) """)
overhauled recseries.py: added the ability to choose between two methods of series recursion termination, either a fixed user-defined order (method='order') or a user-defined tolerance (method='rtol').
@@ -6,7 +6,7 @@ from poliastro.core.elements import coe2rv, rv2coe @jit -def recseries_coe(k, p, ecc, inc, raan, argp, nu, tof, order=8): +def recseries_coe(k, p, ecc, inc, raan, argp, nu, tof, method='rtol', order=8, numiter=100, rtol=1e-8): # semi-major axis semi_axis_a = p / (1 - ecc ** 2) @@ -35,10 +35,22 @@ def recseries_coe(k, p, ecc, inc, raan, argp, nu, tof, order=8): # snapping anomaly to [0,pi] range M = M - 2 * np.pi * np.floor(M / 2 / np.pi) + # set recursion iteration + if method == 'rtol': + Niter = numiter + elif method == 'order': + Niter = order + else: + raise ValueError("Unknown recursion termination method ('rtol','order').") + # compute eccentric anomaly through recursive series E = M + ecc # Using initial guess from vallado to improve convergence - for i in range(0, order): - E = M + ecc * np.sin(E) + for i in range(0, Niter): + En = M + ecc * np.sin(E) + # check for break condition + if method=='rtol' and (abs(En-E)/abs(E))<rtol: + break + E = En return E_to_nu(E, ecc) @@ -50,7 +62,7 @@ def recseries_coe(k, p, ecc, inc, raan, argp, nu, tof, order=8): @jit -def recseries(k, r0, v0, tof, order=8): +def recseries(k, r0, v0, tof, method='rtol', order=8, numiter=100, rtol=1e-8): """Kepler solver for elliptical orbits with recursive series approximation method. The order of the series is a user defined parameter. @@ -64,8 +76,14 @@ def recseries(k, r0, v0, tof, order=8): Velocity vector. tof : float Time of flight. + method : str + Type of termination method ('rtol','order') order : int, optional Order of recursion, defaults to 8. + numiter : int, optional + Number of iterations, defaults to 100. + rtol : float, optional + Relative error for accuracy of the method, defaults to 1e-8. Returns ------- @@ -84,6 +102,6 @@ def recseries(k, r0, v0, tof, order=8): # Solve first for eccentricity and mean anomaly p, ecc, inc, raan, argp, nu = rv2coe(k, r0, v0) - nu = recseries_coe(k, p, ecc, inc, raan, argp, nu, tof, order) + nu = recseries_coe(k, p, ecc, inc, raan, argp, nu, tof, method, order, numiter, rtol) return coe2rv(k, p, ecc, inc, raan, argp, nu)
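The termination logic can be illustrated with a standalone sketch of the same fixed-point recursion for Kepler's equation (this is not poliastro's exact code, just the idea): 'order' runs a fixed number of terms, while 'rtol' keeps iterating until successive eccentric-anomaly estimates agree to the requested relative tolerance.

import numpy as np

def kepler_recursion(M, ecc, method='rtol', order=8, numiter=100, rtol=1e-8):
    """Sketch of E_{n+1} = M + e*sin(E_n) with the two termination modes."""
    if method == 'rtol':
        niter = numiter
    elif method == 'order':
        niter = order
    else:
        raise ValueError("Unknown recursion termination method ('rtol','order').")
    E = M + ecc  # same initial guess as in the patch
    for _ in range(niter):
        E_new = M + ecc * np.sin(E)
        if method == 'rtol' and abs(E_new - E) / abs(E) < rtol:
            break
        E = E_new
    return E

E = kepler_recursion(np.radians(30.0), 0.1)
print(E - 0.1 * np.sin(E) - np.radians(30.0))  # residual of Kepler's equation: roughly of order rtol or smaller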
fix deployment of OCP 4.11 via Flexy on vSphere (Disconnected and Proxy environment). Recent changes in flexy-templates (commit 4a4099541) changed the location of the terraform.tfstate file for the vSphere deployment of OCP 4.11.
@@ -33,6 +33,7 @@ from ocs_ci.utility.flexy import ( configure_allowed_domains_in_proxy, load_cluster_info, ) +from ocs_ci.utility import version logger = logging.getLogger(__name__) @@ -488,7 +489,15 @@ class FlexyBase(object): terraform_data_dir = os.path.join( self.cluster_path, constants.TERRAFORM_DATA_DIR ) - for _file in ("terraform.tfstate", "terraform.tfvars"): + # related to flexy-templates changes 4a4099541 + if version.get_semantic_ocp_running_version() >= version.VERSION_4_11: + files_to_copy = ( + "upi_on_vsphere-terraform-scripts/terraform.tfstate", + "terraform.tfvars", + ) + else: + files_to_copy = ("terraform.tfstate", "terraform.tfvars") + for _file in files_to_copy: shutil.copy2( os.path.join(flexy_terraform_dir, _file), terraform_data_dir )
24 Pull Requests: CORS status change from 'Yes' to 'No'
@@ -245,7 +245,7 @@ API | Description | Auth | HTTPS | CORS | ### Development API | Description | Auth | HTTPS | CORS | |---|---|---|---|---| -| [24 Pull Requests](https://24pullrequests.com/api) | Project to promote open source collaboration during December | No | Yes | Yes | +| [24 Pull Requests](https://24pullrequests.com/api) | Project to promote open source collaboration during December | No | Yes | No | | [Agify.io](https://agify.io) | Estimates the age from a first name | No | Yes | Yes | | [ApiFlash](https://apiflash.com/) | Chrome based screenshot API for developers | `apiKey` | Yes | Unknown | | [Apility.io](https://apility.io/apidocs/) | IP, Domains and Emails anti-abuse API blocklist | No | Yes | Yes |
Cleanup: Match arguments of isless() Error was: npy_math_internal.h.src:570:24: error: no matching function for call to 'isless(npy_float&, int)' 570 | if (isless(b, 0) != isless(mod, 0)) { | ^
@@ -567,7 +567,7 @@ npy_divmod@c@(@type@ a, @type@ b, @type@ *modulus) /* adjust fmod result to conform to Python convention of remainder */ if (mod) { - if (isless(b, 0) != isless(mod, 0)) { + if (isless(b, (@type@)0) != isless(mod, (@type@)0)) { mod += b; div -= 1.0@c@; }
ci: run check first It doesn't make sense to first run all the tests and lint afterwards.
@@ -8,8 +8,23 @@ on: pull_request: jobs: + check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.10 + uses: actions/setup-python@v2 + with: + python-version: "3.10" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install tox + - name: Linting + run: | + tox -e check build: - + needs: check runs-on: ubuntu-latest env: TERMINUSX_TOKEN: ${{ secrets.TERMINUSX_TOKEN_DEV }} @@ -38,23 +53,6 @@ jobs: files: ./cov.xml verbose: true - check: - runs-on: ubuntu-latest - needs: build - steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.10 - uses: actions/setup-python@v2 - with: - python-version: "3.10" - - name: Install dependencies - run: | - python -m pip install --upgrade pip - python -m pip install tox - - name: Linting - run: | - tox -e check - deploy: runs-on: ubuntu-latest needs: build
fix time_to_frame passing _check_sortings_equal() test
@@ -523,7 +523,7 @@ class NwbSortingExtractor(se.SortingExtractor): return get_dynamic_table_property(nwbfile.units, row_ids=unit_ids, property_name=property_name) def time_to_frame(self, time): - return ((time - self._t0) * self.get_sampling_frequency()).astype('int') + return np.round((time - self._t0) * self.get_sampling_frequency()).astype('int') def get_unit_spike_train(self, unit_id, start_frame=0, end_frame=np.Inf): check_nwb_install()
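The motivation for rounding instead of truncating: (time - t0) * fs can land a hair below the intended integer frame because of floating point error, and astype('int') truncates toward zero. A minimal illustration with a made-up value:

import numpy as np

# Pretend floating point error left (time - t0) * fs just shy of frame 58
# (illustrative value, not taken from a real recording):
frames = 57.999999999

int(frames)             # 57 -> plain truncation lands one frame early
int(np.round(frames))   # 58 -> rounding first recovers the intended frame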
Skip ROCm test in test/test_cpp_extensions_aot.py Summary: Pull Request resolved: It may be flaky. Test Plan: Imported from OSS
@@ -2,7 +2,7 @@ import os import unittest import torch.testing._internal.common_utils as common -from torch.testing._internal.common_utils import IS_WINDOWS +from torch.testing._internal.common_utils import IS_WINDOWS, skipIfRocm from torch.testing._internal.common_cuda import TEST_CUDA import torch import torch.backends.cudnn @@ -55,6 +55,7 @@ class TestCppExtensionAOT(common.TestCase): expected_tensor_grad = torch.ones([4, 4], dtype=torch.double).mm(weights.t()) self.assertEqual(tensor.grad, expected_tensor_grad) + @skipIfRocm @unittest.skipIf(not TEST_CUDA, "CUDA not found") def test_cuda_extension(self): import torch_test_cpp_extension.cuda as cuda_extension
logging: log pkg version and type, platform info and cli arguments
@@ -180,6 +180,21 @@ def main(argv=None): # noqa: C901 if level is not None: set_loggers_level(level) + if level and level <= logging.DEBUG: + from platform import ( + platform, + python_implementation, + python_version, + ) + + from dvc import __version__ + from dvc.utils.pkg import PKG + + pyv = " ".join([python_implementation(), python_version()]) + pkg = f" ({PKG})" if PKG else "" + logger.debug("v%s%s, %s on %s", __version__, pkg, pyv, platform()) + logger.debug("command: %s", " ".join(argv or sys.argv)) + logger.trace(args) # type: ignore[attr-defined] if not sys.stdout.closed and not args.quiet:
STY: added kwargs to __repr__ Added the optional kwargs input to the inst __repr__ function.
@@ -1057,7 +1057,7 @@ class Instrument(object): self.name, "', sat_id='", self.sat_id, "', clean_level='", self.clean_level, "', pad={:}, orbit_info=".format(self.pad), - "{:})".format(self.orbit_info)]) + "{:}, **{:})".format(self.orbit_info, self.kwargs)]) return out_str
Break out search argument parsing This should be a passive change, while providing a useful extension point.
@@ -634,6 +634,19 @@ class SearchCommand(object): debug('%s.process finished under protocol_version=1', class_name) + def _protocol_v2_option_parser(self, arg): + """ Determines if an argument is an Option/Value pair, or just a Positional Argument. + Method so different search commands can handle parsing of arguments differently. + + :param arg: A single argument provided to the command from SPL + :type arg: str + + :return: [OptionName, OptionValue] OR [PositionalArgument] + :rtype: List[str] + + """ + return arg.split('=', 1) + def _process_protocol_v2(self, argv, ifile, ofile): """ Processes records on the `input stream optionally writing records to the output stream. @@ -704,7 +717,7 @@ class SearchCommand(object): if args and type(args) == list: for arg in args: - result = arg.split('=', 1) + result = self._protocol_v2_option_parser(arg) if len(result) == 1: self.fieldnames.append(str(result[0])) else:
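A sketch of the extension point the commit describes: a hypothetical subclass that only treats a token as an option/value pair when the name is one it recognises, leaving everything else positional (the class and option names are made up; the import path may differ between SDK versions).

from splunklib.searchcommands.search_command import SearchCommand  # path may vary by SDK version

class SelectiveParsingCommand(SearchCommand):
    KNOWN_OPTIONS = {'mode', 'limit'}  # hypothetical option names

    def _protocol_v2_option_parser(self, arg):
        name, sep, value = arg.partition('=')
        if sep and name in self.KNOWN_OPTIONS:
            return [name, value]   # parsed as an Option/Value pair
        return [arg]               # everything else stays a positional argument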
feat: Add More Specific Type Annotations for Row Dictionaries The keys must be strings as they represent column names. Update type annotations to reflect this.
@@ -3349,10 +3349,10 @@ class Client(ClientWithProject): def insert_rows( self, table: Union[Table, TableReference, str], - rows: Union[Iterable[Tuple], Iterable[Dict]], + rows: Union[Iterable[Tuple], Iterable[Mapping[str, Any]]], selected_fields: Sequence[SchemaField] = None, **kwargs, - ) -> Sequence[dict]: + ) -> Sequence[Dict[str, Any]]: """Insert rows into a table via the streaming API. See @@ -3470,7 +3470,7 @@ class Client(ClientWithProject): def insert_rows_json( self, table: Union[Table, TableReference, TableListItem, str], - json_rows: Sequence[Dict], + json_rows: Sequence[Mapping[str, Any]], row_ids: Union[ Iterable[Optional[str]], AutoRowIDs, None ] = AutoRowIDs.GENERATE_UUID,
Added option for interleaving text with code blocks. If the message contains both plaintext and code blocks, the text will be ignored. If several code blocks are present, they are concatenated.
@@ -31,6 +31,15 @@ FORMATTED_CODE_REGEX = re.compile( r"\s*$", # any trailing whitespace until the end of the string re.DOTALL | re.IGNORECASE # "." also matches newlines, case insensitive ) +CODE_BLOCK_REGEX = re.compile( + r"```" # code block delimiter: 3 batckticks + r"([a-z]+\n)?" # match optional language (only letters plus newline) + r"(?:[ \t]*\n)*" # any blank (empty or tabs/spaces only) lines before the code + r"(?P<code>.*?)" # extract all code inside the markup + r"\s*" # any more whitespace before the end of the code markup + r"```", # code block end + re.DOTALL | re.IGNORECASE # "." also matches newlines, case insensitive +) RAW_CODE_REGEX = re.compile( r"^(?:[ \t]*\n)*" # any blank (empty or tabs/spaces only) lines before the code r"(?P<code>.*?)" # extract all the rest as code @@ -78,7 +87,9 @@ class Snekbox(Cog): def prepare_input(code: str) -> str: """Extract code from the Markdown, format it, and insert it into the code template.""" match = FORMATTED_CODE_REGEX.fullmatch(code) - if match: + + # Despite the wildcard being lazy, this is a fullmatch so we need to check the presence of the delim explicitly. + if match and match.group("delim") not in match.group("code"): code, block, lang, delim = match.group("code", "block", "lang", "delim") code = textwrap.dedent(code) if block: @@ -86,6 +97,14 @@ class Snekbox(Cog): else: info = f"{delim}-enclosed inline code" log.trace(f"Extracted {info} for evaluation:\n{code}") + + else: + code_parts = CODE_BLOCK_REGEX.finditer(code) + merge = '\n'.join(map(lambda part: part.group("code"), code_parts)) + if merge: + code = textwrap.dedent(merge) + log.trace(f"Merged one or more code blocks from text combined with code:\n{code}") + else: code = textwrap.dedent(RAW_CODE_REGEX.fullmatch(code).group("code")) log.trace(
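The net effect of the new branch can be shown with a simplified stand-in for CODE_BLOCK_REGEX (the real pattern above also tolerates blank lines and trailing whitespace inside the markup): every fenced block is extracted and the surrounding prose is discarded before evaluation.

import re

# Simplified stand-in for the cog's CODE_BLOCK_REGEX.
block = re.compile(r"```(?:[a-z]*\n)?(?P<code>.*?)```", re.DOTALL | re.IGNORECASE)

message = (
    "Here is my attempt:\n"
    "```py\nprint('a')\n```\n"
    "and the helper I use:\n"
    "```\nprint('b')\n```"
)
merged = "\n".join(m.group("code") for m in block.finditer(message))
# merged now contains only the code from both blocks; the prose between them is gone.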
When creating resources, default creator to current_user Still, leave the default-to-parent around. Unfortunate. Mostly because of tests. But let's keep it. Eh.
# * See the License for the specific language governing permissions and # * limitations under the License. +from flask_security import current_user + from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.ext.declarative import declared_attr from sqlalchemy.ext.associationproxy import association_proxy @@ -120,6 +122,10 @@ class SQLResourceBase(SQLModelBase): # foreign key) with db.session.no_autoflush: if not self.creator: + user = current_user._get_current_object() + if user.is_authenticated: + self.creator = user + else: self.creator = parent_instance.creator self.tenant = parent_instance.tenant self.visibility = parent_instance.visibility
Fix cancel button rendered beyond the create-rule modal. This patch fixes the problem that the cancel button of the create-rule dialogue box floats too far to the left, outside of the dialogue box.
{% if workflow.wizard %} <div class="row"> <div class="col-sm-1"> - <a href="{% url 'horizon:admin:policies:detail' policy_name %}" class="btn btn-default secondary cancel close">{% trans "Cancel" %}</a> + <a href="{% url 'horizon:admin:policies:detail' policy_name %}" class="btn btn-default secondary cancel">{% trans "Cancel" %}</a> </div> <div class="col-sm-5 back">
Max retries for abort * Adds a retry counter for the abort method to ensure that an infinite loop can never occur.
@@ -534,14 +534,22 @@ class BoxProvider(provider.BaseProvider): parts = await self._upload_chunks(data, session_data) # Step A.4 complete the session and return the upload file's metadata. - while True: + retry = 10 + while retry > 0: + --retry try: return await self._complete_chunked_upload_session(session_data, parts, data_sha) except RetryChunkedUploadCommit: continue - except exceptions.UploadError: - return await self._abort_chunked_upload(session_data, data_sha) + except Exception as err: + msg = 'An unexpected error has occurred during the multi-part upload.' + logger.error('{} upload_id={} error={!r}'.format(msg, session_data, err)) + aborted = await self._abort_chunked_upload(session_data, data_sha) + if aborted: + msg += ' The abort action failed to clean up the temporary file parts generated ' \ + 'during the upload process. Please manually remove them.' + raise exceptions.UploadError(msg) async def _create_chunked_upload_session(self, path: WaterButlerPath, data: bytes) -> dict: """Create an upload session to use with a chunked upload. @@ -636,9 +644,7 @@ class BoxProvider(provider.BaseProvider): 'Content-Type:': 'application/json', 'Digest': 'sha={}'.format(data_sha) }, - expects=(201,), + expects=(204,), throws=exceptions.UploadError, - ) as resp: - entry = (await resp.json())['entries'][0] - - return entry + ): + return True
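For context on the retry counter: Python has no `--` decrement operator, so `--retry` is parsed as double unary minus and leaves the counter unchanged; a bounded countdown is usually written with `retry -= 1` or a for loop. Below is a minimal, hedged sketch of the intended control flow (not the provider's actual code; the exception name is a stand-in):

class RetryCommit(Exception):
    """Stand-in for the provider's 'retry the commit' signal."""

async def commit_with_retries(commit, abort, max_retries=10):
    for _ in range(max_retries):          # bounded: no chance of an infinite loop
        try:
            return await commit()
        except RetryCommit:
            continue                      # transient: try the commit again
        except Exception:
            await abort()                 # unexpected: clean up the session parts
            raise
    await abort()                         # retries exhausted: clean up and give up
    raise RuntimeError('chunked upload commit did not succeed after %d attempts' % max_retries)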
Access block_store through instance variable The completer already has access to the block store through an instance variable so it doesn't need to use the BlockCache property.
@@ -235,7 +235,7 @@ class Completer: # Check to see if the dependency has been seen or is in the # current chain (block_store) if dependency not in self._seen_txns and not \ - self.block_cache.block_store.has_transaction( + self._block_store.has_transaction( dependency): self._unsatisfied_dependency_count.inc() @@ -347,9 +347,8 @@ class Completer: if batch_id in self.batch_cache: return self.batch_cache[batch_id] - block_store = self.block_cache.block_store try: - return block_store.get_batch(batch_id) + return self._block_store.get_batch(batch_id) except ValueError: return None @@ -359,9 +358,8 @@ class Completer: batch_id = self._seen_txns[transaction_id] return self.get_batch(batch_id) - block_store = self.block_cache.block_store try: - return block_store.get_batch_by_transaction(transaction_id) + return self._block_store.get_batch_by_transaction(transaction_id) except ValueError: return None
Update generic.txt Moving to OSTAP (reason: )
@@ -11604,12 +11604,6 @@ m9b4s2.site lucian0lu1.freeheberg.org -# Reference: https://twitter.com/abuse_ch/status/1338042129483001859 -# Reference: https://urlhaus.abuse.ch/url/883464/ -# Reference: https://www.virustotal.com/gui/ip-address/188.127.224.100/relations - -http://188.127.224.100 - # Reference: https://www.virustotal.com/gui/file/1303a2d7876790af2cc196a816df2261506b157605006e603246b58f09408888/detection http://148.72.155.40
make_test_environ_builder: use url_scheme from path if provided. When providing an https URL in the path ("https://example.com/"), we expect to get the https scheme in the environment.
@@ -40,10 +40,10 @@ def make_test_environ_builder( if subdomain: http_host = '{0}.{1}'.format(subdomain, http_host) + url = url_parse(path) if url_scheme is None: - url_scheme = app.config['PREFERRED_URL_SCHEME'] + url_scheme = url.scheme or app.config['PREFERRED_URL_SCHEME'] - url = url_parse(path) base_url = '{0}://{1}/{2}'.format( url_scheme, url.netloc or http_host, app_root.lstrip('/') )
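A hedged usage sketch of what the change enables (it assumes the patched builder is in effect; the app and route are illustrative): passing a full https URL to the test client should surface as the https scheme in the WSGI environ, while plain paths still fall back to PREFERRED_URL_SCHEME.

from flask import Flask, request

app = Flask(__name__)

@app.route('/')
def index():
    return request.scheme   # reads environ['wsgi.url_scheme']

with app.test_client() as client:
    assert client.get('https://example.com/').data == b'https'
    assert client.get('/').data == b'http'   # default PREFERRED_URL_SCHEME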
Update simulator_qiskit.py better warning for typical faulty call with device=fake_XXX and forgotten samples keyword
@@ -261,7 +261,7 @@ class BackendCircuitQiskit(BackendCircuit): qiskit_backend = self.retrieve_device('statevector_simulator') else: if 'statevector' not in str(self.device): - raise TequilaException('For simulation, only state vector simulators are supported; recieved {}'.format(self.device)) + raise TequilaException('For simulation, only state vector simulators are supported; recieved device={}, you might have forgoten to set the samples keyword - e.g. (device={}, samples=1000). If not set, tequila assumes that full wavefunction simualtion is demanded which is not compatible with qiskit devices or fake devices except for device=statevector'.format(self.device, self.device)) else: qiskit_backend = self.retrieve_device(self.device) else:
add description of `e2e-home-appliance-status-monitoring`
@@ -16,6 +16,7 @@ The examples folder contains example solutions across a variety of Google Cloud * [Dataflow Python Examples](examples/dataflow-python-examples) - Various ETL examples using the Dataflow Python SDK. * [Data Generator](examples/dataflow-data-generator) - Generate random data with a custom schema at scale for integration tests or demos. * [Dataflow Streaming Benchmark](examples/dataflow-streaming-benchmark) - Utility to publish randomized fake JSON messages to a Cloud Pub/Sub topic at a configured QPS. +* [Home Appliance Status Monitoring from Smart Power Readings](examples/e2e-home-appliance-status-monitoring) - An end-to-end demo system featuring a suite of Google Cloud Platform products such as IoT Core, ML Engine, BigQuery, etc. * [IoT Nirvana](examples/iot-nirvana) - An end-to-end Internet of Things architecture running on Google Cloud Platform. * [Pub/Sub Client Batching Example](examples/pubsub-publish-avro-example) - Batching in Pub/Sub's Java client API. * [QAOA](examples/qaoa) - Examples of parsing a max-SAT problem in a proprietary format.
version: improve link type detection fall back. If we fail to determine the cache type, print a link to the related web page. Fixes * Update dvc/command/version.py * fix formatting
@@ -10,7 +10,7 @@ from dvc.command.base import CmdBaseNoRepo, append_doc_link from dvc.exceptions import DvcException, NotDvcRepoError from dvc.scm.base import SCMError from dvc.system import System -from dvc.utils import relpath +from dvc.utils import error_link from dvc.utils.pkg import PKG from dvc.version import __version__ @@ -59,13 +59,7 @@ class CmdVersion(CmdBaseNoRepo): fs_type = self.get_fs_type(repo.cache.local.cache_dir) info.append(f"Cache directory: {fs_type}") else: - logger.warning( - "Unable to detect supported link types, as cache " - "directory '{}' doesn't exist. It is usually auto-created " - "by commands such as `dvc add/fetch/pull/run/import`, " - "but you could create it manually to enable this " - "check.".format(relpath(repo.cache.local.cache_dir)) - ) + info.append("Cache types: " + error_link("no-dvc-cache")) except NotDvcRepoError: root_directory = os.getcwd()
fix(api): migration referenced invalid field. The buggy code did not show up during testing. The reason is that the issue was with migrating the existing data rows. However, migrations are applied to an empty database, so the buggy code was in fact not tested.
@@ -17,7 +17,7 @@ def forward_convert(apps, schema_editor): rrsets = RRset.objects.filter(domain=domain) created = rrsets.aggregate(Max('created'))['created__max'] - published = rrsets.aggregate(Max('published'))['published__max'] or created + published = rrsets.aggregate(Max('updated'))['updated__max'] or created # .update() operates on a queryset (not on a Model instance) Domain.objects.filter(pk=domain.pk).update(published=max(created, published))
Remove ambiguity from the git checkout command used by the detection testing code.
@@ -70,9 +70,13 @@ class GithubService: # No checking to see if the hash is to a commit inside of the branch - the user # has to do that by hand. + + # -- ensures that we check out the appropriate branch or commit hash. + # Without --, there can be ambiguity if a file/folder exists with the + # same name as the branch, causing the checkout to fail with error if commit_hash is not None: print("Checking out commit hash: [%s]" % (commit_hash)) - self.security_content_repo_obj.git.checkout(commit_hash) + self.security_content_repo_obj.git.checkout(commit_hash, '--') else: #Even if we have fetched a PR, we still MUST check out the branch to # be able to do anything with it. Otherwise we won't have the files @@ -80,7 +84,7 @@ class GithubService: (security_content_branch), end='') sys.stdout.flush() self.security_content_repo_obj.git.checkout( - security_content_branch) + security_content_branch, '--') commit_hash = self.security_content_repo_obj.head.object.hexsha print("commit_hash %s" % (commit_hash))
test: Check that Kanban Board is not editable by a non-system user who does not own the board. The user should only be able to view the board and drag the cards: no column actions, no board actions.
context("Kanban Board", () => { before(() => { - cy.login(); + cy.login("[email protected]"); cy.visit("/app"); }); @@ -96,4 +96,36 @@ context("Kanban Board", () => { .first() .should("not.contain", "ID:"); }); + + it("Checks if Kanban Board edits are blocked for non-System Manager and non-owner of the Board", () => { + // create admin kanban board + cy.call("frappe.tests.ui_test_helpers.create_todo", { description: "Frappe User ToDo" }); + + cy.switch_to_user("Administrator"); + cy.call("frappe.tests.ui_test_helpers.create_admin_kanban"); + // remove sys manager + cy.remove_role("[email protected]", "System Manager"); + + cy.switch_to_user("[email protected]"); + + cy.visit("/app/todo/view/kanban/Admin Kanban"); + + // Menu button should be hidden (dropdown for 'Save Filters' and 'Delete Kanban Board') + cy.get(".no-list-sidebar .menu-btn-group .btn-default[data-original-title='Menu']").should( + "have.length", + 0 + ); + // Kanban Columns should be visible (read-only) + cy.get(".kanban .kanban-column").should("have.length", 2); + // User should be able to add card (has access to ToDo) + cy.get(".kanban .add-card").should("have.length", 2); + // Column actions should be hidden (dropdown for 'Archive' and indicators) + cy.get(".kanban .column-options").should("have.length", 0); + + cy.add_role("[email protected]", "System Manager"); + }); + + after(() => { + cy.call("logout"); + }); });
fix Some modules set __file__ as None This is not allowed (the __file__ attribute MUST be either a string, or unset), but seems to happen anyway and is easy to work around in bottle.
@@ -3753,7 +3753,7 @@ class FileCheckerThread(threading.Thread): files = dict() for module in list(sys.modules.values()): - path = getattr(module, '__file__', '') + path = getattr(module, '__file__', '') or '' if path[-4:] in ('.pyo', '.pyc'): path = path[:-1] if path and exists(path): files[path] = mtime(path)
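The subtlety behind the one-character fix, shown in isolation: getattr's default only kicks in when the attribute is missing, not when it exists but is None, so the later slice needs the extra `or ''` guard (the module name here is made up).

class FakeModule:
    __file__ = None   # present but None, as some importers leave it

path = getattr(FakeModule, '__file__', '')        # default NOT used: path is None
# path[-4:] would raise TypeError: 'NoneType' object is not subscriptable

path = getattr(FakeModule, '__file__', '') or ''  # patched form: falls back to ''
path[-4:]                                         # '' -- safe to slice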
Hard code top package name. Using the package name to identify the top module name can cause issues if the package is renamed for any reason. Hard coding the top module ensures more resilience against these types of accidental failures and provides an obvious mapping for the file names.
@@ -36,7 +36,6 @@ import pkg_resources as pkg import random from mistral import exceptions as exc -from mistral import version # Thread local storage. @@ -176,10 +175,7 @@ def update_dict(left, right): def get_file_list(directory): - base_path = pkg.resource_filename( - version.version_info.package, - directory - ) + base_path = pkg.resource_filename("mistral", directory) return [path.join(base_path, f) for f in os.listdir(base_path) if path.isfile(path.join(base_path, f))]
settings page: Apply dark theme to 'code' elements in night mode. The example regexes for linkifier settings are not themed according to the user's dark theme setting. Modify 'night_mode.scss' to render all 'code' elements with dark theme.
@@ -566,8 +566,7 @@ on a dark background, and don't change the dark labels dark either. */ } #feedback_container, - .rendered_markdown code, - .feedback_content code, + code, .typeahead.dropdown-menu { background-color: hsl(212, 25%, 15%); border-color: hsla(0, 0%, 0%, 0.5);
fix(nuke): the path was only working with C: Also the replace was moved to Extract Review
@@ -45,8 +45,7 @@ class ExtractReviewLutData(pype.api.Extractor): # assign to representations instance.data["lutPath"] = os.path.join( - exporter.stagingDir, exporter.file).replace("\\", "/").replace( - "C:/", "C\\:/") + exporter.stagingDir, exporter.file).replace("\\", "/") instance.data["representations"] += data["representations"] self.log.debug(
[sync] increase wait timeouts from 5 to 40 sec reduces CPU wakes
@@ -1908,7 +1908,7 @@ class SyncEngine: return changes, now def wait_for_local_changes( - self, timeout: float = 5, delay: float = 1 + self, timeout: float = 40, delay: float = 1 ) -> Tuple[List[SyncEvent], float]: """ Waits for local file changes. Returns a list of local changes with at most one @@ -4020,13 +4020,13 @@ class SyncMonitor: self.running = Event() # create new event to let old threads shut down - self.local_observer_thread = Observer(timeout=5) + self.local_observer_thread = Observer(timeout=40) self.local_observer_thread.setName("maestral-fsobserver") self._watch = self.local_observer_thread.schedule( self.fs_event_handler, self.sync.dropbox_path, recursive=True ) - for emitter in self.local_observer_thread.emitters: - emitter.setName("maestral-fsemitter") + for i, emitter in enumerate(self.local_observer_thread.emitters): + emitter.setName(f"maestral-fsemitter-{i}") self.helper_thread = Thread( target=helper, daemon=True, args=(self,), name="maestral-helper" @@ -4147,9 +4147,9 @@ class SyncMonitor: self.sync.cancel_pending.clear() self.local_observer_thread.stop() - self.local_observer_thread.join() - self.helper_thread.join() - self.upload_thread.join() + # self.local_observer_thread.join() + # self.helper_thread.join() + # self.upload_thread.join() logger.info(STOPPED)
Skip collecting items for skipif_ocs_version or skipif_upgraded_from in conftest.py for deployments and teardown
@@ -98,7 +98,7 @@ def pytest_logger_config(logger_config): logger_config.set_formatter_class(OCSLogFormatter) -def pytest_collection_modifyitems(session, config, items): +def pytest_collection_modifyitems(session, items): """ A pytest hook to filter out skipped tests satisfying skipif_ocs_version or skipif_upgraded_from @@ -109,6 +109,9 @@ def pytest_collection_modifyitems(session, config, items): items: list of collected tests """ + teardown = config.RUN['cli_params'].get('teardown') + deploy = config.RUN['cli_params'].get('deploy') + if not (teardown or deploy): for item in items[:]: skipif_ocs_version_marker = item.get_closest_marker( "skipif_ocs_version"
refactor: delete_from_table. A whole lot of unnecessary complexity (closures, multiple function calls, comprehensions), all of which can be replaced with a single `get_all`.
import os import shutil +from typing import List import frappe import frappe.defaults @@ -188,39 +189,24 @@ def update_naming_series(doc): revert_series_if_last(doc.meta.autoname, doc.name, doc) -def delete_from_table(doctype, name, ignore_doctypes, doc): +def delete_from_table(doctype: str, name: str, ignore_doctypes: List[str], doc): if doctype != "DocType" and doctype == name: frappe.db.delete("Singles", {"doctype": name}) else: frappe.db.delete(doctype, {"name": name}) - # get child tables if doc: - tables = [d.options for d in doc.meta.get_table_fields()] - + child_doctypes = [d.options for d in doc.meta.get_table_fields()] else: - - def get_table_fields(field_doctype): - if field_doctype == "Custom Field": - return [] - - return [ - r[0] - for r in frappe.get_all( - field_doctype, + child_doctypes = frappe.get_all( + "DocField", fields="options", filters={"fieldtype": ["in", frappe.model.table_fields], "parent": doctype}, - as_list=1, + pluck="options", ) - ] - - tables = get_table_fields("DocField") - if not frappe.flags.in_install == "frappe": - tables += get_table_fields("Custom Field") - # delete from child tables - for t in list(set(tables)): - if t not in ignore_doctypes: - frappe.db.delete(t, {"parenttype": doctype, "parent": name}) + child_doctypes_to_delete = set(child_doctypes) - set(ignore_doctypes) + for child_doctype in child_doctypes_to_delete: + frappe.db.delete(child_doctype, {"parenttype": doctype, "parent": name}) def update_flags(doc, flags=None, ignore_permissions=False):