message (string, lengths 13–484) · diff (string, lengths 38–4.63k)
Reduce the number of points in bounding polygons; a very high number of points causes performance issues in badly written software
@@ -572,11 +572,11 @@ def calculate_polygonal_environment(im: PIL.Image.Image = None, angle = np.arctan2(dir_vec[1], dir_vec[0]) if topline: - upper_seam = geom.LineString(_calc_seam(baseline, upper_polygon, angle)).simplify(1) - bottom_seam = geom.LineString(_calc_seam(offset_baseline, bottom_offset_polygon, angle)).simplify(1) + upper_seam = geom.LineString(_calc_seam(baseline, upper_polygon, angle)).simplify(5) + bottom_seam = geom.LineString(_calc_seam(offset_baseline, bottom_offset_polygon, angle)).simplify(5) else: - upper_seam = geom.LineString(_calc_seam(offset_baseline, upper_offset_polygon, angle)).simplify(1) - bottom_seam = geom.LineString(_calc_seam(baseline, bottom_polygon, angle)).simplify(1) + upper_seam = geom.LineString(_calc_seam(offset_baseline, upper_offset_polygon, angle)).simplify(5) + bottom_seam = geom.LineString(_calc_seam(baseline, bottom_polygon, angle)).simplify(5) # ugly workaround against GEOM parallel_offset bug creating a # MultiLineString out of offset LineString
BUG: fix for 2D. Fixed unit test for 2D; use only variable names that are common to all test instruments.
@@ -432,13 +432,12 @@ class TestBasics(): self.testInst.load(2009, 1) # Ensure the desired data variable is present and delete all others - self.testInst.data = self.testInst.data[['dummy1', 'mlt']] + self.testInst.data = self.testInst.data[['mlt']] # Test output with one data variable output = self.testInst.__str__() assert output.find('Number of variables:') > 0 assert output.find('Variable Names') > 0 - assert output.find('dummy1') > 0 assert output.find('mlt') > 0 # -------------------------------------------------------------------------
Fix the GPU trainer for NoneCalibration and RNN Summary: Pull Request resolved:
@@ -1496,6 +1496,12 @@ class Net(object): if device_option is not None: ops = [copy.deepcopy(op) for op in ops] map(lambda x: x.device_option.CopyFrom(device_option), ops) + for op in ops: + if op.type == "RecurrentNetwork": + for arg in op.arg: + if arg.name.endswith('step_net'): + for step_op in arg.n.op: + step_op.device_option.CopyFrom(device_option) self._ExtendOps(ops) return self
Update task.py Add subtask shuffling to the CoQA task
@@ -19,6 +19,7 @@ import bigbench.api.task as task from bigbench.benchmark_tasks.coqa_conversational_question_answering.coqa_official_evaluation_script import \ CoQAEvaluator import os +import random class CoQA(task.Task): @@ -61,8 +62,13 @@ class CoQA(task.Task): ) def evaluate_model(self, model, max_examples=-1, random_seed=None): + if random_seed: + random.seed(random_seed) + else: + random.seed(0) max_examples = max_examples if max_examples > 0 else self.num_examples turn_ids = list(self.evaluator.gold_data.keys()) + random.shuffle(turn_ids) batch_start_index = 0 predicted_answers = {}
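A minimal, self-contained sketch (not from the BIG-bench code) of the seeding pattern used in the CoQA change above, showing that falling back to a fixed seed makes the shuffle of example IDs reproducible across runs:

```python
import random

def shuffled_ids(ids, random_seed=None):
    # mirrors the patch: fall back to seed 0 so subsampling stays reproducible
    random.seed(random_seed if random_seed else 0)
    ids = list(ids)
    random.shuffle(ids)
    return ids

print(shuffled_ids(range(10)))      # identical order on every run
print(shuffled_ids(range(10), 42))  # different order, but also deterministic
```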
[varLib] Optimize GDEF VarStore when generating varfonts Sweet...
@@ -518,6 +518,11 @@ def _merge_OTL(font, model, master_fonts, axisTags): GDEF.Version = 0x00010003 GDEF.VarStore = store + # Optimize + varidx_map = store.optimize() + GDEF.remap_device_varidxes(varidx_map) + font['GPOS'].table.remap_device_varidxes(varidx_map) + # Pretty much all of this file should be redesigned and moved inot submodules...
Update _instrument.py Minor improvement to concat_data docstring
@@ -707,7 +707,8 @@ class Instrument(object): user provided value is used instead. For xarray, dim='time' is passed along to xarray.concat - except if the user includes a value as a keyword argument. + except if the user includes a value for dim as a + keyword argument. Parameters ----------
Update rays.py Cleanup regarding generators and detectors. Separated the logic better.
@@ -9,11 +9,9 @@ Subclasses can provide a computed ray for Monte Carlo simulation. """ class Rays: - def __init__(self, rays=[], maxCount=0, histogramOnly=False): + def __init__(self, rays=[]): self.rays = rays - self.maxCount = maxCount self.iteration = 0 - self.histogramOnly = histogramOnly # We cache these because they can be lengthy to calculate self._yValues = None @@ -24,10 +22,6 @@ class Rays: self._thetaHistogram = None self._directionBinEdges = None - @property - def count(self): - return len(self) - @property def yValues(self): if self._yValues is None: @@ -96,18 +90,20 @@ class Rays: fig = plt.figure() axis1 = fig.add_subplot() (x,y) = self.rayCountHistogram() - axis1.plot(x,y,'k-') + axis1.plot(x,y,'k-',label="Intensity") plt.xlabel("Distance") plt.ylim([0, max(y)*1.1]) if showTheta: (x,y) = self.rayAnglesHistogram() axis2 = axis1.twiny() - axis2.plot(x,y,'k--') + axis2.plot(x,y,'k--',label="Orientation profile") plt.xlabel("Angles [rad]") plt.xlim([-pi/2,pi/2]) plt.ylim([0, max(y)*1.1]) +# legend = axis1.legend(loc='upper right', shadow=True, fontsize='x-large') + plt.ylabel("Ray count") plt.title(title) plt.show() @@ -122,16 +118,19 @@ class Rays: plt.show() def __len__(self) -> int: - if self.array is not None: - return len(self.array) + if self.rays is not None: + return len(self.rays) else: - return self.maxCount + return 0 def __iter__(self): self.iteration = 0 return self def __next__(self) -> Ray : + if self.rays is None: + raise StopIteration + if self.iteration < len(self.rays): ray = self.rays[self.iteration] self.iteration += 1 @@ -140,9 +139,7 @@ class Rays: raise StopIteration def append(self, ray): - if self.histogramOnly: - raise NotImplemented - else: + if self.rays is not None: self.rays.append(ray) # Invalidate cached values @@ -241,24 +238,23 @@ class RandomUniformRays(Rays): super(RandomUniformRays, self).__init__(rays=rays) class RandomLambertianRays(Rays): - def __init__(self, yMax, yMin=None, M=10000): + def __init__(self, yMax, yMin=None, maxCount=10000): self.yMax = yMax self.yMin = yMin if yMin is None: self.yMin = -yMax + self.maxCount = maxCount + self.thetaMax = -pi/2 self.thetaMin = pi/2 - self.M = M - super(RandomLambertianRays, self).__init__(rays=None) - @property - def count(self): - return self.M + def __len__(self) -> int: + return self.maxCount def __next__(self) -> Ray : - if self.iteration >= self.M: + if self.iteration >= self.maxCount: raise StopIteration self.iteration += 1
tweak xla_bridge.py flags * add environment variables for jax_disable_most_optimizations and jax_cpu_backend_variant * comment on the default values in help strings
@@ -30,7 +30,7 @@ from absl import logging logging._warn_preinit_stderr = 0 import jax.lib -from .._src.config import flags +from .._src.config import flags, bool_env from . import tpu_driver_client from . import xla_client from jax._src import util, traceback_util @@ -52,21 +52,26 @@ flags.DEFINE_string( 'provided, --jax_xla_backend takes priority. Prefer --jax_platform_name.') flags.DEFINE_string( 'jax_backend_target', 'local', - 'Either "local" or "rpc:address" to connect to a remote service target.') + 'Either "local" or "rpc:address" to connect to a remote service target. ' + 'The default is "local".') flags.DEFINE_string( 'jax_platform_name', - os.getenv('JAX_PLATFORM_NAME', ''), + os.getenv('JAX_PLATFORM_NAME', '').lower(), 'Platform name for XLA. The default is to attempt to use a GPU or TPU if ' 'available, but fall back to CPU otherwise. To set the platform manually, ' - 'pass "cpu" for CPU, "gpu" for GPU, etc.') + 'pass "cpu" for CPU, "gpu" for GPU, etc. If intending to use CPU, ' + 'setting the platform name to "cpu" can silence warnings that appear with ' + 'the default setting.') flags.DEFINE_bool( - 'jax_disable_most_optimizations', False, + 'jax_disable_most_optimizations', + bool_env('JAX_DISABLE_MOST_OPTIMIZATIONS', False), 'Try not to do much optimization work. This can be useful if the cost of ' 'optimization is greater than that of running a less-optimized program.') flags.DEFINE_string( - 'jax_cpu_backend_variant', 'tfrt', - 'jax_cpu_backend_variant selects cpu backend variant: stream_executor or ' - 'tfrt') + 'jax_cpu_backend_variant', + os.getenv('JAX_CPU_BACKEND_VARIANT', 'tfrt'), + 'Selects CPU backend runtime variant: "stream_executor" or "tfrt". The ' + 'default is "tfrt".') def get_compile_options( num_replicas: int,
Update interactive-message-buttons.rst Fixed a small typo
@@ -76,7 +76,7 @@ Name Give your action a descriptive name. URL - The actions are backed by an integration that handles HTTP POST requests when users clicks the message button. The URL parameter determines where this action is sent to. The request contains an ``application/json`` JSON string. + The actions are backed by an integration that handles HTTP POST requests when users click the message button. The URL parameter determines where this action is sent to. The request contains an ``application/json`` JSON string. Context The requests sent to the specified URL contain the user id and any context that was provided in the action definition. A simple example is given below:
Only change what is essential for the test fix. Revert change to keep list iteration. Fix method name typo.
@@ -263,7 +263,6 @@ For example: # Import python libs from __future__ import absolute_import -import collections import difflib import itertools import logging @@ -614,7 +613,7 @@ def _find_keep_files(root, keep): ''' real_keep = set() real_keep.add(root) - if isinstance(keep, collections.Iterable): + if isinstance(keep, list): for fn_ in keep: if not os.path.isabs(fn_): continue @@ -634,7 +633,7 @@ def _clean_dir(root, keep, exclude_pat): Clean out all of the files and directories in a directory (root) while preserving the files in a list (keep) and part of exclude_pat ''' - real_keep = _find_keep_files_old(root, keep) + real_keep = _find_keep_files(root, keep) removed = set() def _delete_not_kept(nfn): if nfn not in real_keep:
Update README.md * Update README.md add News section for zz request. * Update README.md * Update README.md
DGL is an easy-to-use, high performance and scalable Python package for deep learning on graphs. DGL is framework agnostic, meaning if a deep graph model is a component of an end-to-end application, the rest of the logics can be implemented in any major frameworks, such as PyTorch, Apache MXNet or TensorFlow. <p align="center"> - <img src="https://i.imgur.com/DwA1NbZ.png" alt="DGL v0.4 architecture" width="600"> + <img src="http://data.dgl.ai/asset/image/DGL-Arch.png" alt="DGL v0.4 architecture" width="600"> <br> <b>Figure</b>: DGL Overall Architecture </p> +## <img src="http://data.dgl.ai/asset/image/new.png" width="30">DGL News +03/02/2020: DGL has be chosen as the implemenation base for [Graph Neural Network benchmark framework](https://arxiv.org/abs/2003.00982), which enchmarks framework to novel medium-scale graph datasets from mathematical modeling, computer vision, chemistry and combinatorial problems. Models implemented are [here](https://github.com/graphdeeplearning/benchmarking-gnns). ## Using DGL @@ -101,16 +103,16 @@ class GATLayer(nn.Module): Table: Training time(in seconds) for 200 epochs and memory consumption(GB) High memory utilization allows DGL to push the limit of single-GPU performance, as seen in below images. -| <img src="https://i.imgur.com/CvXc9Uu.png" width="400"> | <img src="https://i.imgur.com/HnCfJyU.png" width="400"> | +| <img src="http://data.dgl.ai/asset/image/DGLvsPyG-time1.png" width="400"> | <img src="http://data.dgl.ai/asset/image/DGLvsPyG-time2.png" width="400"> | | -------- | -------- | **Scalability**: DGL has fully leveraged multiple GPUs in both one machine and clusters for increasing training speed, and has better performance than alternatives, as seen in below images. <p align="center"> - <img src="https://i.imgur.com/IGERtVX.png" width="600"> + <img src="http://data.dgl.ai/asset/image/one-four-GPUs.png" width="600"> </p> -| <img src="https://i.imgur.com/BugYro2.png"> | <img src="https://i.imgur.com/KQ4nVdX.png"> | +| <img src="http://data.dgl.ai/asset/image/one-four-GPUs-DGLvsGraphVite.png"> | <img src="http://data.dgl.ai/asset/image/one-fourMachines.png"> | | :---------------------------------------: | -- |
Pinning CircleCI image to the one used when build times were usual (Consulted with CircleCI support).
@@ -8,7 +8,7 @@ version: 2 jobs: quick-build: docker: - - image: circleci/python:3.6 + - image: circleci/python@sha256:b568e83138255d4f0b89610ac18af53d4e2bd13c2e1b5538607ec3dc729e7eb4 # Pinning CircleCI image to the one used when build times were usual (Consulted with CircleCI support). environment: DISTRIB: "conda" PYTHON_VERSION: "3.6"
fix: don't report managing Adobe-only users when we haven't. Needed to distinguish writing the output file from management.
@@ -99,12 +99,11 @@ class RuleProcessor(object): self.read_stray_key_map(options['stray_list_input_path']) self.stray_list_output_path = options['stray_list_output_path'] - # determine whether we need to process strays at all - self.will_process_strays = (not options['exclude_strays']) and (options['manage_groups'] or - options['stray_list_output_path'] or - options['disentitle_strays'] or - options['remove_strays'] or - options['delete_strays']) + # determine what processing is needed on strays + self.will_manage_strays = (options['manage_groups'] or options['disentitle_strays'] or + options['remove_strays'] or options['delete_strays']) + self.will_process_strays = (not options['exclude_strays']) and (options['stray_list_output_path'] or + self.will_manage_strays) # in/out variables for per-user after-mapping-hook code self.after_mapping_hook_scope = { @@ -193,7 +192,7 @@ class RuleProcessor(object): ['adobe_users_excluded', 'Number of Adobe users excluded from updates'], ['adobe_users_unchanged', 'Number of non-excluded Adobe users with no changes'], ['adobe_users_created', 'Number of new Adobe users added'], - ['adobe_users_updated', 'Number of existing Adobe users updated'], + ['adobe_users_updated', 'Number of matching Adobe users updated'], ] if self.will_process_strays: if self.options['delete_strays']: @@ -204,7 +203,7 @@ class RuleProcessor(object): action = 'removed from all groups' else: action = 'with groups processed' - action_summary_description.append(['adobe_strays_processed', 'Number of Adobe-only users ' + action + ':']) + action_summary_description.append(['adobe_strays_processed', 'Number of Adobe-only users ' + action]) # prepare the network summary umapi_summary_format = 'Number of%s%s UMAPI actions sent (total, success, error)' @@ -436,6 +435,7 @@ class RuleProcessor(object): stray_count = len(self.get_stray_keys()) if self.stray_list_output_path: self.write_stray_key_map() + if self.will_manage_strays: max_missing = self.options['max_adobe_only_users'] if stray_count > max_missing: self.logger.critical('Unable to process Adobe-only users, as their count (%s) is larger '
Add PHP-Noise API to Art & Design PHP-Noise is a Noise-Background-Image-Generator. The API is documented right under the form on the website. For more documentation, you can provide the help parameter to the API.
@@ -117,6 +117,7 @@ API | Description | Auth | HTTPS | CORS | | [Icons8](http://docs.icons8.apiary.io/#reference/0/meta) | Icons | `OAuth` | Yes | Unknown | | [Metropolitan Museum of Art](https://metmuseum.github.io/) | Met Museum of Art | No | Yes | Unknown | | [Noun Project](http://api.thenounproject.com/index.html) | Icons | `OAuth` | No | Unknown | +| [PHP-Noise](https://php-noise.com/) | Noise Background Image Generator | No | Yes | Yes | | [Pixel Encounter](https://pixelencounter.com/api) | SVG Icon Generator | No | Yes | No | | [Rijksmuseum](https://www.rijksmuseum.nl/en/api) | Art | `apiKey` | Yes | Unknown |
Updated DNS Made Easy API Documentation link * Updated DNS Made Easy API Documentation link Updated API Docs link to the current one * Reload CI
@@ -43,7 +43,7 @@ The current supported providers are: - Dinahosting ([docs](https://en.dinahosting.com/api)) - DirectAdmin ([docs](https://www.directadmin.com/features.php?id=504)) - DNSimple ([docs](https://developer.dnsimple.com/)) -- DnsMadeEasy ([docs](http://www.dnsmadeeasy.com/pdf/API-Docv2.pdf)) +- DnsMadeEasy ([docs](https://api-docs.dnsmadeeasy.com/?version=latest)) - DNSPark ([docs](https://dnspark.zendesk.com/entries/31210577-REST-API-DNS-Documentation)) - DNSPod ([docs](https://support.dnspod.cn/Support/api)) - Dreamhost ([docs](https://help.dreamhost.com/hc/en-us/articles/217560167-API_overview))
Update smokeloader.txt and
@@ -333,7 +333,7 @@ statexadver3552ap93.club statexadver3552mn12.club swissmarine.club zel.biz -(advertmarin|advertserv|advertstat|advexmai|cmailadvert|dsmaild|kxserv|kxservxmar|mailadvert|mailserv|mailsmall|mailstat|sdstat|smantex|starserver|statexadver|zmailserv)[0-9][0-9a-z]+\.(com|club|world) +(advertmarin|advertpage|advertserv|advertstat|advexmai|cmailadvert|dsmaild|kxserv|kxservxmar|mailadvert|mailserv|mailsmall|mailstat|sdstat|smantex|starserver|statexadver|zmailserv)[0-9][0-9a-z]+\.(com|club|world) # Reference: https://www.virustotal.com/gui/file/b1b974ceee5968a8453e015356edfded1e9dcba5dda50320f78abf24a4a3e0dd/relations @@ -346,6 +346,7 @@ zel.biz /logstat95/ /logstatx77/ /serverlogs29/ +/serverstat315/ # Reference: https://twitter.com/benkow_/status/1164894072580071424
[tune/release] Make long running distributed PBT cheaper The test currently uses 6 GPUs out of 8 available, so we can get rid of one instance. Savings will be 25% for one instance less (3 instead of 4).
@@ -10,8 +10,8 @@ head_node_type: worker_node_types: - name: worker_node instance_type: g3.8xlarge - min_workers: 3 - max_workers: 3 + min_workers: 2 + max_workers: 2 use_spot: false aws:
Update hive_ransomware.txt Minor update for Ref section
# See the file 'LICENSE' for copying permission # Reference: https://twitter.com/fbgwls245/status/1408632067181604865 +# Reference: https://otx.alienvault.com/pulse/60db5d29be7b348bae7da15f # Reference: https://www.virustotal.com/gui/file/77a398c870ad4904d06d455c9249e7864ac92dda877e288e5718b3c8d9fc6618/detection hivecust6vhekztbqgdnkks64ucehqacge3dij3gyrrpdp57zoq3ooqd.onion
scripts/eddn-report: EliteLogAgent latest is 2.0.0.660 This allows for ignoring the ancient 0.9.2.412 that showed some errors yesterday.
@@ -174,6 +174,11 @@ def process_file(input_file: str) -> None: else: print(line) + elif matches.group('software_name') == 'EliteLogAgent': + # Apparently a Barry Carylon project, but no home page ? + if matches.group('software_version') == '2.0.0.660': + print(line) + ################################################################### # Issues we know about, but haven't yet alerted developers to ###################################################################
Update login.html Use SITE_NAME for login box title on login page (with default value). This can be useful when using multiple PowerDNS-Admin instances in an organization.
<body class="hold-transition login-page"> <div class="login-box"> <div class="login-logo"> - <a href="{{ url_for('index.index') }}"><b>{{ SITE_NAME }}</b></a> + <a href="{{ url_for('index.index') }}"> + {% if SETTING.get('site_name') %} + <b>{{ SITE_NAME }}</b> + {% else %} + <b>PowerDNS</b>-Admin + {% endif %} + </a> </div> <!-- /.login-logo --> <div class="login-box-body">
Cabana: fix auto zoom y-axis for multiple line series auto zoom y-axis for multiple series
@@ -365,10 +365,10 @@ void ChartView::updateAxisY() { } } else { for (auto &s : sigs) { - for (int i = 0; i < s.series->count(); ++i) { - double y = s.series->at(i).y(); - if (y < min_y) min_y = y; - if (y > max_y) max_y = y; + auto begin = std::lower_bound(s.vals.begin(), s.vals.end(), axis_x->min(), [](auto &p, double x) { return p.x() < x; }); + for (auto it = begin; it != s.vals.end() && it->x() <= axis_x->max(); ++it) { + if (it->y() < min_y) min_y = it->y(); + if (it->y() > max_y) max_y = it->y(); } } }
[IMPR] Faster lookup for write actions Use a set instead of a tuple for lookup, which is up to 20 times faster.
@@ -1221,7 +1221,7 @@ class Request(MutableMapping): # Actions that imply database updates on the server, used for various # things like throttling or skipping actions when we're in simulation # mode - self.write = self.action in ( + self.write = self.action in { 'block', 'clearhasmsg', 'createaccount', 'delete', 'edit', 'emailuser', 'filerevert', 'flowthank', 'imagerotate', 'import', 'managetags', 'mergehistory', 'move', 'options', 'patrol', @@ -1233,7 +1233,7 @@ class Request(MutableMapping): 'wbremovequalifiers', 'wbremovereferences', 'wbsetaliases', 'wbsetclaim', 'wbsetclaimvalue', 'wbsetdescription', 'wbsetlabel', 'wbsetqualifier', 'wbsetreference', 'wbsetsitelink', - ) + } # Client side verification that the request is being performed # by a logged in user, and warn if it isn't a config username. if self.write:
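A quick, self-contained way to check the speed claim in the commit message above; the action names are a subset of those visible in the diff, and absolute timings will vary by machine:

```python
import timeit

actions_tuple = ('block', 'delete', 'edit', 'import', 'mergehistory', 'move',
                 'options', 'patrol', 'wbsetlabel', 'wbsetsitelink')
actions_set = set(actions_tuple)

# membership in a tuple is a linear scan; in a set it is a single hash lookup
t_tuple = timeit.timeit(lambda: 'wbsetsitelink' in actions_tuple, number=1_000_000)
t_set = timeit.timeit(lambda: 'wbsetsitelink' in actions_set, number=1_000_000)
print(f'tuple: {t_tuple:.3f}s  set: {t_set:.3f}s')
```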
Slight correction on nu_to_M for circular orbits Corrected to simplify the nu_to_M case for a circular orbit (M=E=nu @ e=0)
@@ -13,7 +13,7 @@ def danby_coe(k, p, ecc, inc, raan, argp, nu, tof, numiter=20, rtol=1e-8): if ecc == 0: # Solving for circular orbit - M0 = E_to_M(nu_to_E(nu, ecc), ecc) + M0 = nu # for circular orbit M = E = nu M = M0 + n * tof nu = M - 2 * np.pi * np.floor(M / 2 / np.pi) return nu
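For reference, the standard Kepler relations behind that one-line simplification (textbook formulas, not taken from the poliastro source):

```latex
M = E - e\sin E, \qquad
\tan\frac{E}{2} = \sqrt{\frac{1-e}{1+e}}\,\tan\frac{\nu}{2}
\quad\Longrightarrow\quad
e = 0:\; M = E,\ E = \nu \ \Rightarrow\ M_0 = \nu .
```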
shell/pkgcore.sh: _choose(): break retry loop after 3 invalid inputs [skip ci]
@@ -27,20 +27,30 @@ unset PKGSHELL SCRIPTDIR # usage: _choose "${array[@]}" # returns: index of array choice (assuming array indexing starts at 1) _choose() { - local choice x i=0 num_opts=$# + local choice num_opts=$# + + # show available choices + local x i=0 for x in $@; do echo " $(( ++i )): ${x}" >&2 done + + # read user choice, checking for invalid values + local invalid=0 while true; do echo -n "Please select one: " >&2 choice=$(_read_nchars ${#num_opts}) if [[ ! ${choice} =~ [0-9]+ || ${choice} -lt 1 || ${choice} -gt ${num_opts} ]]; then + (( invalid++ )) echo " -- Invalid choice!" >&2 + # three invalids seen, giving up + [[ ${invalid} -gt 2 ]] && break continue fi echo >&2 break done + # default to array indexes starting at 0 (( choice-- )) echo $(_array_index ${choice})
Transfers: fix exceptions in globus poller. Closes Globus tries to set transfer to success without an external id. Also, __str__ for the globus transfertool can return None, which is not expected for this method.
@@ -57,10 +57,11 @@ def bulk_group_transfers(transfer_paths, policy='single', group_bulk=200): class GlobusTransferStatusReport(TransferStatusReport): supported_db_fields = [ - 'state' + 'state', + 'external_id', ] - def __init__(self, request_id, globus_response): + def __init__(self, request_id, external_id, globus_response): super().__init__(request_id) if globus_response == 'FAILED': @@ -71,6 +72,9 @@ class GlobusTransferStatusReport(TransferStatusReport): new_state = RequestState.SUBMITTED self.state = new_state + self.external_id = None + if new_state in [RequestState.FAILED, RequestState.DONE]: + self.external_id = external_id def initialize(self, session, logger=logging.log): pass @@ -89,6 +93,8 @@ class GlobusTransferTool(Transfertool): :param external_host: The external host where the transfertool API is running """ + if not external_host: + external_host = 'Globus Online Transfertool' super().__init__(external_host, logger) self.group_bulk = group_bulk self.group_policy = group_policy @@ -191,7 +197,7 @@ class GlobusTransferTool(Transfertool): response = {} for transfer_id, requests in requests_by_eid.items(): for request_id in requests: - response.setdefault(transfer_id, {})[request_id] = GlobusTransferStatusReport(request_id, job_responses[transfer_id]) + response.setdefault(transfer_id, {})[request_id] = GlobusTransferStatusReport(request_id, transfer_id, job_responses[transfer_id]) return response def bulk_update(self, resps, request_ids):
Stop leaking file descriptors and other tidying
@@ -20,8 +20,9 @@ def init_save_file(webui: "RedWebUi") -> str: fd, webui.save_path = tempfile.mkstemp( prefix="", dir=webui.config["save_dir"] ) + os.close(fd) return os.path.split(webui.save_path)[1] - except (OSError, IOError): + except OSError: # Don't try to store it. pass return None # should already be None, but make sure @@ -31,10 +32,9 @@ def save_test(webui: "RedWebUi", top_resource: HttpResource) -> None: """Save a test by test_id.""" if webui.test_id: try: - tmp_file = gzip.open(webui.save_path, "w") + with gzip.open(webui.save_path, "w") as tmp_file: pickle.dump(top_resource, tmp_file) - tmp_file.close() - except (IOError, zlib.error, pickle.PickleError): + except (OSError, zlib.error, pickle.PickleError): pass # we don't cry if we can't store it. @@ -54,7 +54,7 @@ def extend_saved_test(webui: "RedWebUi") -> None: location = b"%s&descend=True" % location webui.exchange.response_start(b"303", b"See Other", [(b"Location", location)]) webui.output("Redirecting to the saved test page...") - except (OSError, IOError): + except OSError: webui.exchange.response_start( b"500", b"Internal Server Error", @@ -67,11 +67,13 @@ def extend_saved_test(webui: "RedWebUi") -> None: def load_saved_test(webui: "RedWebUi") -> None: """Load a saved test by test_id.""" try: - fd = gzip.open( + with gzip.open( os.path.join(webui.config["save_dir"], os.path.basename(webui.test_id)) - ) + ) as fd: mtime = os.fstat(fd.fileno()).st_mtime - except (OSError, IOError, TypeError, zlib.error): + is_saved = mtime > thor.time() + top_resource = pickle.load(fd) + except (OSError, TypeError): webui.exchange.response_start( b"404", b"Not Found", @@ -83,10 +85,7 @@ def load_saved_test(webui: "RedWebUi") -> None: webui.output("I'm sorry, I can't find that saved response.") webui.exchange.response_done([]) return - is_saved = mtime > thor.time() - try: - top_resource = pickle.load(fd) - except (pickle.PickleError, IOError, EOFError): + except (pickle.PickleError, zlib.error, EOFError): webui.exchange.response_start( b"500", b"Internal Server Error", @@ -98,8 +97,6 @@ def load_saved_test(webui: "RedWebUi") -> None: webui.output("I'm sorry, I had a problem loading that.") webui.exchange.response_done([]) return - finally: - fd.close() if webui.check_name: display_resource = top_resource.subreqs.get(webui.check_name, top_resource)
[internal] Re-enable skipped JVM inference tests. Fixes [ci skip-build-wheels]
@@ -325,7 +325,7 @@ def test_infer_java_imports_same_target_with_cycle(rule_runner: RuleRunner) -> N ) == InferredDependencies(dependencies=[target_a.address]) [email protected](reason="https://github.com/pantsbuild/pants/issues/13056") +@maybe_skip_jdk_test def test_dependencies_from_inferred_deps(rule_runner: RuleRunner) -> None: rule_runner.write_files( { @@ -406,7 +406,7 @@ def test_dependencies_from_inferred_deps(rule_runner: RuleRunner) -> None: ) [email protected](reason="https://github.com/pantsbuild/pants/issues/13056") +@maybe_skip_jdk_test def test_package_private_dep(rule_runner: RuleRunner) -> None: rule_runner.write_files( {
Update 1.0.8 Changelog Update CHANGES.md
* With the new `cron_schedule` argument to `TimeWindowPartitionsDefinition`, you can now supply arbitrary cron expressions to define time window-based partition sets. * Graph-backed assets can now be subsetted for execution via `AssetsDefinition.from_graph(my_graph, can_subset=True)`. * `RunsFilter` is now exported in the public API. +* [dagster-k8s] The `dagster-user-deployments.deployments[].schedulerName` Helm value for specifying custom Kubernetes schedulers will now also apply to run and step workers launched for the given user deployment. Previously it would only apply to the grpc server. ### Bugfixes
Prevent a race between registering and shutting down a service May also help with the test issue seen in and worked around in
@@ -186,6 +186,9 @@ class ClientMonitor(object): if not isinstance(message, tuple): raise TypeError("Expected tuple, got " + str(type(message))) command, arg = message + with lock: + if exiting.is_set(): + raise RuntimeError("service is exiting, cannot connect") if command == "register": process = psutil.Process(int(arg)) with self.cond:
log2html: Render log entries with JavaScript Instead of generating a static file, save log data as JSON and render it dynamically with JavaScript. This is in preparation for supporting live filtering on things like debug level and log content.
import os import re import sys +import json import logging import argparse @@ -32,20 +33,46 @@ HTML_TEMPLATE = """<head> overflow-x: auto; }} </style> + <script> + var content = {content}; + + function createEntry(entry) {{ + var outer = document.createElement("div"); + outer.className = "box_log"; + + var details = document.createElement("details"); + outer.appendChild(details); + + var summary = document.createElement("summary"); + summary.innerText = entry[0] + " " + entry[2]; + details.appendChild(summary); + + var desc = document.createElement("pre"); + details.appendChild(desc); + + details.addEventListener("toggle", event => {{ + if (details.open) {{ + desc.innerText = entry[3]; + + }} + }}); + + return outer; + }} + + window.onload = function loadData() {{ + for (const entry of content) {{ + document.getElementById("entries").appendChild(createEntry(entry)); + }} + }} + </script> + </head> <body> - {logs} + <div id="entries" /> </body> """ -LOG_TEMPLATE = """ <div class="box_log"> - <details> - <summary>{summary}</summary> - <pre>{details}</pre> - </details> - </div> -""" - def parse_logs(stream): """Parse lines in a log and return entries.""" @@ -73,20 +100,13 @@ def parse_logs(stream): def generate_log_page(stream, output): """Generate HTML output for log output.""" - logs = [] - for logpoint in parse_logs(stream): - date = logpoint[0] - first_line = logpoint[2] - content = logpoint[3] - - summary = date + " " + first_line[first_line.find(" ") :] - logs.append(LOG_TEMPLATE.format(summary=summary, details=content)) + logs = list(parse_logs(stream)) if not logs: _LOGGER.warning("No log points found, not generating output") return - page = HTML_TEMPLATE.format(logs="\n".join(logs)) + page = HTML_TEMPLATE.format(content=json.dumps(logs)) if not output: print(page) else:
better YAML conversion support Expand the conversion tool to support optionally providing the hardware role name, the IPMI username and the IPMI password
@@ -36,7 +36,7 @@ import sys import yaml -def to_yaml(path, cluster_name): +def to_yaml(path, cluster_name, hardware_type, ipmi_username, ipmi_password): """ Opens and reads the given file, which is assumed to be in cluster.txt format. Returns a string with cluster.txt converted to YAML or dies trying. @@ -64,11 +64,11 @@ def to_yaml(path, cluster_name): 'mac_address': mac, 'ip_address': ip, 'ipmi_address': ipmi, - 'ipmi_username': None, - 'ipmi_password': None, + 'ipmi_username': ipmi_username, + 'ipmi_password': ipmi_password, 'domain': domain, 'role': role, - 'hardware_type': None, + 'hardware_type': hardware_type, 'cobbler_profile': 'bcpc_host', } @@ -124,6 +124,19 @@ def main(): "-t", "--text", help="convert from YAML to text (THIS WILL DISCARD DATA)", action="store_true") + parser.add_argument( + "-H", "--hardware_type", + default=None, + help="chef hardware role name") + parser.add_argument( + "-U", "--ipmi_username", + default=None, + help="IPMI username") + parser.add_argument( + "-P", "--ipmi_password", + default=None, + help="IPMI password") + args = parser.parse_args() if not os.path.exists(args.path) or not os.access(args.path, os.R_OK): @@ -132,7 +145,11 @@ def main(): if args.text: print(to_text(args.path)) else: - print(to_yaml(args.path, args.cluster_name)) + print(to_yaml(args.path, + args.cluster_name, + args.hardware_type, + args.ipmi_username, + args.ipmi_password)) if __name__ == "__main__":
Fix named arguments after kwargs In Python 3, you're unable to send named parameters after **kwargs. * Use single quotes
@@ -1281,7 +1281,8 @@ class TelegramClient(TelegramBareClient): def send_voice_note(self, *args, **kwargs): """Wrapper method around .send_file() with is_voice_note=True""" - return self.send_file(*args, **kwargs, is_voice_note=True) + kwargs['is_voice_note'] = True + return self.send_file(*args, **kwargs) def _send_album(self, entity, files, caption=None, progress_callback=None, reply_to=None):
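A generic sketch of the forwarding pattern used above, with hypothetical function names rather than Telethon's API: assign into kwargs before forwarding instead of mixing `**kwargs` with extra named arguments at the call site.

```python
def send_file(*args, is_voice_note=False, **kwargs):
    return {'args': args, 'is_voice_note': is_voice_note, **kwargs}

def send_voice_note(*args, **kwargs):
    # set the flag in kwargs first, instead of writing
    # send_file(*args, **kwargs, is_voice_note=True) at the call site
    kwargs['is_voice_note'] = True
    return send_file(*args, **kwargs)

print(send_voice_note('chat', caption='hello'))
# {'args': ('chat',), 'is_voice_note': True, 'caption': 'hello'}
```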
configure-system: Skip some steps on virtual Do not configure processor microcode or IPMI on virtual builds; it's time consuming and pointless. This improves build time of a 1h1w by ~2% in my lab.
# IPMI module configuration and loading +- name: Configure the kernel's IPMI module + block: - name: Load ipmi_devintf kernel module modprobe: name: ipmi_devintf mode: '0644' vars: kernel_module_name: ipmi_devintf + when: ansible_virtualization_role == "NA" # ip_conntrack module configuration and loading - name: Configure ip_conntrack kernel module when loaded name: kexec-tools # Control processor microcode application +- name: Control processor microcode application + block: - name: Check if AMD processor command: grep -q AuthenticAMD /proc/cpuinfo ignore_errors: true - name: Update initramfs for all kernels command: update-initramfs -uk all when: amd_microcode.changed or intel_microcode.changed + when: ansible_virtualization_role == "NA"
Fix `intersphinx_mapping` in the Sphinx config Allows Sphinx linking of classes, functions, and modules across projects, e.g. :class:`~mxnet.gluon.HybridBlock`.
# documentation root, use os.path.abspath to make it absolute, like shown here. # # import os -# import sys +import sys + # sys.path.insert(0, os.path.abspath('.')) @@ -176,4 +177,10 @@ texinfo_documents = [ # Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'https://docs.python.org/': None} +intersphinx_mapping = { + 'python': (f'https://docs.python.org/{sys.version_info.major}', None), + 'mxnet': ('https://mxnet.apache.org/', None), + 'numpy': ('http://docs.scipy.org/doc/numpy/', None), + 'scipy': ('http://docs.scipy.org/doc/scipy/reference', None), + 'matplotlib': ('http://matplotlib.org/', None), +}
offset baseline separators from actual baseline in dataset Otherwise our postprocessor produced cut off lines
@@ -596,12 +596,18 @@ class BaselineSet(Dataset): return self.transform(im, target) @staticmethod - def _get_ortho_line(lineseg, point): + def _get_ortho_line(lineseg, point, line_width, offset): lineseg = np.array(lineseg) norm_vec = lineseg[1,...] - lineseg[0,...] norm_vec_len = np.sqrt(np.sum(norm_vec**2)) unit_vec = norm_vec / norm_vec_len ortho_vec = unit_vec[::-1] * ((1,-1), (-1,1)) + print('{} {}'.format(point, lineseg)) + if offset == 'l': + point -= unit_vec * line_width + else: + point += unit_vec * line_width + print('after: {}'.format(point)) return (ortho_vec * 10 + point).astype('int').tolist() def transform(self, image, target): @@ -618,9 +624,9 @@ class BaselineSet(Dataset): for point in line: l.append((int(point[0]*scale), int(point[1]*scale))) line_mask.line(l, fill=255, width=self.line_width) - sep_1 = [tuple(x) for x in self._get_ortho_line(l[:2], l[0])] + sep_1 = [tuple(x) for x in self._get_ortho_line(l[:2], l[0], self.line_width, 'l')] separator_mask.line(sep_1, fill=255, width=self.line_width) - sep_2 = [tuple(x) for x in self._get_ortho_line(l[-2:], l[-1])] + sep_2 = [tuple(x) for x in self._get_ortho_line(l[-2:], l[-1], self.line_width, 'r')] separator_mask.line(sep_2, fill=255, width=self.line_width) del line_mask del separator_mask
[modules/git] Fix recursion error Endless loop caused by a stupid bug when trying to find the root git repository.
@@ -22,7 +22,7 @@ class Module(bumblebee.engine.Module): super(Module, self).__init__(engine, config, bumblebee.output.Widget(full_text=self.gitinfo) ) - self._fmt = self.parameter("format", "{branch}") + self._fmt = self.parameter("format", "{branch} - {directory}") def gitinfo(self, widget): info = "" @@ -35,16 +35,17 @@ class Module(bumblebee.engine.Module): directory = self._get_git_root(directory) repo = pygit2.Repository(directory) data["branch"] = repo.head.shorthand + data["directory"] = directory except Exception as e: return e return string.Formatter().vformat(self._fmt, (), data) def _get_git_root(self, directory): - while directory != "/": + while len(directory) > 1: if os.path.exists(os.path.join(directory, ".git")): return directory directory = "/".join(directory.split("/")[0:-1]) - return None + return "/" # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
added docstring to download_multi_pdf also made the PDF name a parameter which is passed by the user
@@ -17,7 +17,44 @@ standard_format = "templates/print_formats/standard.html" @frappe.whitelist() def download_multi_pdf(doctype, name, format=None): - # name can include names of many docs of the same doctype. + """ + Concatenate multiple docs as PDF . + + Returns a PDF compiled by concatenating multiple documents. The documents + can be from a single DocType or multiple DocTypes + + Note: The design may seem a little weird, but it exists exists to + ensure backward compatibility. The correct way to use this function is to + pass a dict to doctype as described below + + NEW FUNCTIONALITY + ================= + Parameters: + doctype (dict): + key (string): DocType name + value (list): of strings of doc names which need to be concatenated and printed + name (string): + name of the pdf which is generated + format: + Print Format to be used + + Returns: + PDF: A PDF generated by the concatenation of the mentioned input docs + + OLD FUNCTIONALITY - soon to be deprecated + ========================================= + Parameters: + doctype (string): + name of the DocType to which the docs belong which need to be printed + name (string or list): + If string the name of the doc which needs to be printed + If list the list of strings of doc names which needs to be printed + format: + Print Format to be used + + Returns: + PDF: A PDF generated by the concatenation of the mentioned input docs + """ import json output = PdfFileWriter() @@ -33,7 +70,7 @@ def download_multi_pdf(doctype, name, format=None): for doctype_name in doctype: for doc_name in doctype[doctype_name]: output = frappe.get_print(doctype_name, doc_name, format, as_pdf = True, output = output) - frappe.local.response.filename = "{}.pdf".format(frappe.session.user.replace('@', '-')) + frappe.local.response.filename = "{}.pdf".format(name) frappe.local.response.filecontent = read_multi_pdf(output) frappe.local.response.type = "download"
Don't generate core name when adding core to corepool. When a core from the corepool is added to a cache, its old name is loaded, and cores in the corepool cannot be referenced by name anyway, so a new name is not needed.
@@ -534,8 +534,7 @@ int cache_mngt_prepare_core_cfg(struct ocf_mngt_core_config *cfg, struct kcas_insert_core *cmd_info) { struct block_device *bdev; - static char core_name[OCF_CORE_NAME_SIZE]; - struct cache_priv *cache_priv; + char core_name[OCF_CORE_NAME_SIZE] = {}; ocf_cache_t cache; uint16_t core_id; int result; @@ -546,9 +545,10 @@ int cache_mngt_prepare_core_cfg(struct ocf_mngt_core_config *cfg, if (cmd_info->core_id == OCF_CORE_MAX) { result = mngt_get_cache_by_id(cas_ctx, cmd_info->cache_id, &cache); - if (result) + if (result && result != -OCF_ERR_CACHE_NOT_EXIST) { return result; - + } else if (!result) { + struct cache_priv *cache_priv; cache_priv = ocf_cache_get_priv(cache); ocf_mngt_cache_put(cache); @@ -558,6 +558,7 @@ int cache_mngt_prepare_core_cfg(struct ocf_mngt_core_config *cfg, cmd_info->core_id = core_id; } + } snprintf(core_name, sizeof(core_name), "core%d", cmd_info->core_id);
Added drop_nodes call to tests/constants.py Also rearranged dropping: Network (+Edges) -> Nodes (+Modifications) -> Namespaces -> Annotations
@@ -6,14 +6,13 @@ import tempfile import unittest from json import dumps -from requests.compat import urlparse - from pybel import BELGraph from pybel.constants import * from pybel.manager.cache import CacheManager from pybel.parser.parse_bel import BelParser from pybel.parser.parse_exceptions import * from pybel.parser.utils import any_subdict_matches +from requests.compat import urlparse log = logging.getLogger(__name__) @@ -199,9 +198,10 @@ class FleetingTemporaryCacheMixin(TemporaryCacheClsMixin): def setUp(self): super(FleetingTemporaryCacheMixin, self).setUp() + self.manager.drop_networks() + self.manager.drop_nodes() self.manager.drop_namespaces() self.manager.drop_annotations() - self.manager.drop_networks() class TestTokenParserBase(unittest.TestCase):
Fix tests again Third time's the charm to fix a find-replace error this time.
@@ -1055,7 +1055,7 @@ def test_xmp_sidecar_gps(): <rdf:li>London</rdf:li> <rdf:li>United Kingdom</rdf:li> <rdf:li>London 2018</rdf:li> - <rdf:li>St. James&#39&#39;s Park</rdf:li> + <rdf:li>St. James&#39;s Park</rdf:li> </rdf:Seq> </digiKam:TagsList> </rdf:Description>
Add the stemming library to the Dockerfile Stemming is a useful tool for text preprocessing, and it would be nice to have it available in the kernels notebook
@@ -306,6 +306,7 @@ RUN pip install --upgrade mpld3 && \ pip install python-louvain && \ pip install pyexcel-ods && \ pip install sklearn-pandas && \ + pip install stemming && \ ##### ^^^^ Add new contributions above here # clean up pip cache rm -rf /root/.cache/pip/* && \
Add fake entry for process testing Add `| Dave Machado | Dave Machado's Website | No | Yes | [Go!](http://www.davemachado.com) |` under `Development` for testing of deployment script and updating JSON.
@@ -155,6 +155,7 @@ API | Description | Auth | HTTPS | Link | | Adorable Avatars | Generate random cartoon avatars | No | Yes | [Go!](http://avatars.adorable.io) | | APIs.guru | Wikipedia for Web APIs, OpenAPI/Swagger specs for public APIs | No | Yes | [Go!](https://apis.guru/api-doc/) | | CDNJS | Library info on CDNJS | No | Yes | [Go!](https://api.cdnjs.com/libraries/jquery) | +| Dave Machado | Dave Machado's Website | No | Yes | [Go!](http://www.davemachado.com) | | Faceplusplus | A tool to detect face | `OAuth` | Yes | [Go!](https://www.faceplusplus.com/) | | Genderize.io | Determines a gender from a first name | No | Yes | [Go!](https://genderize.io) | | Github - User Data | Pull public information for a user's github | No | Yes | [Go!](https://api.github.com/users/hackeryou) |
[Chore] Add password-filename option to daemons' modules Problem: Currently modules don't support password-encrypted keys. Solution: Add a password-filename option that allows providing a password for password-encrypted keys.
@@ -37,6 +37,13 @@ rec { ''; }; + passwordFilename = mkOption { + type = types.nullOr types.path; + default = null; + description = '' + Path to the file with passwords that can be used to decrypt encrypted keys. + ''; + }; }; genDaemonConfig = { instancesCfg, service-name, service-pkgs, service-start-script, service-prestart-script ? (_: "")}: @@ -44,6 +51,7 @@ rec { users = mkMerge (flip mapAttrsToList instancesCfg (node-name: node-cfg: genUsers node-name )); systemd = mkMerge (flip mapAttrsToList instancesCfg (node-name: node-cfg: let tezos-service = service-pkgs."${node-cfg.baseProtocol}"; + passwordFilenameArg = if node-cfg.passwordFilename != null then "-f ${node-cfg.passwordFilename}" else ""; in { services."tezos-${node-name}-tezos-${service-name}" = genSystemdService node-name node-cfg service-name // rec { bindsTo = [ "network.target" "tezos-${node-name}-tezos-node.service" ]; @@ -61,10 +69,10 @@ rec { # Generate or update service config file if [[ ! -f "$service_data_dir/config" ]]; then - ${tezos-service} -d "$service_data_dir" -E "http://localhost:${toString node-cfg.rpcPort}" \ + ${tezos-service} -d "$service_data_dir" -E "http://localhost:${toString node-cfg.rpcPort}" ${passwordFilenameArg} \ config init --output "$service_data_dir/config" >/dev/null 2>&1 else - ${tezos-service} -d "$service_data_dir" -E "http://localhost:${toString node-cfg.rpcPort}" \ + ${tezos-service} -d "$service_data_dir" -E "http://localhost:${toString node-cfg.rpcPort}" ${passwordFilenameArg} \ config update >/dev/null 2>&1 fi '' + service-prestart-script node-cfg;
New session per request, or context manager Mimics the behaviour of the Session class of the requests library
@@ -47,9 +47,10 @@ class Requests(object): Wraps the requests library to simplify use with JSON REST APIs. Sets auth headers automatically, and requests JSON responses by - default. To accommodate authenticating non-API requests easily, it - uses a single session across all requests made with the same - instance. + default. + + To maintain a session of authenticated non-API requests, use + Requests as a context manager. """ def __init__(self, domain_name, base_url, username, password, verify=True): self.domain_name = domain_name @@ -57,15 +58,28 @@ class Requests(object): self.username = username self.password = password self.verify = verify - self.session = requests.Session() + self._session = None + + def __enter__(self): + self._session = requests.Session() + return self + + def __exit__(self, *args): + self._session.close() + self._session = None @log_request - def send_request(self, method_func, *args, **kwargs): + def send_request(self, method, *args, **kwargs): raise_for_status = kwargs.pop('raise_for_status', False) if not self.verify: kwargs['verify'] = False try: - response = method_func(*args, **kwargs) + if self._session: + response = self._session.request(method, *args, **kwargs) + else: + # Mimics the behaviour of requests.api.request() + with requests.Session() as session: + response = session.request(method, *args, **kwargs) if raise_for_status: response.raise_for_status() except requests.RequestException: @@ -81,20 +95,22 @@ class Requests(object): def delete(self, uri, **kwargs): kwargs.setdefault('headers', {'Accept': 'application/json'}) - return self.send_request(self.session.delete, self.get_url(uri), + return self.send_request('DELETE', self.get_url(uri), auth=(self.username, self.password), **kwargs) def get(self, uri, *args, **kwargs): kwargs.setdefault('headers', {'Accept': 'application/json'}) - return self.send_request(self.session.get, self.get_url(uri), *args, + kwargs.setdefault('allow_redirects', True) + return self.send_request('GET', self.get_url(uri), *args, auth=(self.username, self.password), **kwargs) - def post(self, uri, *args, **kwargs): + def post(self, uri, data=None, json=None, *args, **kwargs): kwargs.setdefault('headers', { 'Content-type': 'application/json', 'Accept': 'application/json' }) - return self.send_request(self.session.post, self.get_url(uri), *args, + return self.send_request('POST', self.get_url(uri), *args, + data=data, json=json, auth=(self.username, self.password), **kwargs)
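The wrapper above mimics a standard pattern from the requests library itself; a self-contained sketch of that underlying behaviour (httpbin.org is just an example endpoint):

```python
import requests

# One-off call: a Session is created and closed internally for this request.
print(requests.get('https://httpbin.org/get').status_code)

# Several calls sharing one connection pool, released when the block exits --
# the behaviour the Requests wrapper reproduces via __enter__/__exit__.
with requests.Session() as session:
    for path in ('get', 'headers'):
        print(session.get(f'https://httpbin.org/{path}').status_code)
```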
Update sentinel-2.yaml Hi, I've just added my tutorial. I did this because a lot of the content I found online was outdated and not working.
@@ -65,6 +65,10 @@ Resources: Type: SNS Topic DataAtWork: Tutorials: + - Title: How to Work with Landsat and Sentinel-2 on AWS with Python + URL: https://www.matecdev.com/posts/landsat-sentinel-aws-s3-python.html + AuthorName: Martin D. Maas + AuthorURL: https://www.matecdev.com - Title: Getting Started With Geospatial Data Analysis URL: https://github.com/samx18/geospatial_analysis/blob/main/geospatial_analysis.ipynb AuthorName: Sam Palani
Fix typo changed header to header.txt
@@ -23,7 +23,7 @@ recipe: head -n1 dbNSFP*_variant.chrM > $UNPACK_DIR/header.txt rm dbNSFP*_variant.chrM # unzip only files with chromosomal info, eg. skip genes and readme. - cat $UNPACK_DIR/header > dbNSFP.txt + cat $UNPACK_DIR/header.txt > dbNSFP.txt unzip -p dbNSFPv*.zip "dbNSFP*_variant.chr*" | grep -v '^#chr' | sort -T $UNPACK_DIR -k1,1 -k2,2n >> dbNSFP.txt bgzip dbNSFP.txt #extract readme file, used by VEP plugin to add vcf header info
Remove alpha warning for 'projects.create' API. No longer alpha / invite-only. Closes
@@ -23,18 +23,6 @@ With this API, you can do the following: Don't forget to look at the :ref:`Authentication` section below. It's slightly different from the rest of this library. -.. warning:: - - Alpha - - The `projects.create() API method`_ is in the Alpha stage. It might be changed in - backward-incompatible ways and is not recommended for production use. It is - not subject to any SLA or deprecation policy. Access to this feature is - currently invite-only. For an invitation, contact our sales team at - https://cloud.google.com/contact. - -.. _projects.create() API method: https://cloud.google.com/resource-manager/docs/creating-project - Installation ------------
swap vanguard with yield curve for a more pleasant gallery viewing experience
@@ -114,7 +114,23 @@ layout = html.Div(className='gallery', children=[ and to push data. ''', ), + AppSection( + app_name='3-D Yield Curve', + app_link='https://dash-yield-curve.plot.ly', + code_link='https://github.com/plotly/dash-yield-curve', + img_src='assets/images/gallery/dash-yield-curve-app.png', + description=''' + This Dash app adapts the New York Times' excellent + report: [A 3-D View of a Chart That Predicts The Economic Future: The Yield Curve](https://www.nytimes.com/interactive/2015/03/19/upshot/3d-yield-curve-economic-growth.html). + + Dash comes with a wide range of interactive 3-D chart types, + such as 3-D scatter plots, surface plots, network graphs and ribbon plots. + [View more 3-D chart examples](https://plot.ly/python/3d-charts/). + ''' + ) + ]), + reusable.Row([ AppSection( app_name='Vanguard Report', app_link='https://dash-gallery.plotly.host/dash-vanguard-report', @@ -136,26 +152,8 @@ layout = html.Div(className='gallery', children=[ same framework for both the rich interactive applications and the static PDF reports. ''' - ) - - ]), - - reusable.Row([ - AppSection( - app_name='3-D Yield Curve', - app_link='https://dash-yield-curve.plot.ly', - code_link='https://github.com/plotly/dash-yield-curve', - img_src='assets/images/gallery/dash-yield-curve-app.png', - description=''' - This Dash app adapts the New York Times' excellent - report: [A 3-D View of a Chart That Predicts The Economic Future: The Yield Curve](https://www.nytimes.com/interactive/2015/03/19/upshot/3d-yield-curve-economic-growth.html). - - Dash comes with a wide range of interactive 3-D chart types, - such as 3-D scatter plots, surface plots, network graphs and ribbon plots. - [View more 3-D chart examples](https://plot.ly/python/3d-charts/). - ''' - ), + , AppSection( app_name='Recession in 255 Charts',
Pickling DateTime Objects with Timezones Closes
@@ -28,6 +28,9 @@ class StaticTzInfo(tzinfo): raise ValueError('Not naive datetime (tzinfo is already set)') return dt.replace(tzinfo=self) + def __getinitargs__(self): + return self.__name, self.__offset + def pop_tz_offset_from_string(date_string, as_offset=True): for name, info in _tz_offsets:
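Why `__getinitargs__` helps here, shown with a stand-alone toy class rather than dateparser's StaticTzInfo: the `datetime.tzinfo` base class consults `__getinitargs__()` when pickling and re-creates the instance from those arguments on load.

```python
import pickle
from datetime import timedelta, tzinfo

class StaticTz(tzinfo):
    """Toy fixed-offset zone whose constructor arguments must survive pickling."""
    def __init__(self, name, offset):
        self._name, self._offset = name, offset
    def utcoffset(self, dt): return self._offset
    def dst(self, dt): return timedelta(0)
    def tzname(self, dt): return self._name
    def __getinitargs__(self):
        # pickle re-calls StaticTz(*these args) when loading the object
        return self._name, self._offset

tz = pickle.loads(pickle.dumps(StaticTz('UTC+02:00', timedelta(hours=2))))
print(tz.tzname(None), tz.utcoffset(None))  # UTC+02:00 2:00:00
```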
Additional operator information values Summary: Pull Request resolved: Closes Modified the values reported by the benchmarking platform to include tensor_shape and op_args. These values have a different naming scheme to values like flops and latency.
namespace caffe2 { const std::string NetObserverReporterPrint::IDENTIFIER = "Caffe2Observer "; +static std::string get_op_args(PerformanceInformation p); +static std::string get_tensor_shapes(PerformanceInformation p); void NetObserverReporterPrint::report( NetBase* net, @@ -27,6 +29,9 @@ void NetObserverReporterPrint::report( {"flops", {{"value", "-1"}, {"unit", "flops"}}}}; } else if (p.first != "NET_DELAY") { // for operator perf + std::string shape_str = get_tensor_shapes(p.second); + std::string args_str = get_op_args(p.second); + caffe2_perf[p.first] = { {"latency", {{"value", caffe2::to_string(p.second.latency * 1000)}, @@ -36,7 +41,9 @@ void NetObserverReporterPrint::report( "value", caffe2::to_string(p.second.flops), }, - {"unit", "flops"}}}}; + {"unit", "flops"}}}, + {"tensor_shapes", {{"info_string", shape_str}, {"unit", ""}}}, + {"op_args", {{"info_string", args_str}, {"unit", ""}}}}; } } @@ -67,4 +74,52 @@ void NetObserverReporterPrint::report( LOG(INFO) << buffer.str(); } } + +static std::string get_tensor_shapes(PerformanceInformation p) { + std::string shape_str; + std::stringstream shape_stream; + if (!p.tensor_shapes.empty()) { + shape_stream << "["; + for (int i = 0; i < p.tensor_shapes.size(); i++) { + shape_stream << "["; + for (int j = 0; j < p.tensor_shapes[i].dims_size(); j++) { + shape_stream << p.tensor_shapes[i].dims(j) << ", "; + } + shape_stream << "], "; + } + shape_stream << "]"; + shape_str = shape_stream.str(); + } else { + shape_str = "[]"; + } + return shape_str; +} + +static std::string get_op_args(PerformanceInformation p) { + std::string args_str; + if (!p.args.empty()) { + std::stringstream args; + args << "["; + for (int i = 0; i < p.args.size(); i++) { + args << "{" << p.args[i].name() << ": "; + if (p.args[i].has_i()) { + args << p.args[i].i(); + } else if (p.args[i].has_s()) { + args << p.args[i].s(); + } else if (p.args[i].has_n()) { + args << &p.args[i].n(); + } else if (p.args[i].has_f()) { + args << p.args[i].f(); + } else { + args << "None"; + } + args << "}, "; + } + args << "]"; + args_str = args.str(); + } else { + args_str = "[]"; + } + return args_str; +} }
Python3.5: Fixup for versions before 3.5.2 * The API used was introduced in a minor version. * While at it, inline the copy with our own stuff for slight increase in speed.
@@ -89,6 +89,49 @@ static void Nuitka_Generator_entry_point( struct Nuitka_GeneratorObject *generat } #endif +#if PYTHON_VERSION >= 300 +static void Nuitka_SetStopIterationValue( PyObject *value ) +{ + CHECK_OBJECT( value ); + +#if PYTHON_VERSION <= 352 + PyObject *args[1] = { value }; + PyObject *stop_value = CALL_FUNCTION_WITH_ARGS1( PyExc_StopIteration, args ); + + if (unlikely( stop_value == NULL )) + { + return; + } + + Py_INCREF( PyExc_StopIteration ); + RESTORE_ERROR_OCCURRED( PyExc_StopIteration, stop_value, NULL ); +#else + if (likely( !PyTuple_Check(value) && !PyExceptionInstance_Check(value) )) + { + Py_INCREF( PyExc_StopIteration ); + Py_INCREF( value ); + + RESTORE_ERROR_OCCURRED( PyExc_StopIteration, value, NULL ); + } + else + { + + PyObject *args[1] = { value }; + PyObject *stop_value = CALL_FUNCTION_WITH_ARGS1( PyExc_StopIteration, args ); + + if (unlikely( stop_value == NULL )) + { + return; + } + + Py_INCREF( PyExc_StopIteration ); + + RESTORE_ERROR_OCCURRED( PyExc_StopIteration, stop_value, NULL ); + } +#endif +} +#endif + static PyObject *Nuitka_Generator_send2( struct Nuitka_GeneratorObject *generator, PyObject *value ) { @@ -255,14 +298,7 @@ static PyObject *Nuitka_Generator_send2( struct Nuitka_GeneratorObject *generato { if ( generator->m_returned != Py_None ) { -#if PYTHON_VERSION < 350 - PyObject *args[1] = { generator->m_returned }; - PyObject *stop_value = CALL_FUNCTION_WITH_ARGS1( PyExc_StopIteration, args ); - RESTORE_ERROR_OCCURRED( PyExc_StopIteration, stop_value, NULL ); - Py_INCREF( PyExc_StopIteration ); -#else - _PyGen_SetStopIterationValue( generator->m_returned ); -#endif + Nuitka_SetStopIterationValue( generator->m_returned ); } Py_DECREF( generator->m_returned );
use new setting for part-actions PR complete for
{% block content %} +{% settings_value 'BUY_FUNCTION_ENABLE' as enable_buy %} +{% settings_value 'SELL_FUNCTION_ENABLE' as enable_sell %} +{% settings_value 'PO_FUNCTION_ENABLE' as enable_po %} +{% settings_value 'STOCK_FUNCTION_ENABLE' as enable_stock %} + <div class="panel panel-default panel-inventree"> <!-- Default panel contents --> <div class="panel-heading"><h3>{{ part.full_name }}</h3></div> </div> {% endif %} {% if part.active %} + {% if enable_buy or enable_sell %} <button type='button' class='btn btn-default' id='price-button' title='{% trans "Show pricing information" %}'> <span id='part-price-icon' class='fas fa-dollar-sign'/> </button> - {% if roles.stock.change %} + {% endif %} + {% if roles.stock.change and enable_stock %} <div class='btn-group'> <button id='stock-actions' title='{% trans "Stock actions" %}' class='btn btn-default dropdown-toggle' type='button' data-toggle='dropdown'> <span class='fas fa-boxes'></span> <span class='caret'></span> </ul> </div> {% endif %} - {% if part.purchaseable %} - {% if roles.purchase_order.add %} + {% if part.purchaseable and roles.purchase_order.add %} + {% if enable_buy and enable_po %} <button type='button' class='btn btn-default' id='part-order' title='{% trans "Order part" %}'> <span id='part-order-icon' class='fas fa-shopping-cart'/> </button>
Implement workaround for Symptom was: File "/usr/local/lib/python3.6/asyncio/streams.py", line 214, in _drain_helper assert waiter is None or waiter.cancelled()
@@ -28,6 +28,8 @@ class Link(s_coro.Fini): self.sock = self.writer.get_extra_info('socket') + self._drain_lock = asyncio.Lock(loop=plex.loop) + # disable nagle ( to minimize latency for small xmit ) self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) # enable TCP keep alives... @@ -83,6 +85,8 @@ class Link(s_coro.Fini): byts = s_msgpack.en(mesg) try: self.writer.write(byts) + # Avoid Python bug. See https://bugs.python.org/issue29930 + async with self._drain_lock: await self.writer.drain() except ConnectionError as e: await self.fini()
Remove code climate upload from docker During the afterbuild phase it was giving an error for fatal: not a git repository (or any parent up to mount point /).
@@ -48,12 +48,6 @@ jobs: uses: ./.github/actions/setup_and_test with: xvfb_command: 'xvfb-run' - - name: Upload Code Coverage to Code Climate - uses: paambaati/[email protected] - env: - CC_TEST_REPORTER_ID: 05f6288b94a87daa172d3e96a33ec331a4374be7d01eb9a42b3b21c4c550a8ff - with: - coverageCommand: poetry run coverage xml - name: Install Build Dependencies run: poetry install --no-dev --extras poethepoet - name: Create Source Dist and Wheel
Compliance Export CSV version correction Corrected the version of Mattermost where CSV files will not be limited to a specific number of rows from v5.35 to v5.36.
@@ -100,7 +100,7 @@ When run manually via the System Console, ``.csv`` and Actiance XML files are wr Is there a maximum row limit for CSV files? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -No. From Mattermost Server v5.35, there's no limit to the number of rows within Compliance Monitoring CSV files. +No. From Mattermost Server v5.36, there's no limit to the number of rows within Compliance Monitoring CSV files. Why is the Compliance Exports feature in Beta? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
export/html: Document page: styles for <blockquote> Closes
@@ -67,6 +67,12 @@ pre.code { font-family: var(--code-font-family); } +blockquote { + color: #666; + padding: .25em 0 0.25em 1rem; + border-left: 4px solid #ccc; +} + a.reference.external { display: inline-block; max-width: 100%;
pass daemon to Commands instance in qt console. follow
@@ -2184,9 +2184,12 @@ class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger): 'lnutil': lnutil, }) - c = commands.Commands(config=self.config, + c = commands.Commands( + config=self.config, + daemon=self.gui_object.daemon, network=self.network, callback=lambda: self.console.set_json(True)) + methods = {} def mkfunc(f, method): return lambda *args, **kwargs: f(method,
GitHub workflows: Use Python script for Arnold versions MSVC has trouble building from a bash shell. See
@@ -177,21 +177,38 @@ jobs: - name: Build and test Arnold extension run: | - for arnoldVersion in 6.2.0.1 7.0.0.2 - do - # Install Arnold - ./.github/workflows/main/installArnold.py --version $arnoldVersion - export ARNOLD_ROOT=$GITHUB_WORKSPACE/arnoldRoot/$arnoldVersion + import subprocess + import sys + import os + + for arnoldVersion in [ "6.2.0.1", "7.0.0.2" ] : + arnoldRoot = os.path.join( os.environ["GITHUB_WORKSPACE"], "arnoldRoot", arnoldVersion ) + os.environ["ARNOLD_ROOT"] = arnoldRoot + + subprocess.check_call( + [ + sys.executable, + ".github/workflows/main/installArnold.py", + "--version", + arnoldVersion + ] + ) #Build Arnold extension - scons -j 2 build BUILD_TYPE=${{ matrix.buildType }} OPTIONS=.github/workflows/main/sconsOptions + subprocess.check_call( "scons -j 2 build BUILD_TYPE=${{ matrix.buildType }} OPTIONS=.github/workflows/main/sconsOptions", shell = True ) + + if os.name != "nt" : # Test Arnold extension - echo "::add-matcher::./.github/workflows/main/problemMatchers/unittest.json" - ${{ matrix.testRunner }} "${{ env.GAFFER_BUILD_DIR }}/bin/gaffer test IECoreArnoldTest GafferArnoldTest GafferArnoldUITest" - echo "::remove-matcher owner=unittest::" + print( "::add-matcher::./.github/workflows/main/problemMatchers/unittest.json" ) + subprocess.check_call( "${{ matrix.testRunner }} \"" + os.path.join( os.environ["GAFFER_BUILD_DIR"], "bin", "gaffer" ) + " test IECoreArnoldTest GafferArnoldTest GafferArnoldUITest\"", shell = True ) + print( "::remove-matcher owner=unittest::" ) + # Publish ARNOLD_ROOT to the environment for subsequent steps, # so we can build the docs for GafferArnold. - echo ARNOLD_ROOT=$ARNOLD_ROOT >> $GITHUB_ENV - done + with open( os.environ["GITHUB_ENV"], "a" ) as f : + print( "Setting $ARNOLD_ROOT to '%s'" % arnoldRoot ) + f.write( 'ARNOLD_ROOT=%s\n' % arnoldRoot ) + + shell: python - name: Build Docs and Package # We currently experience sporadic hangs in the docs builds (mac), this
wsrtsegmented: don't skip over a bunch of text For some reason this code skipped over the first lines in a single segmented file and the end of it.
@@ -367,7 +367,6 @@ def _wrstsegments(entries: list) -> str: time = float(re.search(r"X-TIMESTAMP-MAP=MPEGTS:(\d+)", t).group(1)) / 90000 if time > 0: time -= 10 - text = text[3 : len(text) - 2] itmes = [] if len(text) > 1: for n in text:
More tests for rebalance_missing_suppression_count This option already has everything ops needs to control client/server error responses during rebalance; we should document its expected use more effectively.
@@ -1627,6 +1627,31 @@ class TestReplicatedObjController(CommonObjectControllerMixin, policy_opts.rebalance_missing_suppression_count = 2 do_test([Timeout(), 404, 404], 503) + # overloaded primary after double rebalance + # ... opts should increase rebalance_missing_suppression_count + policy_opts.rebalance_missing_suppression_count = 2 + do_test([Timeout(), 404, 404], 503) + + # two primaries out, but no rebalance + # ... default is fine for tombstones + policy_opts.rebalance_missing_suppression_count = 1 + do_test([Timeout(), Exception('kaboom!'), 404], 404, + include_timestamp=True) + # ... but maybe not ideal for missing names + # (N.B. 503 isn't really a BAD response here) + do_test([Timeout(), Exception('kaboom!'), 404], 503) + # still ... ops might think they should tune it down + policy_opts.rebalance_missing_suppression_count = 0 + do_test([Timeout(), Exception('kaboom!'), 404], 404) + # and we could maybe leave it like this for the next rebalance + do_test([Timeout(), 404, 404], 404) + # ... but it gets bad when faced with timeouts, b/c we can't trust a + # single primary 404 response during rebalance + do_test([Timeout(), Timeout(), 404], 404) + # ops needs to fix configs to get the 503 + policy_opts.rebalance_missing_suppression_count = 1 + do_test([Timeout(), Timeout(), 404], 503) + def test_GET_primaries_mixed_explode_and_timeout(self): req = swift.common.swob.Request.blank('/v1/a/c/o') primaries = []
Fix mypy issue with torch.linalg * add a compatibility stub for torch solve * change missing _torch_solve_cast calls * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see * bump pytorch version to 1.7.1 * Fix mypy issue with torch.linalg
@@ -3,7 +3,10 @@ import torch from packaging import version if version.parse(torch.__version__) > version.parse("1.7.1"): - from torch.linalg import solve + # TODO: remove the type: ignore once Python 3.6 is deprecated. + # It turns out that Pytorch has no attribute `torch.linalg` for + # Python 3.6 / PyTorch 1.7.0, 1.7.1 + from torch.linalg import solve # type: ignore else: from torch import solve as _solve
Properly check for pre-moderation workflow [#PLAT-921]
@@ -162,12 +162,12 @@ class PreprintSerializer(TaxonomizableSerializerMixin, JSONAPISerializer): return 'https://dx.doi.org/{}'.format(obj.article_doi) if obj.article_doi else None def get_preprint_doi_url(self, obj): + doi = None doi_identifier = obj.get_identifier('doi') if doi_identifier: doi = doi_identifier.value - else: # if proivider has pre-moderation, don't show the DOI prematurely - if obj.provider.reviews_workflow != workflows.Workflows.PRE_MODERATION: + elif obj.provider.reviews_workflow != workflows.Workflows.PRE_MODERATION.value: client = obj.get_doi_client() doi = client.build_doi(preprint=obj) if client else None return 'https://dx.doi.org/{}'.format(doi) if doi else None
Update manipulate.py Just altered findall to find_all
@@ -16,7 +16,7 @@ class Manipulate(object): manipulate: - <destination field>: - [findall]: <boolean> + [find_all]: <boolean> [phase]: <phase> [from]: <source field> [extract]: <regexp> @@ -45,7 +45,7 @@ class Manipulate(object): 'extract': {'type': 'string', 'format': 'regex'}, 'separator': {'type': 'string'}, 'remove': {'type': 'boolean'}, - 'findall': {'type': 'boolean'}, + 'find_all': {'type': 'boolean'}, 'replace': { 'type': 'object', 'properties': { @@ -119,7 +119,7 @@ class Manipulate(object): if not field_value: log.warning('Cannot extract, field `%s` is not present', from_field) continue - if config.get('findall'): + if config.get('find_all'): match = re.findall(config['extract'], field_value, re.I | re.U) log.debug('all matches: %s', match) field_value = config.get('separator', ' ').join(match).strip()
Insist on credentials for testing AMI finding Fix by only looking for the Flatcar AMI when we have AWS credentials
@@ -41,6 +41,8 @@ class AWSProvisionerBenchTest(ToilTest): Tests for the AWS provisioner that don't actually provision anything. """ + # Needs to talk to EC2 for image discovery + @needs_aws_ec2 def testAMIFinding(self): for zone in ['us-west-2a', 'eu-central-1a', 'sa-east-1b']: provisioner = AWSProvisioner('fakename', 'mesos', zone, 10000, None, None)
Update fynloski.txt Moved to ```cybergate```
@@ -1075,12 +1075,6 @@ myhtrahdd-22322.portmap.io 178.124.140.136:2033 okechu.ddns.net -# Reference: https://www.virustotal.com/gui/file/1bc564fed502e1c1769f39a9e2cdc214f1bde06357102403668428fe624f3faf/detection - -142.44.161.51:4338 -185.101.92.3:4338 -ales2018.myq-see.com - # Reference: https://www.virustotal.com/gui/file/8ae917f86da4edb9f1c78a61b2ae37acc378cc4041c88b76d015a15716af3a63/detection 104.244.75.220:3333
Enhancement - Add raw_config_args option to providers Adds raw_config_args support for providers. Fixes a copy/paste typo in the raw_config_args support for instances.
@@ -81,6 +81,13 @@ Vagrant.configure('2') do |config| end } end + + # Raw Configuration + if provider['raw_config_args'] + provider['raw_config_args'].each { |raw_config_arg| + eval("virtualbox.#{raw_config_arg}") + } + end end # The vagrant-vbguest plugin attempts to update packages @@ -119,6 +126,13 @@ Vagrant.configure('2') do |config| end } end + + # Raw Configuration + if provider['raw_config_args'] + provider['raw_config_args'].each { |raw_config_arg| + eval("vmware.#{raw_config_arg}") + } + end end end @@ -148,6 +162,13 @@ Vagrant.configure('2') do |config| end } end + + # Raw Configuration + if provider['raw_config_args'] + provider['raw_config_args'].each { |raw_config_arg| + eval("parallels.#{raw_config_arg}") + } + end end end @@ -177,6 +198,13 @@ Vagrant.configure('2') do |config| end } end + + # Raw Configuration + if provider['raw_config_args'] + provider['raw_config_args'].each { |raw_config_arg| + eval("libvirt.#{raw_config_arg}") + } + end end end end @@ -208,11 +236,10 @@ Vagrant.configure('2') do |config| end if instance['raw_config_args'] - provider['options'].each { |raw_config_arg| + instance['raw_config_args'].each { |raw_config_arg| eval("c.#{raw_config_arg}") } end - end } end
DomainExpr: switch to ComputingExpr TN:
@@ -7,7 +7,7 @@ from langkit.compiled_types import ( from langkit.diagnostics import check_multiple, check_source_language from langkit.expressions.base import ( - AbstractExpression, CallExpr, LiteralExpr, PropertyDef, ResolvedExpression, + AbstractExpression, CallExpr, ComputingExpr, LiteralExpr, PropertyDef, construct, auto_attr, render ) @@ -216,7 +216,7 @@ class Bind(AbstractExpression): lhs, rhs, pred_func, abstract_expr=self) -class DomainExpr(ResolvedExpression): +class DomainExpr(ComputingExpr): static_type = EquationType def __init__(self, domain, logic_var_expr, abstract_expr=None): @@ -226,15 +226,12 @@ class DomainExpr(ResolvedExpression): self.logic_var_expr = logic_var_expr ":type: ResolvedExpression" - super(DomainExpr, self).__init__(result_var_name='Var', + super(DomainExpr, self).__init__('Domain_Equation', abstract_expr=abstract_expr) def _render_pre(self): return render('properties/domain_ada', expr=self) - def _render_expr(self): - return str(self.result_var.name) - @property def subexprs(self): return {'domain': self.domain, 'logic_var_expr': self.logic_var_expr}
Return better errors on invalid API parameters * Return better errors on invalid API parameters For example: ``` { "success": false, "errors": { "user_id": "value is not a valid integer" } } ```
from functools import wraps from flask import request -from pydantic import create_model +from pydantic import ValidationError, create_model ARG_LOCATIONS = { "query": lambda: request.args, @@ -41,7 +41,18 @@ def validate_args(spec, location): @wraps(func) def wrapper(*args, **kwargs): data = ARG_LOCATIONS[location]() + try: + # Try to load data according to pydantic spec loaded = spec(**data).dict(exclude_unset=True) + except ValidationError as e: + # Handle reporting errors when invalid + resp = {} + errors = e.errors() + for err in errors: + loc = err["loc"][0] + msg = err["msg"] + resp[loc] = msg + return {"success": False, "errors": resp}, 400 return func(*args, loaded, **kwargs) return wrapper
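To see the error mapping in isolation, a small self-contained sketch (model and input are made up; the exact message text depends on the pydantic version):

```python
from pydantic import BaseModel, ValidationError

class Args(BaseModel):
    user_id: int

try:
    Args(user_id="not-a-number")
except ValidationError as e:
    # Same shape as the decorator's response: one message per offending field
    resp = {err["loc"][0]: err["msg"] for err in e.errors()}
    print({"success": False, "errors": resp})
    # e.g. {'success': False, 'errors': {'user_id': 'value is not a valid integer'}}
```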
fix BAU bill in proforma The escalation rate was raised to the wrong year
@@ -1142,15 +1142,17 @@ def generate_proforma(scenariomodel, template_workbook, output_file_path): current_row += 1 ws['A{}'.format(current_row)] = "Electricity bill without system ($)" ws['B{}'.format(current_row)] = 0 - ws['C{}'.format(current_row)] = '={}'.format(year_one_bau_bill_cell) electric_bau_costs_cell_series = ["\'{}\'!{}{}".format(inandout_sheet_name, "B", current_row), "\'{}\'!{}{}".format( inandout_sheet_name, "C", current_row)] - for i in range(2, financial.analysis_years + 1): - ws['{}{}'.format(upper_case_letters[1 + i], current_row)] = \ - '=${base_col}${base_row}*(1+{escalation_pct_cell}/100)^{i}'.format( - base_col="C", base_row=current_row, i=i - 1, escalation_pct_cell=escalation_pct_cell) - electric_bau_costs_cell_series.append("\'{}\'!{}{}".format(inandout_sheet_name, upper_case_letters[1 + i], + for year in range(1, financial.analysis_years + 1): + ws['{}{}'.format(upper_case_letters[year+1], current_row)] = \ + '={year_one_bau_bill} * (1 + {escalation_pct}/100)^{year}'.format( + year_one_bau_bill=year_one_bau_bill_cell, + escalation_pct=escalation_pct_cell, + year=year, + ) + electric_bau_costs_cell_series.append("\'{}\'!{}{}".format(inandout_sheet_name, upper_case_letters[1 + year], current_row)) make_attribute_row(ws, current_row, length=financial.analysis_years+2, alignment=center_align,
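A toy calculation of the corrected formula, with made-up numbers — each year's bill is the year-one bill escalated by `(1 + escalation_pct/100) ** year`, never a compounding of an already-escalated cell:

```python
year_one_bau_bill = 1200.0   # $ bill in year one (made-up value)
escalation_pct = 2.5         # % escalation per year (made-up value)
analysis_years = 25

bills = [year_one_bau_bill * (1 + escalation_pct / 100) ** year
         for year in range(1, analysis_years + 1)]
# each entry is the year-one bill raised by `year` powers of the escalation rate
```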
Installation for MacOS: use curl instead of wget See issue - For consistency, also change the instructions for Linux.
@@ -16,12 +16,12 @@ package. The Python 3 version is recommended. On MacOS, run:: - wget https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh + curl -O https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh sh Miniconda3-latest-MacOSX-x86_64.sh On Linux, run:: - wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh + curl -O https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh sh Miniconda3-latest-Linux-x86_64.sh Follow the instructions in the installer. If you encounter problems,
add options to classify text for labels and fmt Add an option to run classify_text on a label-first TSV similar to the training format. Also add an option to not print the text output at all. Finally, add a delimiter for output and switch the default from ",", its previous default, to "\t"
@@ -20,16 +20,23 @@ def main(): parser.add_argument('--model_type', type=str, default='default') parser.add_argument('--modules', default=[], nargs="+") parser.add_argument('--scores', '-s', action="store_true") + parser.add_argument('--label_first', action="store_true", help="Use the second column") + parser.add_argument("--output_delim", default="\t") + parser.add_argument("--no_text_output", action="store_true", help="Dont write the text") args = parser.parse_args() for mod_name in args.modules: bl.import_user_module(mod_name) + labels = [] if os.path.exists(args.text) and os.path.isfile(args.text): texts = [] with open(args.text, 'r') as f: for line in f: text = line.strip().split() + if args.label_first: + labels.append(text[0]) + text = text[1:] texts += [text] else: @@ -39,13 +46,25 @@ def main(): m = bl.ClassifierService.load(args.model, backend=args.backend, remote=args.remote, name=args.name, preproc=args.preproc, device=args.device, model_type=args.model_type) + + if args.label_first: + label_iter = iter(labels) for texts in batched: for text, output in zip(texts, m.predict(texts)): + + if args.no_text_output: + text_output = '' + else: + text_output = ' '.join(text) + {args.output_delim} if args.scores: - print("{}, {}".format(" ".join(text), output)) + guess_output = output else: - print("{}, {}".format(" ".join(text), output[0][0])) + guess_output = output[0][0] + s = f"{text_output}{guess_output}" + if args.label_first: + s = f"{next(label_iter)}{args.output_delim}{s}" + print(s) if __name__ == '__main__': main()
Small update to signing region override logic Only override the signing region if the arn region is used. This ensures that if the client region is used, we will use the signing region that the client was originally resolved to.
@@ -1388,10 +1388,9 @@ class S3EndpointSetter(object): def set_endpoint(self, request, **kwargs): if self._use_accesspoint_endpoint(request): self._validate_accesspoint_supported(request) - region_name = self._get_region_for_accesspoint_endpoint(request) + region_name = self._resolve_region_for_accesspoint_endpoint( + request) self._switch_to_accesspoint_endpoint(request, region_name) - self._override_signing_region_if_needed( - request, region_name, kwargs['region_name']) return if self._use_accelerate_endpoint: switch_host_s3_accelerate(request=request, **kwargs) @@ -1427,9 +1426,14 @@ class S3EndpointSetter(object): ) ) - def _get_region_for_accesspoint_endpoint(self, request): + def _resolve_region_for_accesspoint_endpoint(self, request): if self._s3_config.get('use_arn_region', True): - return request.context['s3_accesspoint']['region'] + accesspoint_region = request.context['s3_accesspoint']['region'] + # If we are using the region from the access point, + # we will also want to make sure that we set it as the + # signing region as well + self._override_signing_region(request, accesspoint_region) + return accesspoint_region return self._region def _switch_to_accesspoint_endpoint(self, request, region_name): @@ -1479,10 +1483,7 @@ class S3EndpointSetter(object): dns_suffix = resolved['dnsSuffix'] return dns_suffix - def _override_signing_region_if_needed(self, request, region_name, - current_signing_region): - if region_name == current_signing_region: - return + def _override_signing_region(self, request, region_name): signing_context = { 'region': region_name, }
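For orientation, the user-facing knob this logic serves can be sketched client-side (region and values are made up; this is not code from the library itself):

```python
import boto3
from botocore.config import Config

# With use_arn_region=True (the default), requests made against a
# cross-region access point ARN are routed and signed for the ARN's
# region; with False, the client's own resolved region is kept.
s3 = boto3.client(
    "s3",
    region_name="us-west-2",
    config=Config(s3={"use_arn_region": False}),
)
```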
Reapply: Add support for PyTorch estimators Commit was not correctly applied during recent 1.5.0 release.
@@ -184,6 +184,24 @@ class TestImperceptibleASR: except ARTTestException as e: art_warning(e) + @pytest.mark.skipMlFramework("tensorflow", "mxnet", "kerastf", "non_dl_frameworks") + def test_loss_gradient_masking_threshold_torch(self, art_warning, asr_dummy_estimator, audio_batch_padded): + try: + test_delta = audio_batch_padded + test_psd_maximum = np.ones((test_delta.shape[0], 28)) + test_masking_threshold = np.zeros((test_delta.shape[0], 1025, 28)) + + imperceptible_asr = ImperceptibleASR( + estimator=asr_dummy_estimator(), masker=PsychoacousticMasker(), + ) + loss_gradient, loss = imperceptible_asr._loss_gradient_masking_threshold_torch( + test_delta, test_psd_maximum, test_masking_threshold) + + assert loss_gradient.shape == test_delta.shape + assert loss.ndim == 1 and loss.shape[0] == test_delta.shape[0] + except ARTTestException as e: + art_warning(e) + @pytest.mark.skipMlFramework("pytorch", "tensorflow1", "tensorflow2", "mxnet", "kerastf", "non_dl_frameworks") def test_approximate_power_spectral_density_tf(self, art_warning, asr_dummy_estimator, audio_batch_padded): try: @@ -213,6 +231,29 @@ class TestImperceptibleASR: except ARTTestException as e: art_warning(e) + @pytest.mark.skipMlFramework("tensorflow", "mxnet", "kerastf", "non_dl_frameworks") + def test_approximate_power_spectral_density_torch(self, art_warning, asr_dummy_estimator, audio_batch_padded): + try: + import torch + + test_delta = audio_batch_padded + test_psd_maximum = np.ones((test_delta.shape[0], 28)) + + masker = PsychoacousticMasker() + imperceptible_asr = ImperceptibleASR( + estimator=asr_dummy_estimator(), masker=masker, + ) + approximate_psd_torch = imperceptible_asr._approximate_power_spectral_density_torch( + torch.from_numpy(test_delta), torch.from_numpy(test_psd_maximum) + ) + psd_approximated = approximate_psd_torch.numpy() + + assert psd_approximated.ndim == 3 + assert psd_approximated.shape[0] == test_delta.shape[0] # batch_size + assert psd_approximated.shape[1] == masker.window_size // 2 + 1 + except ARTTestException as e: + art_warning(e) + class TestPsychoacousticMasker: """
Remove mention of Python 2.7 from installation docs Closes
@@ -52,15 +52,12 @@ Cutadapt installation requires this software to be installed: Under Ubuntu, you may need to install the packages ``build-essential`` and ``python3-dev`` to get a C compiler. -On Windows, you need `Microsoft Visual C++ Compiler for -Python 2.7 <https://www.microsoft.com/en-us/download/details.aspx?id=44266>`_. - If you get an error message:: error: command 'gcc' failed with exit status 1 Then check the entire error message. If it says something about a missing -``Python.h`` file, then the problem are missing Python development +``Python.h`` file, then the problem is that you are missing Python development packages (``python3-dev`` in Ubuntu).
[IMPR] Use pywikibot.__version__ for version information detached from
@@ -30,6 +30,9 @@ repo_dir = abspath(join(docs_dir, '..')) sys.path.insert(0, repo_dir) os.chdir(repo_dir) +os.environ['PYWIKIBOT_NO_USER_CONFIG'] = '1' +import pywikibot # noqa: E402 + # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. @@ -67,11 +70,7 @@ copyright = '2003-2020, Pywikibot team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. -# -# The short X.Y version. -version = '3.0' -# The full version, including alpha/beta/rc tags. -release = '3.0.20200609' +version = release = pywikibot.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -256,11 +255,6 @@ texinfo_documents = [ # texinfo_show_urls = 'footnote' -def pywikibot_env(): - """Allow pywikibot modules to be imported without a user-config.py.""" - os.environ['PYWIKIBOT_NO_USER_CONFIG'] = '1' - - def pywikibot_script_docstring_fixups(app, what, name, obj, options, lines): """Pywikibot specific conversions.""" from scripts.cosmetic_changes import warning @@ -357,8 +351,6 @@ def setup(app): app.add_autodoc_attrgetter(type, pywikibot_family_classproperty_getattr) -pywikibot_env() - autodoc_default_options = { 'members': True, 'undoc-members': True,
Fixed typo in normalize() function. Changed 'channely' to 'channel' in the comments of the normalize function.
@@ -192,7 +192,7 @@ def normalize(tensor, mean, std, inplace=False): Args: tensor (Tensor): Tensor image of size (C, H, W) to be normalized. mean (sequence): Sequence of means for each channel. - std (sequence): Sequence of standard deviations for each channely. + std (sequence): Sequence of standard deviations for each channel. Returns: Tensor: Normalized Tensor image.
FIX: Force tuple elements to have the same dtype Fix
@@ -85,7 +85,7 @@ def _support_enumeration_gen(payoff_matrices): n_min = min(nums_actions) for k in range(1, n_min+1): - supps = (np.arange(k), np.empty(k, np.int_)) + supps = (np.arange(0, k, 1, np.int_), np.empty(k, np.int_)) actions = (np.empty(k+1), np.empty(k+1)) A = np.empty((k+1, k+1))
MAINT: Pin jinja2<3.1 to avoid breaking sphinx Ref: Note: jinja2 is not a requirement that we need to directly specify.
@@ -61,6 +61,8 @@ doc = nbsphinx numpydoc>=0.9.1 sphinx>=3.0.0 + # Ref: https://github.com/sphinx-doc/sphinx/issues/10291 + jinja2<3.1 [options.package_data] plotnine = plotnine/tests/baseline_images/**/*.png, plotnine/data/*.csv
bump required Python version to >=3.7 Python version 3.5 reached end of life in September 2020, version 3.6 in December 2021. This patch drops support for both versions.
@@ -35,7 +35,7 @@ setup( packages=['nutils', 'nutils.matrix'], long_description=long_description, license='MIT', - python_requires='>=3.5', + python_requires='>=3.7', install_requires=['numpy>=1.17', 'treelog>=1.0b5', 'stringly'], extras_require=dict( docs=['Sphinx>=1.6', 'scipy>=0.13', 'matplotlib>=1.3'],
Add '-E' option to pass proxy env for sudo The script install-deps.sh doesn't pass proxy options: INSTALLER_CMD="sudo -H ${PKG_MANAGER} -y install" Added -E (similar to elsewhere in the script) to allow dnf/yum installation of packages. Updated OS_FAMILY "Suse", "Debian" and "RedHat" for consistency. Closes-bug:
@@ -14,7 +14,7 @@ CHECK_CMD_PKGS=( # is installed if [ -x '/usr/bin/zypper' ]; then OS_FAMILY="Suse" - INSTALLER_CMD="sudo -H zypper install -y" + INSTALLER_CMD="sudo -H -E zypper install -y" CHECK_CMD="zypper search --match-exact --installed" PKG_MAP=( [gcc]=gcc @@ -35,7 +35,7 @@ if [ -x '/usr/bin/zypper' ]; then fi elif [ -x '/usr/bin/apt-get' ]; then OS_FAMILY="Debian" - INSTALLER_CMD="sudo -H apt-get -y install" + INSTALLER_CMD="sudo -H -E apt-get -y install" CHECK_CMD="dpkg -l" PKG_MAP=( [gcc]=gcc [git]=git @@ -51,7 +51,7 @@ elif [ -x '/usr/bin/apt-get' ]; then elif [ -x '/usr/bin/dnf' ] || [ -x '/usr/bin/yum' ]; then OS_FAMILY="RedHat" PKG_MANAGER=$(which dnf || which yum) - INSTALLER_CMD="sudo -H ${PKG_MANAGER} -y install" + INSTALLER_CMD="sudo -H -E ${PKG_MANAGER} -y install" CHECK_CMD="rpm -q" PKG_MAP=( [gcc]=gcc
Argument parsing and support current functionality for Still need to add support for -o, -r, and -e options.
@@ -1683,24 +1683,28 @@ Paths or arguments that contain spaces must be enclosed in quotes exit_msg = 'Leaving IPython, back to {}'.format(sys.argv[0]) embed(banner1=banner, exit_msg=exit_msg) - show_parser = argparse.ArgumentParser( - description='list past commands issued', + history_parser = argparse.ArgumentParser( + description='run, edit, and save past commands', formatter_class=argparse.RawTextHelpFormatter, ) - show_parser.add_argument('-s', '--script', action='store_true', help='script format; no separation lines') - _history_arg_help = """no arg list all -arg is integer by index -a..b, a:b, a:, ..b by indices (inclusive) -arg is string containing string -arg is /regex/ matching regular expression regex""" - show_parser.add_argument('arg', nargs='*', help=_history_arg_help) - - @with_argument_parser(show_parser) + history_parser.add_argument('-s', '--script', action='store_true', help='script format; no separation lines') + history_parser_group = history_parser.add_mutually_exclusive_group() + history_parser_group.add_argument('-r', '--run', action='store_true', help='run selected history items') + history_parser_group.add_argument('-e', '--edit', action='store_true', help='edit and then run selected history items') + history_parser_group.add_argument('-o', '--output-file', metavar='FILE', type=argparse.FileType('w'), help='output to file') + _history_arg_help = """empty all history items +a one history item by number +a..b, a:b, a:, ..b items by indices (inclusive) +[string] items containing string +/regex/ items matching regular expression""" + history_parser.add_argument('arg', nargs='?', help=_history_arg_help) + + @with_argument_parser(history_parser) def do_history(self, args): # If an argument was supplied, then retrieve partial contents of the history if args.arg: # If a character indicating a slice is present, retrieve a slice of the history - arg = args.arg[0] + arg = args.arg if '..' in arg or ':' in arg: try: # Get a slice of history
PERF: Variable-size minutely cache. Use a variable-size cache for minutely pricing data. Increase the default cache size for close to 3000, since close prices are used in many places in the simulation as the best-known price of assets. This dramatically speeds up algorithms that read the prices of many assets without ordering them.
@@ -42,6 +42,7 @@ from zipline.data.bar_reader import BarReader, NoDataForSid, NoDataOnDate from zipline.data.us_equity_pricing import check_uint32_safe from zipline.utils.calendars import get_calendar from zipline.utils.cli import maybe_show_progress +from zipline.utils.compat import mappingproxy from zipline.utils.memoize import lazyval @@ -898,8 +899,22 @@ class BcolzMinuteBarReader(MinuteBarReader): zipline.data.minute_bars.BcolzMinuteBarWriter """ FIELDS = ('open', 'high', 'low', 'close', 'volume') + DEFAULT_MINUTELY_SID_CACHE_SIZES = { + 'close': 3000, + 'open': 1550, + 'high': 1550, + 'low': 1550, + 'volume': 1550, + } + assert set(FIELDS) == set(DEFAULT_MINUTELY_SID_CACHE_SIZES) + + # Wrap the defaults in proxy so that we don't accidentally mutate them in + # place in the constructor. If a user wants to change the defaults, they + # can do so by mutating DEFAULT_MINUTELY_SID_CACHE_SIZES. + _default_proxy = mappingproxy(DEFAULT_MINUTELY_SID_CACHE_SIZES) + + def __init__(self, rootdir, sid_cache_sizes=_default_proxy): - def __init__(self, rootdir, sid_cache_size=1550): self._rootdir = rootdir metadata = self._get_metadata() @@ -931,7 +946,7 @@ class BcolzMinuteBarReader(MinuteBarReader): self._minutes_per_day = metadata.minutes_per_day self._carrays = { - field: LRU(sid_cache_size) + field: LRU(sid_cache_sizes[field]) for field in self.FIELDS }
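A stripped-down sketch of the per-field, variable-size cache idea, assuming the `lru-dict` package (its `LRU` type behaves like a dict with a bounded number of entries):

```python
from lru import LRU  # lru-dict package

DEFAULT_MINUTELY_SID_CACHE_SIZES = {
    'close': 3000,  # close prices are read far more often, so they get a bigger cache
    'open': 1550, 'high': 1550, 'low': 1550, 'volume': 1550,
}

# one bounded cache per field, sized independently
carrays = {field: LRU(size)
           for field, size in DEFAULT_MINUTELY_SID_CACHE_SIZES.items()}

carrays['close'][24] = "carray for sid 24"  # oldest entries are evicted once the cache is full
```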
Move location of adding calibration in _poisson Unsure why, but if the calibration region is added at the end versus at the beginning, the _poisson function segmentation faults when decorated with numba.jit. Moving the calibration region to the beginning alleviates this.
@@ -158,6 +158,10 @@ def radial(coord_shape, img_shape, golden=True, dtype=np.float): def _poisson(nx, ny, max_attempts, radius_x, radius_y, calib, seed=None): mask = np.zeros((ny, nx)) + # Add calibration region + mask[int(ny / 2 - calib[-2] / 2):int(ny / 2 + calib[-2] / 2), + int(nx / 2 - calib[-1] / 2):int(nx / 2 + calib[-1] / 2)] = 1 + if seed is not None: np.random.seed(int(seed)) @@ -213,10 +217,6 @@ def _poisson(nx, ny, max_attempts, radius_x, radius_y, calib, seed=None): pys[i] = pys[num_actives - 1] num_actives -= 1 - # Add calibration region - mask[int(ny / 2 - calib[-2] / 2):int(ny / 2 + calib[-2] / 2), - int(nx / 2 - calib[-1] / 2):int(nx / 2 + calib[-1] / 2)] = 1 - return mask
Remove duplicated Zuul job definition Now neutron-tempest-dvr-ha-multinode-full has been replaced in tempest, nova and devstack with neutron-ovs-tempest-dvr-ha-multinode-full. This definition should no longer be needed.
availability_zone: nova debug_iptables_rules: True -# TODO(ralonsoh): remove this duplicated definition when "devstack", -# "tempest" and "nova" adopt the new name. -- job: - name: neutron-tempest-dvr-ha-multinode-full - parent: neutron-ovs-tempest-dvr-ha-multinode-full - - job: name: neutron-ovs-tempest-multinode-full parent: tempest-multinode-full-py3
Rewrite /app/version.json with correct CIRCLE_BUILD_URL * Rewrite /app/version.json with correct CIRCLE_BUILD_URL * Combine build, deploy, and release jobs This is so that we don't have to touch the image again once it's built.
version: 2.0 jobs: - build: + build-deploy: # build for the master branch machine: true working_directory: ~/addons-server steps: @@ -15,14 +15,32 @@ jobs: "$CIRCLE_BUILD_URL" > version.json - run: - name: Build docker image and save to cache + name: Build docker image and push to repo command: | docker build -t app:build -f Dockerfile.deploy . - mkdir -p docker-cache - docker save -o ./docker-cache/built-image.tar app:build - - persist_to_workspace: - root: . - paths: . + docker tag app:build ${DOCKERHUB_REPO}:latest + docker login -e $DOCKERHUB_EMAIL -u $DOCKERHUB_USER -p $DOCKERHUB_PASS + docker push ${DOCKERHUB_REPO}:latest + build-release: # build for releases (tags) + machine: true + working_directory: ~/addons-server + steps: + - checkout + - run: > + printf '{"commit":"%s","version":"%s","source":"https://github.com/%s/%s","build":"%s"}\n' + "$CIRCLE_SHA1" + "$CIRCLE_TAG" + "$CIRCLE_PROJECT_USERNAME" + "$CIRCLE_PROJECT_REPONAME" + "$CIRCLE_BUILD_URL" + > version.json + - run: + name: Build docker image and push to repo + command: | + docker build -t app:build -f Dockerfile.deploy . + docker tag app:build ${DOCKERHUB_REPO}:${CIRCLE_TAG} + docker login -e $DOCKERHUB_EMAIL -u $DOCKERHUB_USER -p $DOCKERHUB_PASS + docker push ${DOCKERHUB_REPO}:${CIRCLE_TAG} integration_test: working_directory: ~/addons-server machine: true @@ -70,49 +88,18 @@ jobs: key: uitest-cache- paths: - .tox - deploy: - machine: true - working_directory: ~/addons-server - steps: - - attach_workspace: - at: ~/addons-server - - run: - name: Dockerfile deploy - command: | - docker load -i ./docker-cache/built-image.tar - docker tag app:build ${DOCKERHUB_REPO}:latest - docker login -e $DOCKERHUB_EMAIL -u $DOCKERHUB_USER -p $DOCKERHUB_PASS - docker push ${DOCKERHUB_REPO}:latest - release: - machine: true - working_directory: ~/addons-server - steps: - - attach_workspace: - at: ~/addons-server - - run: - Name: Release - command: | - docker load -i ./docker-cache/built-image.tar - docker tag app:build ${DOCKERHUB_REPO}:${CIRCLE_TAG} - docker login -e $DOCKERHUB_EMAIL -u $DOCKERHUB_USER -p $DOCKERHUB_PASS - docker push ${DOCKERHUB_REPO}:${CIRCLE_TAG} workflows: version: 2 build_test_deploy_release: jobs: - - build - - integration_test - - deploy: - requires: - - build + - build-deploy: filters: branches: only: master - - release: - requires: - - build + - build-release: filters: tags: only: /.*/ branches: ignore: /.*/ + - integration_test
kde speedup Added an `nkde` option which resamples the underlying log-volume grid when evaluating/plotting the KDE PDF. Should resolve
@@ -36,8 +36,8 @@ __all__ = ["runplot", "traceplot", "cornerpoints", "cornerplot", "boundplot", "cornerbound", "_hist2d", "_quantile"] -def runplot(results, span=None, logplot=False, kde=True, color='blue', - plot_kwargs=None, label_kwargs=None, lnz_error=True, +def runplot(results, span=None, logplot=False, kde=True, nkde=1000, + color='blue', plot_kwargs=None, label_kwargs=None, lnz_error=True, lnz_truth=None, truth_color='red', truth_kwargs=None, max_x_ticks=8, max_y_ticks=3, use_math_text=True, mark_final_live=True, fig=None): @@ -70,6 +70,10 @@ def runplot(results, span=None, logplot=False, kde=True, color='blue', (as opposed to the importance weights themselves). Default is `True`. + nkde : int, optional + The number of grid points used when plotting the kernel density + estimate. Default is `1000`. + color : str or iterable with shape (4,), optional A `~matplotlib`-style color (either a single color or a different value for each subplot) used when plotting the lines in each subplot. @@ -173,8 +177,10 @@ def runplot(results, span=None, logplot=False, kde=True, color='blue', # Determine plotting bounds for each subplot. data = [nlive, np.exp(logl), np.exp(logwt), np.exp(logz)] if kde: + # Derive kernel density estimate. wt_kde = gaussian_kde(resample_equal(-logvol, data[2])) # KDE - data[2] = wt_kde.pdf(-logvol) # evaluate KDE PDF + logvol_new = np.linspace(logvol[0], logvol[-1], nkde) # resample + data[2] = wt_kde.pdf(-logvol_new) # evaluate KDE PDF if span is None: span = [(0., 1.05 * max(d)) for d in data] no_span = True @@ -257,6 +263,8 @@ def runplot(results, span=None, logplot=False, kde=True, color='blue', if logplot and i == 3: ax.semilogy(-logvol, d, color=c, **plot_kwargs) yspan = [ax.get_ylim() for ax in axes] + elif kde and i == 2: + ax.plot(-logvol_new, d, color=c, **plot_kwargs) else: ax.plot(-logvol, d, color=c, **plot_kwargs) if i == 3 and lnz_error:
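Stripped of the plotting code, the speedup amounts to evaluating the KDE on a fixed-length grid instead of at every sample position — a minimal sketch with synthetic data:

```python
import numpy as np
from scipy.stats import gaussian_kde

samples = np.random.randn(200_000)   # stand-in for the resampled -logvol values
kde = gaussian_kde(samples)

grid = np.linspace(samples.min(), samples.max(), 1000)  # nkde grid points
pdf = kde.pdf(grid)   # 1,000 evaluations instead of 200,000
```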
settings: Link to relevant help center articles in notification settings. This provides a hook for users to get to these /help/ pages, which contain details on exactly how these notifications/settings work, as well as troubleshooting advice. Fixes:
<div class="desktop_notifications m-10 {{#if for_realm_settings}}org-subsection-parent{{else}}subsection-parent{{/if}}"> <div class="subsection-header inline-block"> - <h3>{{t "Desktop message notifications" }}</h3> + <h3>{{t "Desktop message notifications" }} + {{> ../help_link_widget link="/help/desktop-notifications" }} + </h3> {{> settings_save_discard_widget section_name="desktop-message-settings" show_only_indicator=(not for_realm_settings) }} </div> <div class="mobile_notifications m-10 {{#if for_realm_settings}}org-subsection-parent{{else}}subsection-parent{{/if}}"> <div class="subsection-header inline-block"> - <h3>{{t "Mobile message notifications" }}</h3> + <h3>{{t "Mobile message notifications" }} + {{> ../help_link_widget link="/help/mobile-notifications" }} + </h3> {{> settings_save_discard_widget section_name="mobile-message-settings" show_only_indicator=(not for_realm_settings) }} </div> <div class="email_message_notifications m-10 {{#if for_realm_settings}}org-subsection-parent{{else}}subsection-parent{{/if}}"> <div class="subsection-header inline-block"> - <h3>{{t "Email message notifications" }}</h3> + <h3>{{t "Email message notifications" }} + {{> ../help_link_widget link="/help/email-notifications" }} + </h3> {{> settings_save_discard_widget section_name="email-message-settings" show_only_indicator=(not for_realm_settings) }} </div>
README corrections Use Python 3 print() statements
@@ -75,9 +75,9 @@ set containing them: .. code:: python - >>> import soco - >>> for zone in soco.discover(): - ... print zone.player_name + >>> from soco import discover + >>> for zone in discover(): + ... print(zone.player_name) Living Room Kitchen @@ -86,7 +86,7 @@ If you prefer a list to a set: .. code:: python - >>> zone_list = list(soco.discover()) + >>> zone_list = list(discover()) >>> zone_list [SoCo("192.168.1.101"), SoCo("192.168.1.102")] >>> zone_list[0].mute = True @@ -109,7 +109,7 @@ Of course, you can also play music! track = sonos.get_current_track_info() - print track['title'] + print(track['title']) sonos.pause()
DOC: pip install tulip from GitHub in README [ci skip]
@@ -24,6 +24,10 @@ consult https://tulip-control.sourceforge.io/doc/install.html The next section describes how to build documentation. A test suite is provided under ``tests/``. Consult the section "Testing" below. +Pip can install the latest development version too:: + + pip install https://github.com/tulip-control/tulip-control/archive/master.zip + Documentation -------------
Lexical env: reformat Env_Getter documentation, add some privacy TN:
@@ -61,34 +61,29 @@ package Langkit_Support.Lexical_Env is type Getter_Fn_T is access function (Self : Getter_State_T) return Lexical_Env; - type Env_Getter (Dynamic : Boolean := False) is record - case Dynamic is - when True => - Getter_State : Getter_State_T; - Getter_Fn : Getter_Fn_T; - when False => - Env : Lexical_Env; - end case; - end record; + type Env_Getter is private; + -- Link to an environment. It can be either a simple link (just a pointer) + -- or a dynamic link (a function that recomputes the link when needed). See + -- tho two constructors below. - No_Env_Getter : constant Env_Getter := (False, null); - -- This type represents a link to an env. It can be either a simple link - -- (just a pointer) or a dynamic link (a function that recomputes the link - -- when needed). - function Get_Env (Self : Env_Getter) return Lexical_Env; - -- Returns the environment associated to the Self env getter + No_Env_Getter : constant Env_Getter; function Simple_Env_Getter (E : Lexical_Env) return Env_Getter; - -- Constructs an env getter of the simple variety - pointer to env + -- Create a static Env_Getter (i.e. pointer to environment) function Dyn_Env_Getter (Fn : Getter_Fn_T; State : Getter_State_T) return Env_Getter; + -- Create a dynamic Env_Getter (i.e. function and closure to compute an + -- environment). + + function Get_Env (Self : Env_Getter) return Lexical_Env; + -- Return the environment associated to the Self env getter type Env_Rebinding is record Old_Env, New_Env : Env_Getter; end record; - No_Env_Rebinding : Env_Rebinding := (No_Env_Getter, No_Env_Getter); + No_Env_Rebinding : constant Env_Rebinding; type Env_Rebindings_Array is array (Positive range <>) of Env_Rebinding; @@ -335,6 +330,19 @@ package Langkit_Support.Lexical_Env is private + type Env_Getter (Dynamic : Boolean := False) is record + case Dynamic is + when True => + Getter_State : Getter_State_T; + Getter_Fn : Getter_Fn_T; + when False => + Env : Lexical_Env; + end case; + end record; + + No_Env_Getter : constant Env_Getter := (False, null); + No_Env_Rebinding : constant Env_Rebinding := (No_Env_Getter, No_Env_Getter); + Empty_Env_Map : aliased Internal_Envs.Map := Internal_Envs.Empty_Map; Empty_Env_Record : aliased Lexical_Env_Type := (Parent => No_Env_Getter,
add 1 second delay to acc pedal alert So that the screen wouldn't appear to "flicker" when openpilot is engaged while the acc pedal is being released
@@ -334,7 +334,7 @@ EVENTS: Dict[int, Dict[str, Union[Alert, Callable[[Any, messaging.SubMaster, boo "openpilot will not brake while gas pressed", "", AlertStatus.normal, AlertSize.small, - Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .0, .0, .1), + Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .0, .0, .1, creation_delay=1.), }, EventName.vehicleModelInvalid: {
[ROCm] Add dlpack backend support Depends on the Tensorflow commit included in this PR
@@ -64,6 +64,14 @@ def from_dlpack(dlpack): gpu_backend = xla_bridge.get_backend("cuda") except RuntimeError: gpu_backend = None + + # Try ROCm if CUDA backend not found + if gpu_backend is None: + try: + gpu_backend = xla_bridge.get_backend("rocm") + except RuntimeError: + gpu_backend = None + buf = xla_client._xla.dlpack_managed_tensor_to_buffer( dlpack, cpu_backend, gpu_backend)
cfn-lint: Restore iE3008 since the issue has been resolved They just released the 1.18.1 version that fixes the
@@ -209,10 +209,9 @@ deps = cfn-lint # E2504 disabled since does not allow two-digit numbers in ephemeral(n) # W2507 disabled since we want to have nullable String type parameters # E2523 disabled since we have both a Launch Template and Launch Configuration -# iE3008 disabled because of https://github.com/awslabs/cfn-python-lint/issues/564 commands = cfn-lint -iE2504 -iW2507 -iE2523 aws-parallelcluster.cfn.json - cfn-lint -iE3008 batch-substack.cfn.json + cfn-lint batch-substack.cfn.json cfn-lint ebs-substack.cfn.json cfn-lint efs-substack.cfn.json cfn-lint raid-substack.cfn.json
typo just fixing typo
@@ -48,7 +48,7 @@ n = 10 distribution1 = 'uniform' Remin_value = 20. Remax_value = 120. -#Opton 2 - Lognormal-Uniform +#Option 2 - Lognormal-Uniform distribution2 = 'lognormal_uniform' Remean = 4. Restandard_deviation = 0.25
switch_to_containers: remove ceph-disk references as of stable-4.0, ceph-disk is no longer supported.
- import_role: name: ceph-defaults - - name: collect running osds and ceph-disk unit(s) - shell: | - systemctl list-units | grep "loaded active" | grep -Eo 'ceph-osd@[0-9]+.service|ceph-disk@dev-[a-z]{3,4}[0-9]{1}.service|ceph-volume|ceph\.target' - register: running_osds - changed_when: false - failed_when: false - - - name: stop/disable/mask non-containerized ceph osd(s) and ceph-disk units (if any) - systemd: - name: "{{ item }}" - state: stopped - enabled: no - with_items: "{{ running_osds.stdout_lines | default([])}}" - when: running_osds != [] - - name: remove old ceph-osd systemd units file: path: "{{ item }}"
Remove unused TensorImpl dependencies Summary: Pull Request resolved:
#include <ATen/core/TensorImpl.h> #include <c10/core/Backend.h> -#include <ATen/core/LegacyTypeDispatch.h> #include <ATen/core/WrapDimMinimal.h> -#include "c10/util/Optional.h" - -#include <ATen/core/VariableHooksInterface.h> +#include <c10/util/Optional.h> namespace at {
Update cp2k_check.py Reverting to the default CP2K modulefile for gpu and mc checks
@@ -11,6 +11,7 @@ import reframe.utility.sanity as sn class Cp2kCheck(rfm.RunOnlyRegressionTest): def __init__(self): self.valid_prog_environs = ['builtin'] + self.modules = ['CP2K'] self.executable = 'cp2k.psmp' self.executable_opts = ['H2O-256.inp'] @@ -50,7 +51,6 @@ class Cp2kCpuCheck(Cp2kCheck): super().__init__() self.descr = 'CP2K CPU check (version: %s, %s)' % (scale, variant) self.valid_systems = ['daint:mc'] - self.modules = ['CP2K/7.1-CrayGNU-19.10'] if scale == 'small': self.valid_systems += ['dom:mc'] self.num_tasks = 216 @@ -91,7 +91,6 @@ class Cp2kGpuCheck(Cp2kCheck): super().__init__() self.descr = 'CP2K GPU check (version: %s, %s)' % (scale, variant) self.valid_systems = ['daint:gpu'] - self.modules = ['CP2K/7.1-CrayGNU-19.10-cuda-10.1'] self.num_gpus_per_node = 1 if scale == 'small': self.valid_systems += ['dom:gpu']
Update README Added line
[![PyPi](https://img.shields.io/pypi/v/wikidataintegrator.svg)](https://pypi.python.org/pypi/wikidataintegrator) # Installation # -The easiest way to install WikidataIntegrator is using `pip` +The easiest way to install WikidataIntegrator is using `pip` or `pip3`. WikidataIntegrator supports python3.6 and higher, hence the suggestion for pip3. If python2 is installed pip will lead to an error indicating missing dependencies. + ``` -pip install wikidataintegrator +pip3 install wikidataintegrator ``` You can also clone the repo and execute with administrator rights or install into a virtualenv. @@ -17,7 +18,7 @@ git clone https://github.com/sebotic/WikidataIntegrator.git cd WikidataIntegrator -python setup.py install +python3 setup.py install ``` To test for correct installation, start a python console and execute the following (Will retrieve the Wikidata item for ['Human'](http://www.wikidata.org/entity/Q5)):
Make sure image labels are ints Sometimes these are numpy.int64, which doesn't work at least in Python 3.
@@ -68,7 +68,7 @@ def image_generator(images, labels): yield { "image/encoded": [enc_string], "image/format": ["png"], - "image/class/label": [label], + "image/class/label": [int(label)], "image/height": [height], "image/width": [width] }
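For reference, the underlying type mismatch in isolation (values are made up) — on Python 3, `numpy.int64` is not a subclass of `int`, so strict integer checks reject it until it is cast:

```python
import numpy as np

label = np.int64(7)
print(isinstance(label, int))        # False on Python 3
print(isinstance(int(label), int))   # True after the explicit cast
```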
Update init.yml (FortiOS 5.4)
@@ -17,6 +17,6 @@ prefixes: host: (?<=devname=)(.*)(?=\s+devid=) devid: (?<=devid=)(.*)(?=\s+logid=) logid: (?<=logid=)(.*)(?=\s+type=) - processName: ([^ ]+) - tag: ([^ ]+) - line: 'date={date} time={time} devname={host} devid={devid} logid={logid} {processName} {tag}' + processName: (?<=type=)(.*)(?=\s+subtype=) + tag: (?<=subtype=)(.*)(?=\s+level=) + line: 'date={date} time={time} devname={host} devid={devid} logid={logid} type={processName} subtype={tag}'