cloud roster: Work with custom conf dir A path to the salt-cloud config file is hardcoded into the cloud roster, so it won't work with a custom conf dir. Remove the hardcoded path and construct it using __opts__['conf_file'].
@@ -20,6 +20,7 @@ usually located at /etc/salt/cloud. For example, add the following: # Import python libs from __future__ import absolute_import +import os # Import Salt libs import salt.loader @@ -37,7 +38,9 @@ def targets(tgt, tgt_type='glob', **kwargs): # pylint: disable=W0613 ''' ret = {} - cloud_opts = salt.config.cloud_config('/etc/salt/cloud') + cloud_opts = salt.config.cloud_config( + os.path.join(os.path.dirname(__opts__['conf_file']), 'cloud') + ) minions = __runner__['cache.cloud'](tgt) for minion_id, full_info in minions.items():
Update model.py Fix linter.
@@ -1798,8 +1798,6 @@ class Model: metadata = utils.get_local_charm_metadata(charm_dir) charm_series = charm_series or await get_charm_series(metadata, self) - charm_origin.base = utils.get_local_charm_base, - charm_series, channel, metadata, charm_dir, client.Base) base = utils.get_local_charm_base( charm_series, channel, metadata, charm_dir, client.Base) charm_origin.base = base
_type can be absent in inner_hits Fixes
@@ -41,7 +41,7 @@ class Response(AttrDict): return self._shards.total == self._shards.successful and not self.timed_out def _get_result(self, hit): - dt = hit['_type'] + dt = hit.get('_type') for t in hit.get('inner_hits', ()): hit['inner_hits'][t] = Response(self._search, hit['inner_hits'][t]) callback = self._search._doc_type_map.get(dt, Hit)
Update README.md Fix formatting of HTML output example
@@ -559,6 +559,7 @@ class ContactPage(MenuPage): These change would result in the following HTML output when rendering a `ContactPage` instance in a main menu: ```html +... <li class=" dropdown"> <a href="/contact-us/" class="dropdown-toggle" id="ddtoggle_18" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Contact us <span class="caret"></span></a> <ul class="dropdown-menu" aria-labelledby="ddtoggle_18"> @@ -567,6 +568,7 @@ These change would result in the following HTML output when rendering a `Contact <li class="map"><a href="/contact-us/#map">Map &amp; directions</a></li> </ul> </li> +... ``` You can also modify sub-menu items based on field values for specific instances, rather than doing the same for every page of the same type. Here's another example:
Update dridex.txt ```ThreatGrid``` --> ```Dridex Malware Detected 100% (table)```
@@ -122,3 +122,30 @@ topdalescotty.top 5.133.242.156:170 89.22.103.139:8000 ponestona.com + +# Reference: https://blog.talosintelligence.com/2019/07/threat-roundup-for-0705-0712.html (# Win.Packed.Xcnfe-7012508-0) + +5twtwy19pp.com +b7qxyidhg5.com +c62yc6xsm1.com +coxymk80cd.com +ct1wlbyjzx.com +exgk5nzv7m.com +fvtbhlnxj0.com +fwn4l9u2gb.com +fynzp0oht8.com +glixbn9lnj.com +gzw0bfzxhb.com +hludxizrvf.com +huga7gshpk.com +in4lprxgui.com +lqdu4kraxu.com +lrv8bvrmhq.com +porsukgrlq.com +rjhw2tvcvh.com +rm1cbe2kvb.com +seqamoa4jp.com +t0uetiplqk.com +tcp1twzitf.com +uttn4zziks.com +xpqvri1vhh.com
[Feature Store] Fix scheduled ingest [ML-3194](https://jira.iguazeng.com/browse/ML-3194)
@@ -444,7 +444,7 @@ def ingest( if isinstance(source, DataSource) and source.schedule: min_time = datetime.max - for target in targets or featureset.status.targets: + for target in featureset.status.targets: if target.last_written: cur_last_written = target.last_written if isinstance(cur_last_written, str):
Screengrab App : Allow client to opt out of saving the image. Now that we have `WidgetAlgo.grab()`, users of the screengrab app may prefer capturing their images from the command / commandFile directly.
@@ -69,7 +69,7 @@ class screengrab( Gaffer.Application ) : description = "Where to save the resulting image", defaultValue = "", extensions = "png", - allowEmptyString = False, + allowEmptyString = True, ), IECore.StringVectorParameter( @@ -350,16 +350,9 @@ class screengrab( Gaffer.Application ) : # Write the image, creating a directory for it if necessary. - self.__waitForIdle() - - imageDir = os.path.dirname( args["image"].value ) - if imageDir and not os.path.isdir( imageDir ) : - IECore.msg( IECore.Msg.Level.Info, "screengrab", "Creating target directory [ %s ]" % imageDir ) - os.makedirs( imageDir ) - - pixmap = QtGui.QPixmap.grabWindow( self.getGrabWidget()._qtWidget().winId() ) + if args["image"].value : IECore.msg( IECore.Msg.Level.Info, "screengrab", "Writing image [ %s ]" % args["image"].value ) - pixmap.save( args["image"].value ) + GafferUI.WidgetAlgo.grab( widget = self.getGrabWidget(), imagePath = args["image"].value ) # Remove the script and any reference to the grab widget up so # we can shut down cleanly.
subtitle.remux: remove copyts copyts made videos start at 10s and made the subtitles wrong.
@@ -117,7 +117,7 @@ class postprocess(object): log.info(u"Muxing {0} into {1}".format(orig_filename, new_name)) tempfile = u"{0}.temp".format(orig_filename) - arguments = ["-map", "0:{}".format(videotrack), "-map", "0:{}".format(audiotrack), "-c", "copy", "-copyts", "-f", "mp4"] + arguments = ["-map", "0:{}".format(videotrack), "-map", "0:{}".format(audiotrack), "-c", "copy", "-f", "mp4"] if ext == ".ts": arguments += ["-bsf:a", "aac_adtstoasc"]
Add # pytype: disable=import-error to a couple of import statements to allow cpu=ppc builds (the imported modules aren't currently linked into jaxlib when building for ppc).
@@ -59,7 +59,7 @@ else: jax_jit = xla_client._xla.jax_jit try: - from jaxlib import cusolver + from jaxlib import cusolver # pytype: disable=import-error except ImportError: cusolver = None @@ -69,7 +69,7 @@ except ImportError: rocsolver = None try: - from jaxlib import cuda_prng + from jaxlib import cuda_prng # pytype: disable=import-error except ImportError: cuda_prng = None
Correct 'see also' docs for some kernels [ci skip]
@@ -392,7 +392,7 @@ class Ring2DKernel(Kernel2D): See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, - Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel + TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- @@ -727,7 +727,7 @@ class AiryDisk2DKernel(Kernel2D): See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, - Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel + Ring2DKernel, TrapezoidDisk2DKernel, Moffat2DKernel Examples --------
Replace tf.math.ceil with znp.ceil. Some tests fail. See next commit for update of tensorflow fixing the issue.
@@ -5,6 +5,7 @@ import tensorflow as tf import tensorflow_addons as tfa import tensorflow_probability as tfp +import zfit.z.numpy as znp from .. import exception, z from ..core.data import Data, sum_samples from ..core.interfaces import ZfitPDF @@ -191,8 +192,7 @@ class FFTConvPDFV1(BaseFunctor): area_ratios = (upper_sample - lower_sample) / ( limits_kernel.rect_upper - limits_kernel.rect_lower) nbins_func_exact_max = tf.reduce_max(area_ratios * n) - nbins_func = tf.math.ceil( - nbins_func_exact_max) # plus one and floor is like ceiling (we want more bins) with the + nbins_func = znp.ceil(nbins_func_exact_max) # plus one and floor is like ceiling (we want more bins) with the # guarantee that we add one bin (e.g. if we hit exactly the boundaries, we add one. nbins_kernel = n # n = max(n, npoints_scaling)
upgrade sparsify server max file size Reviewers: mgoin, kevinaer, alexm, tuan, ben Subscribers: #core, #python_ml
@@ -24,9 +24,7 @@ from neuralmagicML.server.blueprints import ( system_blueprint, ui_blueprint, ) -from neuralmagicML.server.models import ( - database_setup, -) +from neuralmagicML.server.models import database_setup from neuralmagicML.server.workers import JobWorkerManager @@ -112,7 +110,7 @@ def run( ui_path = os.path.join(os.path.dirname(clean_path(__file__)), "ui") app = Flask("neuralmagicML.server", static_folder=os.path.join(ui_path, "static")) - + app.config["MAX_CONTENT_LENGTH"] = 2 * 1024 * 1024 * 1024 # 2 Gb limit app.config["UI_PATH"] = ui_path CORS(app) @@ -144,10 +142,7 @@ def parse_args() -> Any: "--port", default=5543, type=int, help="The local port to launch the server on" ) parser.add_argument( - "--debug", - default=False, - action="store_true", - help="Set to run in debug mode", + "--debug", default=False, action="store_true", help="Set to run in debug mode", ) parser.add_argument( "--logging-level",
fix(light_controller): pass a default value for the "on" method related
@@ -125,7 +125,7 @@ class LightController(TypeController[LightEntity], ReleaseHoldController): def get_predefined_actions_mapping(self) -> PredefinedActionsMapping: return { - Light.ON: self.on, + Light.ON: (self.on, (True,)), Light.OFF: self.off, Light.TOGGLE: self.toggle, Light.TOGGLE_FULL_BRIGHTNESS: (
Fix, was not bug compatible after optimizing issubclass built-in * Also add "isinstance", which is very similar code. This only has an effect in full compat code, i.e. mostly when testing the CPython test suites, which spread issubclass calls over multiple lines a lot.
@@ -239,6 +239,8 @@ def generateBuiltinIsinstanceCode(to_name, expression, emit, context): context=context, ) + context.setCurrentSourceCodeReference(expression.getCompatibleSourceReference()) + res_name = context.getIntResName() emit("%s = Nuitka_IsInstance(%s, %s);" % (res_name, inst_name, cls_name)) @@ -272,6 +274,8 @@ def generateBuiltinIssubclassCode(to_name, expression, emit, context): context=context, ) + context.setCurrentSourceCodeReference(expression.getCompatibleSourceReference()) + res_name = context.getIntResName() emit("%s = PyObject_IsSubclass(%s, %s);" % (res_name, cls_name, classes_name))
Lexical env: fix Get_New_Env documentation according to implementation TN:
@@ -116,8 +116,8 @@ package Langkit_Support.Lexical_Env is function Get_New_Env (Self : Env_Rebindings; Old_Env : Lexical_Env) return Lexical_Env; - -- Return the new env corresponding to Old_Env in Self. Return - -- No_Env_Getter if there is no association. + -- Return the new env corresponding to Old_Env in Self. Return Old_Env if + -- there is no association. ---------------------- -- Env_Element Type --
Fixed Error Message Forgot the case where the value of repetitions could be None
@@ -137,7 +137,7 @@ class rope(_algorithm): raise ValueError("Repetition for following runs must be an " "even number.") - if repetitions < 30: + if repetitions is not None and repetitions < 30: raise ValueError("Repetitions must be at least 30") starttime = time.time()
Added placeholder Without this, a blank option appears, I think because tags is true.
@@ -21,6 +21,7 @@ hqDefine("hqwebapp/js/widgets", [ var $input = $(input); $input.select2(_.extend({ multiple: true, + placeholder: ' ', tags: true, tokenSeparators: [",", " "], createTag: function (params) {
swarming: remove init_symlinks This is not necessary anymore as we enabled symlinks on Windows bots. Fixed:
@@ -9,34 +9,10 @@ import sys APP_DIR = os.path.dirname(os.path.realpath(os.path.abspath(__file__))) -def init_symlinks(root): - """Adds support for symlink-as-file on Windows. - - Manually resolves symlinks in path for directory and add them to sys.path. - """ - if sys.platform != 'win32': - return - for i in os.listdir(root): - if '.' in i: - continue - path = os.path.join(root, i) - if os.path.isfile(path): - # Found a file instead of a symlink to a directory. Adjust sys.path - # accordingly to where the symlink points. - with open(path) as f: - link = f.read() - if '\n' in link: - continue - dest = os.path.normpath(os.path.join(root, link)) - # This is not exactly right but close enough. - sys.path.insert(0, os.path.dirname(dest)) - - def setup_test_env(): """Sets up App Engine test environment.""" # For application modules. sys.path.insert(0, APP_DIR) - init_symlinks(APP_DIR) # TODO(maruel): Remove. sys.path.insert(0, os.path.join(APP_DIR, 'components', 'third_party'))
BUG: metadata access Fixed metadata access to be cleaner.
@@ -251,8 +251,8 @@ class TestBasics(): self.testInst.custom.attach(custom1, 'add') self.testInst.load(2009, 1) - assert self.testInst.meta['doubleMLT'].units == 'hours1' - assert self.testInst.meta['doubleMLT'].long_name == 'doubleMLT' + assert self.testInst.meta[('doubleMLT', 'units')] == 'hours1' + assert self.testInst.meta[('doubleMLT', 'long_name')] == 'doubleMLT' assert (self.testInst['doubleMLT'] == 2.0 * self.testInst['mlt']).all() assert len([kk for kk in self.testInst.data.keys()]) == self.ncols + 1
Swap FLASK_DEBUG for FLASK_ENV This achieves the same thing and gets rid of the warning about being in a production environment when the app starts up.
@@ -28,7 +28,7 @@ export FIRETEXT_API_KEY='FIRETEXT_ACTUAL_KEY' export NOTIFICATION_QUEUE_PREFIX='YOUR_OWN_PREFIX' export FLASK_APP=application.py -export FLASK_DEBUG=1 +export FLASK_ENV=development export WERKZEUG_DEBUG_PIN=off "> environment.sh ```
Remove incorrect reset line in UM232H pinout The datasheet says RST# is an input which resets the FT232H, not an output to the device under test.
@@ -80,9 +80,6 @@ Please read `4. UM232H Pin Out and Signal Descriptions <https://www.ftdichip.com * - AD3 - TMS - Test Mode State - * - RST# - - RESET - - Connect this pin to the (active low) reset input of the target CPU (EN for ESP32) You will also need to connect VIO to V3V and USB to 5V0 of UM232H to power the FTDI chip and board. See `UM232H Datasheet <https://www.ftdichip.com/Support/Documents/DataSheets/Modules/DS_UM232H.pdf>`_
Update core_screenshot_config.yaml Add 30 second delay to MI primary
@@ -35,6 +35,10 @@ primary: page.done(); message: clicking button to get rid of popup, using viewport height 8500 for IN + MI: + overseerScript: page.manualWait(); await page.waitForDelay(30000); page.done(); + message: waiting 30 sec to load MI + MT: overseerScript: page.manualWait(); await page.waitForDelay(30000); page.done(); message: waiting 30 sec to load MT
format microseconds correctly make sure the results of num2date are the same for arrays and scalar values
@@ -569,12 +569,23 @@ def DateFromJulianDay(JD, calendar='standard', only_use_cftime_datetimes=True, ms_eps = np.atleast_1d(np.array(np.finfo(np.float64).eps,np.longdouble)) ms_eps = 86400000000.*np.maximum(ms_eps*julian, ms_eps) microsecond = np.where(microsecond < ms_eps, 0, microsecond) - indxms = microsecond > 1000000-ms_eps - if indxms.any(): - julian[indxms] = julian[indxms] + 2*ms_eps[indxms]/86400000000. + if microsecond.ndim: # array + if calendar in ['standard', 'gregorian']: + ind_before = np.zeros(microsecond.size,dtype=np.bool) + for i in range(len(microsecond)): + if microsecond[i] > 1000000-ms_eps[i]: + julian[i] += 2*ms_eps[i]/86400000000. + year[i],month[i],day[i],hour[i],minute[i],second[i],microsecond[i],dayofyr[i],dayofwk[i],ind =\ + getdateinfo(julian[i]) + if calendar in ['standard', 'gregorian']: + ind_before[i] = ind + microsecond[i] = 0 + else: # scalar + if microsecond > 1000000-ms_eps: + julian += 2*ms_eps[indxms]/86400000000. year,month,day,hour,minute,second,microsecond,dayofyr,dayofwk,ind_before =\ getdateinfo(julian) - microsecond[indxms] = 0 + microsecond = 0 # check if input was scalar and change return accordingly isscalar = False @@ -1327,7 +1338,7 @@ Gregorial calendar. def __str__(self): second = '{:02d}'.format(self.second) if self.microsecond: - second += '.{}'.format(self.microsecond) + second += '.{:06d}'.format(self.microsecond) return "{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{}".format( self.year, self.month, self.day, self.hour, self.minute, second)
test_runner: Add comment explaining abnormal database destruction. When running the test-backend suite in serial mode, `destroy_test_db` double appends the database id number to the template if passed an argument for `number`. The comment here explains this behavior.
@@ -229,15 +229,26 @@ def destroy_test_databases(worker_id: Optional[int]=None) -> None: for alias in connections: connection = connections[alias] try: + # In the parallel mode, the test databases are created + # through the N=self.parallel child processes, and in the + # parent process (which calls `destroy_test_databases`), + # `settings_dict` remains unchanged, with the original + # template database name (zulip_test_template). So to + # delete the database zulip_test_template_<number>, we + # need to pass `number` to `destroy_test_db`. + # + # When we run in serial mode (self.parallel=1), we don't + # fork and thus both creation and destruction occur in the + # same process, which means `settings_dict` has been + # updated to have `zulip_test_template_<number>` as its + # database name by the creation code. As a result, to + # delete that database, we need to not pass a number + # argument to destroy_test_db. if worker_id is not None: """Modified from the Django original to """ database_id = random_id_range_start + worker_id connection.creation.destroy_test_db(number=database_id) else: - # In theory, this code path should be the same as the - # parallel one; but a bug results in that - # double-adding the database_id. So we save that - # double-add by just passing nothing here. connection.creation.destroy_test_db() except ProgrammingError: # DB doesn't exist. No need to do anything.
updated discovery added huobi as exchange name
@@ -19,7 +19,7 @@ The following walks through all the steps when running `config` for the first ti |-----|-----| | `What is your market making strategy >>>`: | Enter `discovery`.<br/><br/>Currently available options: `arbitrage` or `cross_exchange_market_making` or `pure_market_making` or `discovery` or `simple_trade` *(case sensitive)* | | `Import previous configs or create a new config file? (import/create) >>>`: | When running the bot for the first time, enter `create`.<br/>If you have previously initialized, enter `import`, which will then ask you to specify the config file location. | -| `Enter your first exchange name >>>`: | Enter an exchange you would like to trade on.<br/><br/>Currently available options: `binance`, `radar_relay`, `coinbase_pro`, `ddex`, `idex`, `bamboo_relay`, , or `huobi` *(case sensitive)* | +| `Enter your first exchange name >>>`: | Enter an exchange you would like to trade on.<br/><br/>Currently available options: `binance`, `radar_relay`, `coinbase_pro`, `ddex`, `idex`, `bamboo_relay`, or `huobi` *(case sensitive)* | | `Enter your second exchange name >>>`: | Enter another exchange you would like to trade on.<br/><br/>Currently available options: `binance`, `radar_relay`, `coinbase_pro`, `ddex`, `idex`, `bamboo_relay`, or `huobi` *(case sensitive)* | | `Enter list of trading pairs or token names on $FIRST_EXCHANGE >>>` | Enter the list of coin pairs that you wish to be included in Hummingbot's search for the first exchange, or hit enter to include all active pairs.<br/><br/>Pairs and tokens must be entered as an array: for example, if you want to include ZRX-WETH and WETH-DAI, you would enter it as `[ZRXWETH, WETHDAI]` or `[ZRX-WETH, WETH-DAI]` depending on the exchange. If you are interested in running discovery on all trading pairs for a single token, you may do so by entering `<$TOKEN_SYMBOL>` For instance, entering `[<ZRX>]` is the same as entering `[ZRX-USDT, ZRX-BTC, ZRX-DAI, ...]`. | | `Enter list of trading pairs or token names on $SECOND_EXCHANGE >>>` | Enter the list of coin pairs that you wish to be included in Hummingbot's search for the second exchange, or hit enter to include all active pairs. Pairs and tokens must be entered as an array (see above). |
Quant Noise Summary: Implemented fixed-bit scalar quantization with quant noise for pytext models
@@ -27,7 +27,6 @@ def quantize_model_(model, p=0.2, bits=8, update_step=3000, method="histogram", - bits: number of bits - update_step: update quantization parameters every update_step steps """ - # quantize all layers # remove weights indicates whether the weights extension should be removed, in addition to # weight_orig and weight extension on names
pkg_implementation_spec_ada.mako: remove obsolete comment TN:
@@ -568,9 +568,6 @@ package ${ada_lib_name}.Analysis.Implementation is -- We implement array types as discriminated records so that binding to C -- can be done without copy. - -- TODO??? This is likely to change in the near future: we would like to - -- have here pure Ada arrays instead. - % for array_type in ctx.sorted_types(ctx.array_types): % if array_type.element_type.should_emit_array_type: ${array_types.public_decl(array_type)}
Removed possibility to invalidate 'queue' context It was strange that nf invalidated this context; now it doesn't seem to happen anymore. Perhaps it was a nf bug.
@@ -136,11 +136,6 @@ class EventsHandler(threading.Thread): if event.event_data['is_in_mylist']: # If video is in my list, invalidate the continueWatching list (update lolomo context data) api.update_lolomo_context('continueWatching') - else: - # Else invalidate the 'queue' list (update lolomo context data) - # Todo: get 'queue' lolomo id/index - # api.update_lolomo_context('queue') - pass api.update_videoid_bookmark(event.get_video_id()) # Below commented lines: let future requests continue to be sent, unstable connections like wi-fi cause problems # if not event.is_response_success():
Update control ipython3-qtconsole has been renamed to python3-qtconsole in Debian 10
@@ -11,7 +11,7 @@ Build-Depends: cython3 (>= 0.23.2), graphviz, help2man, ipython3, - ipython3-qtconsole, + python3-qtconsole, pandoc <!nodoc>, python3-all-dev, python3-dateutil,
Add a stratisd test This is a very relevant test, since GetManagedObjects is the primary way to get information about stratisd's world over D-Bus.
@@ -20,7 +20,7 @@ import sys import time import unittest -from testlib.utils import exec_command, process_exists +from testlib.utils import exec_command, exec_test_command, process_exists from testlib.stratis import StratisCli, clean_up DISKS = [] @@ -49,6 +49,27 @@ class StratisCertify(unittest.TestCase): StratisCli.destroy_all() assert StratisCli.pool_list() == [] + def test_get_managed_objects(self): + """ + Test that GetManagedObjects returns a string w/out failure. + """ + exit_code, stdout, stderr = exec_test_command( + [ + "busctl", + "call", + "org.storage.stratis1", + "/org/storage/stratis1", + "org.freedesktop.DBus.ObjectManager", + "GetManagedObjects", + "--verbose", + "--no-pager", + "--timeout=1200", + ] + ) + self.assertEqual(exit_code, 0) + self.assertEqual(stderr, "") + self.assertNotEqual(stdout, "") + if __name__ == "__main__": ARGUMENT_PARSER = argparse.ArgumentParser()
Fix error from combination of two Complex.c changes Essentially SoftComplexToPy required another bit of utility code that has now become tempita. I've just skipped using this utility code
@@ -349,12 +349,11 @@ static double __Pyx_SoftComplexToDouble(__pyx_t_double_complex value) { static PyObject *__pyx_Py_FromSoftComplex(__pyx_t_double_complex value); /* proto */ //////// SoftComplexToPy //////////////// -//@requires: ToPy //@requires: RealImag static PyObject *__pyx_Py_FromSoftComplex(__pyx_t_double_complex value) { if (__Pyx_CIMAG(value)) { - return __pyx_PyComplex_FromComplex(value); + return PyComplex_FromDoubles(__Pyx_CREAL(value), __Pyx_CIMAG(value)); } else { return PyFloat_FromDouble(__Pyx_CREAL(value)); }
making inspector object a singleton again by assigning it to the base pollster class Closes-Bug:
@@ -47,21 +47,21 @@ class GenericComputePollster(plugin_base.PollsterBase): def setup_environment(self): super(GenericComputePollster, self).setup_environment() - self.inspector = self._get_inspector(self.conf) + self.inspector = GenericComputePollster._get_inspector(self.conf) @staticmethod def aggregate_method(stats): # Don't aggregate anything by default return stats - @classmethod - def _get_inspector(cls, conf): + @staticmethod + def _get_inspector(conf): # FIXME(sileht): This doesn't looks threadsafe... try: - inspector = cls._inspector + inspector = GenericComputePollster._inspector except AttributeError: inspector = virt_inspector.get_hypervisor_inspector(conf) - cls._inspector = inspector + GenericComputePollster._inspector = inspector return inspector @property
MAINT: Print more informative message when test fails. The Deprecation tests were only printing the warning type on failure; print the whole warning instead so that it is possible to know where the warnings originated.
@@ -101,7 +101,7 @@ def assert_deprecated(self, function, num=1, ignore_others=False, (self.warning_cls.__name__, warning.category)) if num is not None and num_found != num: msg = "%i warnings found but %i expected." % (len(self.log), num) - lst = [str(w.category) for w in self.log] + lst = [str(w) for w in self.log] raise AssertionError("\n".join([msg] + lst)) with warnings.catch_warnings():
fix Total Annual energy in proforma was using the PV degradation factor for every tech. Now it just sums the annual values from each tech.
@@ -332,7 +332,6 @@ def generate_proforma(scenariomodel, template_workbook, output_file_path): current_row += 1 ws['A{}'.format(current_row)] = "Year 1 net energy produced with system (kWh/year)" ws['B{}'.format(current_row)] = wind_energy + generator_energy + sum([pv['pv_energy'] for pv in pv_data]) - total_energy_cell = 'B{}'.format(current_row) make_attribute_row(ws, current_row) current_row += 1 current_row += 1 @@ -1063,6 +1062,7 @@ def generate_proforma(scenariomodel, template_workbook, output_file_path): current_row += 1 current_row += 1 + #################################################################################################################### # Annual Value Summary #################################################################################################################### @@ -1072,20 +1072,7 @@ def generate_proforma(scenariomodel, template_workbook, output_file_path): for i in range(financial.analysis_years + 1): ws['{}{}'.format(upper_case_letters[i + 1], current_row)] = i make_title_row(ws, current_row, length=financial.analysis_years + 2) - current_row += 1 - ws['A{}'.format(current_row)] = "Total Annual energy (kWh)" - ws['B{}'.format(current_row)] = 0 - ws['C{}'.format(current_row)] = '={}'.format(total_energy_cell) - for i in range(2, financial.analysis_years + 1): - ws['{}{}'.format(upper_case_letters[1 + i], current_row)] = \ - '={prev_col}{row}*(1-{pv_degradation_rate_cell}/100)'.format( - prev_col=upper_case_letters[i], row=current_row, - pv_degradation_rate_cell=pv_cell_locations[0]["pv_degradation_rate_cell"]) - make_attribute_row(ws, current_row, length=financial.analysis_years + 2, alignment=center_align, - number_format='#,##0') - fill_cols(ws, range(2, financial.analysis_years + 2), current_row, calculated_fill) - fill_cols(ws, range(1, 2), current_row, grey_fill) current_row += 1 for idx, pv in enumerate(pv_data): @@ -1137,6 +1124,21 @@ def generate_proforma(scenariomodel, template_workbook, output_file_path): fill_cols(ws, range(2, financial.analysis_years + 2), current_row, calculated_fill) fill_cols(ws, range(1, 2), current_row, grey_fill) + current_row += 1 + ws['A{}'.format(current_row)] = "Total Annual energy (kWh)" + ws['B{}'.format(current_row)] = 0 + + for i in range(1, financial.analysis_years+1): + ws['{}{}'.format(upper_case_letters[i+1], current_row)] = \ + '=SUM({col}{first_row}:{col}{last_row})'.format( + col=upper_case_letters[i+1], first_row=current_row-3, last_row=current_row-1 + ) + + make_attribute_row(ws, current_row, length=financial.analysis_years + 2, alignment=center_align, + number_format='#,##0') + fill_cols(ws, range(2, financial.analysis_years + 2), current_row, calculated_fill) + fill_cols(ws, range(1, 2), current_row, grey_fill) + current_row += 1 ws['A{}'.format(current_row)] = "Electricity bill without system ($)" ws['B{}'.format(current_row)] = 0
Bugfix: TP spam vec's todense() needs to return an ndarray for reps to work. This wasn't a problem before introducing the C-representations, which more strictly require that a numpy array and not just a ProtectedArray be passed as an argument to initialize it.
@@ -701,7 +701,7 @@ class DenseSPAMVec(SPAMVec): in `scratch` maybe used when it is not-None. """ #don't use scratch since we already have memory allocated - return self.base[:,0] + return _np.asarray(self.base[:,0]) def __copy__(self): # We need to implement __copy__ because we defer all non-existing
docs: added note about `pre-commit autoupdate` Added an explanation to the docs of why `pre-commit autoupdate` will not find the latest version for Poetry's hooks.
@@ -24,7 +24,6 @@ the defaults are overwritten. You must fully specify all arguments for your hook if you make use of `args:`. {{% /note %}} - ## poetry-check The `poetry-check` hook calls the `poetry check` command @@ -99,3 +98,24 @@ repos: - id: poetry-export args: ["-f", "requirements.txt", "-o", "requirements.txt"] ``` + +## FAQ + +### Why does `pre-commit autoupdate` not update to the latest version? + +`pre-commit autoupdate` updates the `rev` for each repository defined in your `.pre-commit-config.yaml` +to the latest available tag in the default branch. + +Poetry follows a branching strategy, where the default branch is the active developement branch +and fixes gets back ported to stable branches. New tags are assigned in these stable branches. + +`pre-commit` does not support such a branching strategy and has decided to not implement +an option, either on the [user side](https://github.com/pre-commit/pre-commit/issues/2512) +or [hook author side](https://github.com/pre-commit/pre-commit/issues/2508), to define a branch for lookup the latest +available tag. + +Thus, `pre-commit autoupdate` is not usable for the hooks described here. + +You can avoid changing the `rev` to an unexpected value, by using the `--repo` parameter (may be specified multiple +times), to explicit list repositories that should be updated. An option to explicit exclude +repositories [will not be implemented](https://github.com/pre-commit/pre-commit/issues/1959) into `pre-commit`.
Restored onPreProcess and validation code Even though the UI no longer allows invalid answers since it doesn't support free text, it's still possible for another question to update a combobox with an invalid answer.
@@ -547,6 +547,9 @@ hqDefine("cloudcare/js/form_entry/entrycontrols_full", function () { self.options.subscribe(function () { self.renderSelect2(); + if (!self.isValid(self.rawAnswer())) { + self.question.error(gettext('Not a valid choice')); + } }); self.additionalSelect2Options = function () { @@ -614,7 +617,7 @@ hqDefine("cloudcare/js/form_entry/entrycontrols_full", function () { if (!value) { return true; } - return _.contains(self.choices(), value); + return _.contains(_.pluck(self.options(), 'text'), value); }; self.enableReceiver(question, options); @@ -656,6 +659,24 @@ hqDefine("cloudcare/js/form_entry/entrycontrols_full", function () { ComboboxEntry.prototype = Object.create(DropdownEntry.prototype); ComboboxEntry.prototype.constructor = DropdownEntry; + ComboboxEntry.prototype.onPreProcess = function (newValue) { + var value; + if (newValue === Const.NO_ANSWER || newValue === '') { + this.answer(Const.NO_ANSWER); + this.question.error(null); + return; + } + + value = _.find(this.options(), function (d) { + return d.text === newValue; + }); + if (value) { + this.answer(value.id); + this.question.error(null); + } else { + this.question.error(gettext('Not a valid choice')); + } + }; ComboboxEntry.prototype.receiveMessage = function (message, field) { // Iterates through options and selects an option that matches message[field]. // Registers a no answer if message[field] is not in options.
Docs: Update bag.rst This PR updates `bag.rst` with a single fix: changing *Dask.Bag* to *Dask Bag*.
Bag === -Dask.Bag parallelizes computations across a large collection of generic Python +Dask Bag parallelizes computations across a large collection of generic Python objects. It is particularly useful when dealing with large quantities of semi-structured data like JSON blobs or log files.
fix: Use urllib instead of requests Simply because "too much effort" to add another library pfft
+import json import os import re -import requests import shlex import subprocess import sys +import urllib.request +def get_files_list(pr_number): + req = urllib.request.Request(f"https://api.github.com/repos/frappe/frappe/pulls/{pr_number}/files") + res = urllib.request.urlopen(req) + dump = json.loads(res.read().decode('utf8')) + return [change["filename"] for change in dump] + def get_output(command, shell=True): print(command) command = shlex.split(command) @@ -31,8 +38,7 @@ if __name__ == "__main__": pr_number = os.environ.get("PR_NUMBER") if not files_list and pr_number: - res = requests.get(f"https://api.github.com/repos/frappe/frappe/pulls/{pr_number}/files") - files_list = [f["filename"] for f in res.json()] + files_list = get_files_list(pr_number=pr_number) if not files_list: print("No files' changes detected. Build is shutting")
apidoc: Show customer owner can create/delete user apidoc states that the token type is "User authorization token" when supplied for creating or deleting a user. The token needs to belong to the user that is the customer owner.
@@ -80,7 +80,7 @@ class UserViewSet(mixins.CreateModelMixin, @apiVersion 1.0.0 @apiDescription Create a user. - @apiHeader {String} token User authorization token. + @apiHeader {String} token Customer owner authorization token. @apiHeaderExample {json} Header-Example: { "Authorization": "Token 45138a913da44ab89532bab0352ef84b" @@ -184,7 +184,7 @@ class UserViewSet(mixins.CreateModelMixin, @apiVersion 1.0.0 @apiDescription Delete a user. - @apiHeader {String} token User authorization token. + @apiHeader {String} token Customer owner authorization token. @apiHeaderExample {json} Header-Example: { "Authorization": "Token 45138a913da44ab89532bab0352ef84b"
settings: Fix typing for LDAP_EMAIL_ATTR. This is already defined in `prod_settings_template`.
@@ -135,7 +135,7 @@ if FAKE_LDAP_MODE: "custom_profile_field__phone_number": "phoneNumber", } elif FAKE_LDAP_MODE == 'c': - LDAP_EMAIL_ATTR = 'email' # type: Optional[str] + LDAP_EMAIL_ATTR = 'email' AUTH_LDAP_USER_ATTR_MAP = { "full_name": "cn", }
Fix language check on inserting code snippets into Script Editor Restores the language check for the ScriptEditor widget when inserting a code snippet into the editor with the insert button.
@@ -104,28 +104,25 @@ class CodeSnippetDisplay extends MetadataDisplay< this._evtMouseUp = this._evtMouseUp.bind(this); } - // Handle code snippet insert into an editor + // Handle code snippet insertion into an editor private insertCodeSnippet = async (snippet: IMetadata): Promise<void> => { - const widget: Widget = this.props.getCurrentWidget(); - const snippetStr: string = snippet.metadata.code.join('\n'); + const widget = this.props.getCurrentWidget(); + const snippetStr = snippet.metadata.code.join('\n'); - if ( - widget instanceof DocumentWidget && - (widget as DocumentWidget).content instanceof FileEditor - ) { - const documentWidget = widget as DocumentWidget; - const fileEditor = (documentWidget.content as FileEditor).editor; + if (this.isFileEditor(widget)) { + const fileEditor = widget.content.editor; const markdownRegex = /^\.(md|mkdn?|mdown|markdown)$/; if ( PathExt.extname(widget.context.path).match(markdownRegex) !== null && snippet.metadata.language.toLowerCase() !== 'markdown' ) { - // Wrap snippet into a code block when inserting it into a markdown file fileEditor.replaceSelection( - '```' + snippet.metadata.language + '\n' + snippetStr + '\n```' + this.addMarkdownCodeBlock(snippet.metadata.language, snippetStr) ); - } else if (widget.constructor.name == 'PythonFileEditor') { - this.verifyLanguageAndInsert(snippet, 'python', fileEditor); + } else if (widget.constructor.name === 'ScriptEditor') { + const editorLanguage = + widget.context.sessionContext.kernelPreference.language; + this.verifyLanguageAndInsert(snippet, editorLanguage, fileEditor); } else { fileEditor.replaceSelection(snippetStr); } @@ -149,9 +146,8 @@ class CodeSnippetDisplay extends MetadataDisplay< notebookCell instanceof MarkdownCell && snippet.metadata.language.toLowerCase() !== 'markdown' ) { - // Wrap snippet into a code block when inserting it into a markdown cell notebookCellEditor.replaceSelection( - '```' + snippet.metadata.language + '\n' + snippetStr + '\n```' + this.addMarkdownCodeBlock(snippet.metadata.language, snippetStr) ); } else { notebookCellEditor.replaceSelection(snippetStr); @@ -163,6 +159,18 @@ class CodeSnippetDisplay extends MetadataDisplay< } }; + // Verify if a given widget is a FileEditor + private isFileEditor = ( + widget: Widget + ): widget is DocumentWidget<FileEditor> => { + return (widget as DocumentWidget).content instanceof FileEditor; + }; + + // Return the given code wrapped in a markdown code block + private addMarkdownCodeBlock = (language: string, code: string): string => { + return '```' + language + '\n' + code + '\n```'; + }; + // Handle language compatibility between code snippet and editor private verifyLanguageAndInsert = async ( snippet: IMetadata,
Adapt documentation to reflect new supported argument Reflect the changes of
@@ -559,7 +559,7 @@ Example:: add_docstr(torch._C.cat, """ -cat(seq, dim=0) -> Tensor +cat(seq, dim=0, out=None) -> Tensor Concatenates the given sequence of :attr:`seq` Tensors in the given dimension. @@ -570,6 +570,7 @@ Concatenates the given sequence of :attr:`seq` Tensors in the given dimension. Args: seq (sequence of Tensors): Can be any python sequence of `Tensor` of the same type. dim (int, optional): The dimension over which the tensors are concatenated + out (Tensor, optional): Output argument Example::
doc/robotics: emphasize positive directions Move it to the class description, because it applies to all methods. Use minimal and concise language, with only one definition per line, instead of having an elaborate paragraph.
@@ -8,7 +8,20 @@ from ._common import Control as _Control class DriveBase: """A robotic vehicle with two powered wheels and an optional support - wheel or caster.""" + wheel or caster. + + By specifying the dimensions of your robot, this class + makes it easy to drive a given distance in millimeters or turn by a given + number of degrees. + + **Positive** distances and drive speeds mean + driving **forward**. **Negative** means **backward**. + + **Positive** angles and turn rates mean turning **right**. + **Negative** means **left**. So when viewed from the top, + positive means clockwise and negative means counterclockwise. + + """ distance_control = _Control() """The traveled distance and drive speed are controlled by a PID @@ -38,10 +51,8 @@ class DriveBase: measured at the center point between the wheels of the robot. Arguments: - drive_speed (:ref:`linspeed`): Speed of the robot. Positive is forward, - negative is backward. - turn_rate (:ref:`speed`): Turn rate of the robot. Positive is to the - right, negative is to the left. + drive_speed (:ref:`linspeed`): Speed of the robot. + turn_rate (:ref:`speed`): Turn rate of the robot. """ pass
Fixes validators version requirement The current requirement is 0.18.2, but this is invalid for Python 2.7. The last version that supports Python 2.7 is 0.14.2. So, unless we drop Python 2.7 support (meaning dropping Win32 and OSX), we must ask for 0.14.2.
@@ -5,5 +5,5 @@ future # For print statements. tqdm >= 4.30.0 # Progress bar. requests >= 2.18.4 # Scrape, API and web modules. babel >= 2.6 # For currency format by the language in the spreadsheet. -validators >= 0.18.2 # For validation of datasheet URLs in the spreadsheet. +validators >= 0.14.2 # For validation of datasheet URLs in the spreadsheet. wxPython >= 4.0 # Graphical package/library needed to user guide.
Update hosts Add malicious hosts
@@ -2467,3 +2467,20 @@ analytics.shein.co.uk 0.0.0.0 auspost-record.com 0.0.0.0 alert-auspost.com 0.0.0.0 auspost-input.com + +# Added January 22, 2023 +0.0.0.0 vep.hautnah-kosmetik-grafenau.de +0.0.0.0 ttri.kanzlei-ecklmaier.de +0.0.0.0 d-stuff.cfd +0.0.0.0 ruhrboudoir.de +0.0.0.0 d-bring.cfd +0.0.0.0 obev.pauya.de +0.0.0.0 dimoradellapizzuta.it +0.0.0.0 misanoassicurazioni.it +0.0.0.0 d-enemy.cfd +0.0.0.0 sindelfingen-haarentfernung.de +0.0.0.0 asdpoliziamunicipalepa.it +0.0.0.0 mf-massoterapia.it +0.0.0.0 tyty.lolokira.lol +0.0.0.0 yord.serawent-pole.lol +0.0.0.0 redivosrl.it
Update utils.py implemented AverageMeterGroup and NamedAverageMeter classes
@@ -316,6 +316,69 @@ class AttrDict(dict): self.__dict__ = self +lass AverageMeterGroup: + """Average meter group for multiple average meters, ported from Naszilla repo.""" + + def __init__(self): + self.meters = OrderedDict() + + def update(self, data, n=1): + for k, v in data.items(): + if k not in self.meters: + self.meters[k] = NamedAverageMeter(k, ":4f") + self.meters[k].update(v, n=n) + + def __getattr__(self, item): + return self.meters[item] + + def __getitem__(self, item): + return self.meters[item] + + def __str__(self): + return " ".join(str(v) for v in self.meters.values()) + + def summary(self): + return " ".join(v.summary() for v in self.meters.values()) + + +class NamedAverageMeter: + """Computes and stores the average and current value, ported from naszilla repo""" + + def __init__(self, name, fmt=':f'): + """ + Initialization of AverageMeter + Parameters + ---------- + name : str + Name to display. + fmt : str + Format string to print the values. + """ + self.name = name + self.fmt = fmt + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def __str__(self): + fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' + return fmtstr.format(**self.__dict__) + + def summary(self): + fmtstr = '{name}: {avg' + self.fmt + '}' + return fmtstr.format(**self.__dict__) + + class AverageMeter(object): def __init__(self):
test should use layer type Switch test to use layer type rather than name - name changes depending on quantity and ordering of tests.
@@ -694,9 +694,9 @@ class TestCornerCases(unittest.TestCase): ) spec = mlmodel.get_spec() - output_names = [layer.name for layer in spec.neuralNetwork.layers] - expected_names = [u'model/conv2d/Conv2D', u'Identity'] - np.testing.assert_array_equal(output_names, expected_names) + output_types = [layer.WhichOneof('layer') for layer in spec.neuralNetwork.layers] + expected_types = ['convolution', 'transpose'] + np.testing.assert_array_equal(output_types, expected_types) if __name__ == '__main__':
Modify tests/conftest.py - Change order of factory functions called by dc_pod_factory
@@ -764,8 +764,8 @@ def service_account_factory(request): @pytest.fixture() def dc_pod_factory( request, - service_account_factory, pvc_factory, + service_account_factory ): """ Create deploymentconfig pods
fix(device): fix button mapping of WXCJKG13LMLightController Bottom right button had the wrong action
@@ -189,7 +189,7 @@ class WXCJKG13LMLightController(LightController): "button_5_hold": Light.HOLD_COLOR_DOWN, "button_5_release": Light.RELEASE, "button_6_single": Light.CLICK_COLOR_UP, - "button_6_double": Light.ON_MIN_COLOR_TEMP, + "button_6_double": Light.ON_FULL_COLOR_TEMP, "button_6_hold": Light.HOLD_COLOR_UP, "button_6_release": Light.RELEASE, }
Update spectral.py Clarify documentation
@@ -46,7 +46,7 @@ def lombscargle(x, freqs : array_like Angular frequencies for output periodogram. precenter : bool, optional - Pre-center amplitudes by subtracting the mean. + Pre-center measurement values by subtracting the mean. normalize : bool, optional Compute normalized periodogram.
[dagit] Update "Timing" column for Queued runs Summary: Queued runs should say "Queued" instead of starting. Test Plan: Start a run, verify row in Queued tab. Reviewers: johann, dgibson, prha
@@ -9,6 +9,7 @@ import {PythonErrorInfo} from 'src/PythonErrorInfo'; import {TokenizingFieldValue} from 'src/TokenizingField'; import {RunActionsMenu, RunBulkActionsMenu} from 'src/runs/RunActionsMenu'; import {RunStatusTagWithStats} from 'src/runs/RunStatusTag'; +import {queuedStatuses} from 'src/runs/RunStatuses'; import {RunTags} from 'src/runs/RunTags'; import {RunComponentFragments, RunElapsed, RunTime, titleForRun} from 'src/runs/RunUtils'; import {RunTableRunFragment} from 'src/runs/types/RunTableRunFragment'; @@ -242,7 +243,7 @@ const RunRow: React.FC<{ </td> <td style={{maxWidth: '150px', whiteSpace: 'nowrap'}}> <RunTime run={run} /> - <RunElapsed run={run} /> + {queuedStatuses.has(run.status) ? null : <RunElapsed run={run} />} </td> {additionalColumns} <td style={{maxWidth: '52px'}}>
[core] Add '-d' as shorthand for '--debug' see
@@ -24,7 +24,7 @@ class Config(util.store.Store): help='Specify the name of an iconset to use (overrides theme default)') parser.add_argument('-a', '--autohide', nargs='+', default=[], help='Specify a list of modules to hide when not in warning/error state') - parser.add_argument('--debug', action='store_true', + parser.add_argument('-d', '--debug', action='store_true', help='Add debug fields to i3 output') self.__args = parser.parse_args(args)
Remove ex13 from the list of exceptions The dependency pygmsh was removed from ex13 in an earlier commit and, hence, we relicense the file to BSD-3-Clause.
@@ -34,7 +34,7 @@ later in this document or at the beginning of the respective files. Some examples under `docs/examples/` are licensed differently. In particular, they have a GPL-licensed dependency and, therefore, are also GPL-licensed. -- docs/examples/ex{04,13,28,32,35}.py +- docs/examples/ex{04,28,32,35}.py > Copyright 2018-2020 scikit-fem developers >
Set the `paramiko` package version to 2.10.3 `paramiko` package versions prior to 2.10.1 have a race condition which could be a potential security issue.
@@ -10,7 +10,7 @@ typing; python_version < '3.5' # typing package sup enum34<=1.1.10; python_version < '3.5' # Enum package backport for python 2.7 # All dependencies needed in extras for rucio client (and server/daemons) should be defined here -paramiko~=2.9.2 # ssh_extras; SSH2 protocol library (also needed in the server) +paramiko~=2.10.3 # ssh_extras; SSH2 protocol library (also needed in the server) Race condition before 2.10.1 (https://github.com/advisories/GHSA-f8q4-jwww-x3wv) kerberos~=1.3.1 # kerberos_extras for client and server pykerberos~=1.2.1 # kerberos_extras for client and server requests-kerberos>=0.12.0 # kerberos_extras for client and server
Stop using jinja2's tojson We're already vendoring the filter, which also has fewer features than what we want to support.
@@ -460,13 +460,9 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None): undefined=jinja2.StrictUndefined, **env_args ) - tojson_filter = jinja_env.filters.get("tojson") indent_filter = jinja_env.filters.get("indent") jinja_env.tests.update(JinjaTest.salt_jinja_tests) jinja_env.filters.update(JinjaFilter.salt_jinja_filters) - if tojson_filter is not None: - # Use the existing tojson filter, if present (jinja2 >= 2.9) - jinja_env.filters["tojson"] = tojson_filter if salt.utils.jinja.JINJA_VERSION >= LooseVersion("2.11"): # Use the existing indent filter on Jinja versions where it's not broken jinja_env.filters["indent"] = indent_filter
Travis: Improve build time Install the binary ffmpeg (3.2.4) package instead of building it
@@ -7,7 +7,7 @@ python: cache: directories: - $HOME/.cache/pip -sudo: false +sudo: required matrix: fast_finish: true allow_failures: @@ -43,20 +43,15 @@ addons: - liblapack-dev - python-pyexiv2 - yasm +before_install: + - echo "deb http://archive.ubuntu.com/ubuntu zesty main universe" | sudo tee --append /etc/apt/sources.list + - sudo apt-get update + - sudo apt-get install ffmpeg -y -f install: - pip install -U --upgrade pip - cd $TRAVIS_BUILD_DIR && make setup - pip install coveralls - pip install cairosvg - - wget https://ffmpeg.org/releases/ffmpeg-3.2.4.tar.xz - -O /tmp/ffmpeg-3.2.4.tar.xz - - mkdir /tmp/ffmpeg-source - - tar -C /tmp/ffmpeg-source --strip 1 -xvf /tmp/ffmpeg-3.2.4.tar.xz - - cd /tmp/ffmpeg-source && ./configure --prefix=/tmp --enable-gpl --enable-libx264 --enable-libvpx - - cd /tmp/ffmpeg-source && make - - cd /tmp/ffmpeg-source && make install - - export PATH=/tmp/bin:$PATH - - ffmpeg -version before_script: - cd $TRAVIS_BUILD_DIR && make redis script:
join() parallel process instead of a recursive sleep It's not clear to me why the recursive calls were chosen originally. This should address
@@ -2183,8 +2183,7 @@ class State(object): continue if run_dict[tag].get('proc'): # Run in parallel, first wait for a touch and then recheck - time.sleep(0.01) - return self.check_requisite(low, running, chunks, pre) + run_dict[tag].get('proc').join() if r_state == 'onfail': if run_dict[tag]['result'] is True: fun_stats.add('onfail') # At least one state is OK
chore: Link to the newer Cloud NDB repo in the README This should make it easier to find the new code and reduce the potential for confusion.
## Introduction +This repository is for the original Datastore ndb client library. +If you are looking for Cloud NDB, which supports Python 3 and works both inside and outside of the Google App Engine environment, please see [this repository][0]. + --- **Note:** As of Google App Engine SDK 1.6.4, ndb has reached status General Availability. @@ -12,9 +15,9 @@ Using ndb from outside of Google App Engine (without the use of Remote API) is c --- -ndb is a client library for use with [Google Cloud Datastore][0]. +ndb is a client library for use with [Google Cloud Datastore][1]. It was designed specifically to be used from within the -[Google App Engine][1] Python runtime. +[Google App Engine][2] Python runtime. ndb is included in the Python runtime and is available through a standard Python import. @@ -28,9 +31,10 @@ however that ndb depends on the non-public Google Datastore App Engine RPC API. ## Overview Learn how to use the ndb library by visiting the Google Cloud Platform -[documentation][2]. +[documentation][3]. -[0]:https://cloud.google.com/datastore -[1]:https://cloud.google.com/appengine -[2]:https://cloud.google.com/appengine/docs/python/ndb/ +[0]:https://github.com/googleapis/python-ndb +[1]:https://cloud.google.com/datastore +[2]:https://cloud.google.com/appengine +[3]:https://cloud.google.com/appengine/docs/python/ndb/
Fixed grub interactive prompt Removed the apt-get upgrade step
@@ -54,7 +54,7 @@ gcloud beta compute instances create crypto-driver \ * Installing necessary tools like java, git, maven, pip, python 2.7 and Cloud Bigtable command line tool cbt using the following command: ```console sudo -s - apt-get -y update && apt-get -y upgrade + apt-get -y update apt -y --allow-downgrades install python2.7 python-pip openjdk-8-jdk git maven google-cloud-sdk=271.0.0-0 google-cloud-sdk-cbt=271.0.0-0 ```
Update schedule.py fulfill the requirement of a CloudWatch rule name: ([a-zA-Z0-9-_]+)
@@ -19,7 +19,7 @@ def schedule(args): json_config = json.load(config_file) namespace = json_config['namespace'] - rulename = "%s.%s-%s-mins" % (function_name, namespace, schedule_mins) + rulename = "%s-%s-%s-mins" % (function_name, namespace, schedule_mins) # create the cloudwatch event rule rule = cw.put_rule(Name=rulename,
adopters: add Keruyun Via:
@@ -28,6 +28,7 @@ This is a list of TiDB adopters in various industries. - [FUNYOURS JAPAN (Gaming)](http://company.funyours.co.jp/) - [Hainan eKing Technology (Enterprise Technology)](https://www.crunchbase.com/organization/hainan-eking-technology) - [2Dfire (FoodTech)](http://www.2dfire.com/) +- [Keruyun (FoodTech)](http://www.keruyun.com/en) - [G7 (Internet of Things)](https://www.english.g7.com.cn/) - [Yimian Data (Big Data)](https://www.yimian.com.cn) - [Wanda Internet Technology Group (Big Data)](http://www.wanda-tech.cn/en/) \ No newline at end of file
Fixed reorientate tests for big-endian machines exif_str() returns a little-endian EXIF Thanks
@@ -28,7 +28,7 @@ STORAGE_PATH = abspath(join(dirname(__file__), '../fixtures/images/')) def exif_str(x): - return b'Exif\x00\x00II*\x00\x08\x00\x00\x00\x05\x00\x12\x01\x03\x00\x01\x00\x00\x00%s\x00\x00\x1a\x01\x05\x00\x01\x00\x00\x00J\x00\x00\x00\x1b\x01\x05\x00\x01\x00\x00\x00R\x00\x00\x00(\x01\x03\x00\x01\x00\x00\x00\x02\x00\x00\x00\x13\x02\x03\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00H\x00\x00\x00\x01\x00\x00\x00H\x00\x00\x00\x01\x00\x00\x00' % pack('h', x) # noqa + return b'Exif\x00\x00II*\x00\x08\x00\x00\x00\x05\x00\x12\x01\x03\x00\x01\x00\x00\x00%s\x00\x00\x1a\x01\x05\x00\x01\x00\x00\x00J\x00\x00\x00\x1b\x01\x05\x00\x01\x00\x00\x00R\x00\x00\x00(\x01\x03\x00\x01\x00\x00\x00\x02\x00\x00\x00\x13\x02\x03\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00H\x00\x00\x00\x01\x00\x00\x00H\x00\x00\x00\x01\x00\x00\x00' % pack('<h', x) # noqa class BaseEngineTestCase(TestCase):
Fixed wrong path of ceph.conf in docs. The path of the ceph.conf sample template moved to ceph-config. Therefore the docs need to be changed to the right directory.
@@ -171,7 +171,7 @@ ceph.conf Configuration ----------------------- The supported method for defining your ceph.conf is to use the ``ceph_conf_overrides`` variable. This allows you to specify configuration options using -an INI format. This variable can be used to override sections already defined in ceph.conf (see: ``roles/ceph-common/templates/ceph.conf.j2``) or to provide +an INI format. This variable can be used to override sections already defined in ceph.conf (see: ``roles/ceph-config/templates/ceph.conf.j2``) or to provide new configuration options. The following sections in ceph.conf are supported: [global], [mon], [osd], [mds] and [rgw]. An example::
Add unit test for nested_kl_divergence in PPO This test makes sure that nested_kl_divergence works with nested distributions of different shapes.
@@ -77,6 +77,22 @@ class PPOUtilsTest(parameterized.TestCase, tf.test.TestCase): kl_divergence_ = self.evaluate(kl_divergence) self.assertAllClose(expected_kl_divergence, kl_divergence_) + # test for distributions with different shapes + one_reshaped = tf.constant([[1.0]] * 3, dtype=tf.float32) + dist_neg_one_reshaped = tfp.distributions.Normal( + loc=-one_reshaped, scale=one_reshaped) + dist_one_reshaped = tfp.distributions.Normal( + loc=one_reshaped, scale=one_reshaped) + + nested_dist1 = [dist_zero, [dist_neg_one_reshaped, dist_one]] + nested_dist2 = [dist_one, [dist_one_reshaped, dist_zero]] + kl_divergence = ppo_utils.nested_kl_divergence( + nested_dist1, nested_dist2) + expected_kl_divergence = 3 * 3.0 # 3 * (0.5 + (2.0 + 0.5)) + + kl_divergence_ = self.evaluate(kl_divergence) + self.assertAllClose(expected_kl_divergence, kl_divergence_) + def test_get_distribution_params(self): ones = tf.ones(shape=[2], dtype=tf.float32) distribution = (tfp.distributions.Categorical(logits=ones),
client: update isolated This is to have crrev.com/c/2022482 for the task like
@@ -114,7 +114,7 @@ ISOLATED_CLIENT_DIR = u'ic' # Take revision from # https://ci.chromium.org/p/infra-internal/g/infra-packagers/console ISOLATED_PACKAGE = 'infra/tools/luci/isolated/${platform}' -ISOLATED_REVISION = 'git_revision:0c630715fe1307ec34118ec6d4160de489e0eca5' +ISOLATED_REVISION = 'git_revision:998a75a97562b8c40feec956be8b4274d96e2eea' # Keep synced with task_request.py CACHE_NAME_RE = re.compile(r'^[a-z0-9_]{1,4096}$')
Reduce lower limit of tx per block seen at a certain fee rate, so estimates are produced sooner Reduce lower limit of tx per block seen at a certain fee rate, so that we produce estimates sooner
@@ -29,7 +29,9 @@ LONG_DECAY = 0.99931 HALF_SUCCESS_PCT = 0.6 # Require 60 % success rate for target confirmations SUCCESS_PCT = 0.85 # Require 85 % success rate for target confirmations DOUBLE_SUCCESS_PCT = 0.95 # Require 95 % success rate for target confirmations -SUFFICIENT_FEE_TXS = 0.1 # Require an avg of 0.1 tx in the combined fee rate bucket per block to have stat significance + +# Require an avg of SUFFICIENT_FEE_TXS tx in the combined fee rate bucket per block to have stat. significance +SUFFICIENT_FEE_TXS = 0.01 FEE_ESTIMATOR_VERSION = 1
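For scale, a quick worked example of the threshold change above: with the old SUFFICIENT_FEE_TXS of 0.1, a fee-rate bucket needed on average about one matching transaction every 10 blocks before it was considered statistically significant; with 0.01, roughly one every 100 blocks is enough, so estimates appear sooner.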
Make RuleTest use ABCMeta This will prevent child classes from being instantiated unless they implement all abstract methods, leading to a more descriptive error message.
import unittest -from abc import abstractmethod +from abc import ABCMeta, abstractmethod from typing import Callable, Dict, Iterable, List, NamedTuple, Tuple from tests.helpers import MockMessage @@ -12,7 +12,7 @@ class DisallowedCase(NamedTuple): n_violations: int -class RuleTest(unittest.TestCase): +class RuleTest(unittest.TestCase, metaclass=ABCMeta): """ Abstract class for antispam rule test cases.
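For context, a minimal standalone sketch of the behavior this change relies on (class and method names below are illustrative, not taken from the codebase): with ABCMeta as the metaclass, instantiating a child class that leaves an abstract method unimplemented fails immediately with a descriptive TypeError.

```python
import unittest
from abc import ABCMeta, abstractmethod


class RuleTestSketch(unittest.TestCase, metaclass=ABCMeta):
    """Toy stand-in for an abstract test base class."""

    @abstractmethod
    def cases(self):
        """Child classes must provide their cases."""


class IncompleteRuleTest(RuleTestSketch):
    """Forgets to implement `cases`, so it cannot be instantiated."""


# Attempting IncompleteRuleTest() raises a TypeError along the lines of:
# "Can't instantiate abstract class IncompleteRuleTest with abstract method cases"
```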
Update Example.py Bug fix
@@ -108,6 +108,7 @@ class VoiceState: self.start_time = datetime.datetime.now() self.current = self.playlist[0] + self.votes = [] self.votes.append({ 'user' : self.current.requester, 'value' : 'keep' }) await self.bot.send_message(self.current.channel, 'Now playing ' + str(self.current))
Calculate how many items to assign to sales order Do not over-allocate by default
@@ -1658,7 +1658,7 @@ function allocateStockToSalesOrder(order_id, line_items, options={}) { var available = Math.max((data.quantity || 0) - (data.allocated || 0), 0); // Remaining quantity to be allocated? - var remaining = opts.quantity || available; + var remaining = Math.max(line_item.quantity - line_item.shipped - line_item.allocated, 0); // Maximum amount that we need var desired = Math.min(available, remaining);
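With hypothetical numbers (invented for illustration), the new formula caps the suggested allocation at what the line item still needs rather than at everything currently available:

```python
# Hypothetical line item: ordered 10, already shipped 2, already allocated 5.
quantity, shipped, allocated = 10, 2, 5
available = 4                                        # stock on hand in the chosen location

remaining = max(quantity - shipped - allocated, 0)   # 3 still needed
desired = min(available, remaining)                  # 3, not 4 -> no over-allocation
print(desired)
```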
droplets: Disable SSH password authentication. We cannot disable this in the base droplet as DigitalOcean overrides the file and enables password authentication during droplet creation.
@@ -124,6 +124,8 @@ def set_user_data(username: str, userkey_dicts: List[Dict[str, Any]]) -> str: {setup_zulipdev_ssh_keys} {setup_root_ssh_keys} +sed -i "s/PasswordAuthentication yes/PasswordAuthentication no/g" /etc/ssh/sshd_config +service ssh restart {hostname_setup} su -c '{server_repo_setup}' zulipdev su -c '{python_api_repo_setup}' zulipdev
[quant][onnx] Mark upsample_nearest2d, sigmoid and reshape as no scale Summary: Pull Request resolved: return the scale of the input tensor Test Plan: python test/onnx/test_pytorch_onnx_caffe2_quantized.py Imported from OSS
@@ -40,7 +40,10 @@ double getScaleFromInput(Node* input_node) { "aten::slice", "aten::avg_pool2d", "quantized::cat", - "prim::ListConstruct"}; + "prim::ListConstruct", + "aten::upsample_nearest2d", + "aten::sigmoid", + "aten::reshape"}; if (input_name == "aten::quantize_per_tensor") { TORCH_CHECK( input_node->inputs().size() > 1,
events api: Delete incorrect parameter in response. max_logo_file_size isn't included in the responses of GET /events api but is mentioned in the api documentation. Hence it is deleted.
@@ -3520,10 +3520,6 @@ paths: * "D" means the logo is the default Zulip logo. * "U" means uploaded by an organization administrator. - max_logo_file_size: - type: integer - description: | - The maximum file size allowed for the uploaded organization logos. bot_domain: type: string description: |
Fix warning Fix another warning when converting from numpy to torch in collate function
@@ -1656,7 +1656,9 @@ class TimeSeriesDataSet(Dataset): ) target_scale.append(scale) else: # convert to tensor - target_scale = torch.tensor([batch[0]["target_scale"] for batch in batches], dtype=torch.float) + target_scale = torch.from_numpy( + np.array([batch[0]["target_scale"] for batch in batches], dtype=np.float32), + ) # target and weight if isinstance(batches[0][1][0], (tuple, list)):
[utils] Remove unused code. Remove log_stack_failure_recursive from pcluster.utils Remove _log_cfn_event from pcluster.utils
@@ -219,36 +219,6 @@ def verify_stack_status(stack_name, waiting_states, successful_states): return status in successful_states -def log_stack_failure_recursive(stack_name, failed_states=None, indent=2): - """Log stack failures in recursive manner, until there is no substack layer.""" - if not failed_states: - failed_states = ["CREATE_FAILED"] - - from pcluster.aws.aws_api import AWSApi # pylint: disable=import-outside-toplevel - - events = AWSApi.instance().cfn.get_stack_events(stack_name) - for event in events: - if event.get("ResourceStatus") in failed_states: - _log_cfn_event(event, indent) - if event.get("ResourceType") == "AWS::CloudFormation::Stack": - # Sample substack error: - # "Embedded stack arn:aws:cloudformation:us-east-2:704743599507:stack/ - # parallelcluster-fsx-fail-FSXSubstack-65ITLJEZJ0DQ/ - # 3a4ecf00-51e7-11ea-8e3e-022fd555c652 was not successfully created: - # The following resource(s) failed to create: [FileSystem]." - substack_error = re.search(".+ (arn:aws:cloudformation[^ ]+) ", event.get("ResourceStatusReason")) - substack_name = substack_error.group(1) if substack_error else None - if substack_name: - log_stack_failure_recursive(substack_name, indent=indent + 2) - - -def _log_cfn_event(event, indent): - """Log failed CFN events.""" - from pcluster.aws.aws_api import AWSApi # pylint: disable=import-outside-toplevel - - print("{}- {}".format(" " * indent, AWSApi.instance().cfn.format_event(event))) - - def get_templates_bucket_path(): """Return a string containing the path of bucket.""" region = get_region()
python/module_py.mako: fix wrong type annotation TN:
@@ -783,7 +783,7 @@ class ${root_astnode_name}(object): """ Cache for fields and argument-less properties. - :type: dict[int, T] + :type: dict[str, T] """ @property
doc: warn that tutorials are based on develop branch fixes
@@ -54,6 +54,9 @@ If you use `pyannote.audio` please use the following citations. ## Tutorials +:warning: These tutorials assumes that you installed the [`develop` branch](https://github.com/pyannote/pyannote-audio/issues/145) of `pyannote.audio`. +:warning: They are most likely [broken](https://github.com/pyannote/pyannote-audio/issues/151) in `pyannote.audio 1.x`. + * [Feature extraction](tutorials/feature_extraction) * Models * [Training LSTM-based speech activity detection](tutorials/models/speech_activity_detection)
Typo in the second export: replace := with = in the SCENARIO_RUNNER_PATH export
@@ -10,7 +10,7 @@ The Node setup is visualized [here](../docs/images/ad_demo.png "AD Demo Node Set ## Startup export PYTHONPATH=$PYTHONPATH:<path_to_carla>/PythonAPI/carla-<carla_version_and_arch>.egg:<path_to_carla>/PythonAPI/carla/ - export SCENARIO_RUNNER_PATH:=<path_to_scenario_runner> + export SCENARIO_RUNNER_PATH=<path_to_scenario_runner> roslaunch carla_ad_demo carla_ad_demo_with_rviz.launch ### Modes
Adding codecov configuration Added minimal configuration
@@ -20,6 +20,11 @@ jobs: python -m pip install tox - name: Run tests run: tox + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1 + with: + token: ${{ secrets.CODECOV_TOKEN }} + fail_ci_if_error: true release: needs: test
site_tests.test_search_where_title: Use assertIn instead of assertTrue This way unittest will produce a more descriptive error message by showing AssertionError: 'wiki' not found in <page title> instead of AssertionError: False is not true
@@ -1505,7 +1505,7 @@ class SearchTestCase(DefaultSiteTestCase): get_redirects=True, where='title'): self.assertIsInstance(hit, pywikibot.Page) self.assertEqual(hit.namespace(), 0) - self.assertTrue('wiki' in hit.title().lower()) + self.assertIn('wiki', hit.title().lower()) except pywikibot.data.api.APIError as e: if e.code in ('search-title-disabled', 'gsrsearch-title-disabled'): raise unittest.SkipTest(
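A minimal sketch (the test class and title value are illustrative) of the difference in failure output that motivates the change: assertTrue only reports the boolean, while assertIn reports both the needle and the haystack.

```python
import unittest


class SearchTitleDemo(unittest.TestCase):
    def test_with_assert_true(self):
        title = "Main Page"
        # On failure: AssertionError: False is not true
        self.assertTrue('wiki' in title.lower())

    def test_with_assert_in(self):
        title = "Main Page"
        # On failure: AssertionError: 'wiki' not found in 'main page'
        self.assertIn('wiki', title.lower())


if __name__ == '__main__':
    unittest.main()
```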
Take the number of items returned in the current job into account for the days-with-no-gazette monitor. Without considering the number of items returned in the current job, we got false failures when items were returned in the latest job.
@@ -61,8 +61,10 @@ class ComparisonBetweenSpiderExecutionsMonitor(Monitor): .filter(JobStats.spider == self.data.spider.name) .all() ) - extracted_in_period = sum( - [stat.job_stats.get("item_scraped_count", 0) for stat in job_stats] + n_scraped_items = self.data.stats.get("item_scraped_count", 0) + extracted_in_period = ( + sum([stat.job_stats.get("item_scraped_count", 0) for stat in job_stats]) + + n_scraped_items ) self.assertNotEqual( extracted_in_period,
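With invented numbers: if the previous jobs in the comparison window scraped nothing but the current job scraped 12 items, adding the current job's count keeps the monitor from raising a false failure.

```python
# Hypothetical counts for the comparison window.
previous_jobs_item_counts = [0, 0, 0]
current_job_item_count = 12

extracted_in_period = sum(previous_jobs_item_counts) + current_job_item_count
assert extracted_in_period != 0   # monitor passes instead of flagging a false failure
```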
Update Tenable_sc.yml Update argument for tenable-sc-get-all-scan-results
@@ -1043,8 +1043,8 @@ script: default: false defaultValue: 'false' description: |- - Whether to return only manageable alerts. Returns both usable and - manageable by default. + Filter only manageable alerts. By default, returns both usable and + manageable alerts. isArray: false name: manageable predefined: @@ -1053,7 +1053,7 @@ script: required: false secret: false deprecated: false - description: Get all scan results in Tenable.sc + description: Returns all scan results in Tenable.sc execution: false name: tenable-sc-get-all-scan-results outputs:
Update Utilities.py Fix bug in _get_a_plus
@@ -337,14 +337,14 @@ def _bi_variate_normal_pdf(x1, x2, rho): def _get_a_plus(a): eig_val, eig_vec = np.linalg.eig(a) - q = np.array(eig_vec) - x_diagonal = np.array(np.diag(np.maximum(eig_val, 0))) + q = np.matrix(eig_vec) + x_diagonal = np.matrix(np.diag(np.maximum(eig_val, 0))) return q * x_diagonal * q.T def _get_ps(a, w=None): - w05 = np.array(w ** .5) + w05 = np.matrix(w ** .5) return w05.I * _get_a_plus(w05 * a * w05) * w05.I @@ -352,7 +352,7 @@ def _get_ps(a, w=None): def _get_pu(a, w=None): a_ret = np.array(a.copy()) a_ret[w > 0] = np.array(w)[w > 0] - return np.array(a_ret) + return np.matrix(a_ret) def _nn_coord(x, k):
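The helpers being patched implement an eigenvalue-clipping projection onto the positive-semidefinite cone; the bug concerns NumPy matrix versus array semantics (`*` and `.I` behave differently on the two types). A plain-ndarray sketch of the same projection, with an illustrative function name and assuming a symmetric input:

```python
import numpy as np


def nearest_psd(a):
    """Project a symmetric matrix onto the PSD cone: Q diag(max(lambda, 0)) Q^T."""
    eig_val, eig_vec = np.linalg.eigh(a)
    return eig_vec @ np.diag(np.maximum(eig_val, 0)) @ eig_vec.T


a = np.array([[1.0, 2.0], [2.0, 1.0]])      # indefinite: eigenvalues 3 and -1
print(np.linalg.eigvalsh(nearest_psd(a)))   # ~[0., 3.] -- negative part clipped to zero
```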
fix: remove base_url. The use case is not supported by requests, so it is not needed anymore.
"engine": "InnoDB", "field_order": [ "provider_name", - "base_url", "cb_00", "openid_configuration", "sb_client_credentials_section", "label": "Scopes", "options": "OAuth Scope" }, - { - "fieldname": "base_url", - "fieldtype": "Data", - "label": "Base URL" - }, { "fieldname": "authorization_uri", "fieldtype": "Data", "link_fieldname": "connected_app" } ], - "modified": "2020-09-27 19:29:17.835067", + "modified": "2020-09-27 20:04:02.303982", "modified_by": "Administrator", "module": "Integrations", "name": "Connected App",
go: enable cgo by default Enable `--golang-cgo-enabled` by default. The option has not been released yet so it is safe to change the default now. [ci skip-rust] [ci skip-build-wheels]
@@ -203,8 +203,7 @@ class GolangSubsystem(Subsystem): ) cgo_enabled = BoolOption( - "--cgo-enabled", - default=False, + default=True, help=softwrap( """\ Enable Cgo support, which allows Go and C code to interact. This option must be enabled for any
[bugfix] Re-enable TestCategoryFromWikibase tests. The wikinews item for 'it' was removed from Q6939656; use wikisource instead.
@@ -3567,8 +3567,8 @@ class TestCategoryFromWikibase(DefaultSiteTestCase): """Test page_from_repository method.""" sites = { - 'it.wb': { - 'family': 'wikinews', + 'it.ws': { + 'family': 'wikisource', 'code': 'it', 'result': 'Categoria:2016', },
Fixed a typo in the mapping rule of hicexplorer.snakefile. snakePipes was crashing because `{params.idx}` in `rule map_fastq_single_end:` was not properly named.
@@ -24,7 +24,7 @@ rule map_fastq_single_end: conda: CONDA_HIC_ENV shell: """ echo "mapping {input}" > {log.out} - bwa mem -A1 -B4 -E50 -L0 -t {threads} {params.index} {input} 2>> {log.err} | samtools view -bo {output} - + bwa mem -A1 -B4 -E50 -L0 -t {threads} {params.idx} {input} 2>> {log.err} | samtools view -bo {output} - """
Fix race condition in time-to-start metric by recording the start time with before_task_publish instead of after_task_publish
import datetime -from celery.signals import after_task_publish, task_prerun +from celery.signals import before_task_publish, task_prerun from django.core.cache import cache @@ -32,7 +32,7 @@ class TimeToStartTimer(object): cache.delete(self._cache_key) -@after_task_publish.connect +@before_task_publish.connect def celery_add_time_sent(headers=None, body=None, **kwargs): info = headers if 'task' in headers else body task_id = info['id']
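A minimal sketch of the pattern the fix relies on (cache key format and timeout are illustrative): by recording the enqueue time in before_task_publish, the timestamp is written to the cache before the message is published, so a fast worker's task_prerun handler can never observe a missing start time.

```python
from celery.signals import before_task_publish, task_prerun
from django.core.cache import cache
from django.utils import timezone


@before_task_publish.connect
def record_sent_time(headers=None, body=None, **kwargs):
    # Protocol 2 puts task metadata in headers, protocol 1 in the body.
    info = headers if headers and 'task' in headers else body
    cache.set('task-sent-%s' % info['id'], timezone.now(), timeout=3600)


@task_prerun.connect
def record_time_to_start(task_id=None, **kwargs):
    sent = cache.get('task-sent-%s' % task_id)
    if sent is not None:
        time_to_start = timezone.now() - sent
        # report time_to_start to the metrics backend here
```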
Fix for StandardPipelineRenderer: adds a settings check to make behavior compatible with the StandardPixelRenderer
@@ -1107,7 +1107,7 @@ class StandardPipelineRenderer(RendererBase): sx = float(win_wd) / scale_x sy = float(win_ht) / scale_y if (sx < 1.0) or (sy < 1.0): - #self.logger.warning("new scale would exceed max/min; scale unchanged") + if self.viewer.settings.get('sanity_check_scale', True): raise RenderError("new scale would exceed pixel max; scale unchanged") data_off = self.viewer.data_off
doc: Fix typo with SM_MODEL_DIR, missing quotes The documentation incorrectly specifies SM_CHANNEL_TRAIN instead of SM_MODEL_DIR for the directory where models are stored. There is also example code missing quotation marks.
@@ -58,7 +58,7 @@ The training script is very similar to a training script you might run outside o For the exhaustive list of available environment variables, see the `SageMaker Containers documentation <https://github.com/aws/sagemaker-containers#list-of-provided-environment-variables-by-sagemaker-containers>`_. -A typical training script loads data from the input channels, configures training with hyperparameters, trains a model, and saves a model to ``SM_CHANNEL_TRAIN`` so that it can be deployed for inference later. +A typical training script loads data from the input channels, configures training with hyperparameters, trains a model, and saves a model to ``SM_MODEL_DIR`` so that it can be deployed for inference later. Hyperparameters are passed to your script as arguments and can be retrieved with an ``argparse.ArgumentParser`` instance. For example, a training script might start with the following: @@ -544,7 +544,7 @@ For example: batch_output = 's3://{}/{}/results'.format(bucket, prefix) # The location to store the results - tf_transformer = tf_estimator.transformer(instance_count=1, instance_type='ml.m4.xlarge, output_path=batch_output) + tf_transformer = tf_estimator.transformer(instance_count=1, instance_type='ml.m4.xlarge', output_path=batch_output) To use a model trained outside of SageMaker, you can package the model as a SageMaker model, and call the ``transformer`` method of the SageMaker model. @@ -557,7 +557,7 @@ For example: batch_output = 's3://{}/{}/results'.format(bucket, prefix) # The location to store the results - tf_transformer = tensorflow_serving_model.transformer(instance_count=1, instance_type='ml.m4.xlarge, output_path=batch_output) + tf_transformer = tensorflow_serving_model.transformer(instance_count=1, instance_type='ml.m4.xlarge', output_path=batch_output) For information about how to package a model as a SageMaker model, see :ref:`overview:BYO Model`. When you call the ``tranformer`` method, you specify the type and number of instances to use for the batch transform job, and the location where the results are stored in S3.
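For context, a minimal training-script skeleton of the kind the corrected documentation describes (hyperparameter names are illustrative; the SM_* environment variables are the ones SageMaker sets inside the training container): data is read from the train channel and the model artifact is written under SM_MODEL_DIR.

```python
# train.py -- minimal sketch of a SageMaker script-mode entry point
import argparse
import os

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--epochs", type=int, default=10)
    parser.add_argument("--model-dir", type=str, default=os.environ.get("SM_MODEL_DIR"))
    parser.add_argument("--train", type=str, default=os.environ.get("SM_CHANNEL_TRAIN"))
    args = parser.parse_args()

    # ... load training data from args.train and fit the model ...

    # Save the trained artifact under args.model_dir (SM_MODEL_DIR) so SageMaker
    # can package it for deployment after the job completes.
    output_path = os.path.join(args.model_dir, "model-artifact")
```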
Add DataRequired validators to relevant fields WTForms DataRequired() validators fit with the form validation workflow that each function follows, with <form>.validate_on_submit(). Closes
@@ -57,12 +57,12 @@ class NewScopeForm(FlaskForm): class ImportScopeForm(FlaskForm): - scope = TextAreaField("Scope Import") + scope = TextAreaField("Scope Import", validators=[DataRequired()]) submit = SubmitField("Import Scope") class ImportBlacklistForm(FlaskForm): - scope = TextAreaField("Blacklist Import") + scope = TextAreaField("Blacklist Import", validators=[DataRequired()]) submit = SubmitField("Import Blacklist") @@ -75,7 +75,7 @@ class ScopeToggleForm(FlaskForm): class ServicesUploadForm(FlaskForm): - serviceFile = FileField('Select a file to upload') + serviceFile = FileField('Select a file to upload', validators=[DataRequired()]) uploadFile = SubmitField('Upload Services File')
Provides group_names for use with provider_network plugin This patch will provide group_names to the provider network object, which will then be used to compare group_binds for selective provider network placement across nodes. Partial-Bug: Depends-On:
provider_networks: "{{ provider_networks }}" bind_prefix: "{{ provider_network_bind_prefix | default('') }}" is_metal: "{{ is_metal }}" + group_names: "{{ group_names }}" register: pndata when: neutron_provider_networks is not defined tags:
Optimization: Use -O3 with LTO for platforms without static libpython * On Windows, there is a pseudo static link library by Anaconda, but that doesn't count, and Nuitka-Python is not affected. * This will e.g. make use of better LTO on Fedora Python, where there is no static libpython available.
@@ -460,7 +460,14 @@ if env.gcc_mode and env.lto_mode: if env.debug_mode: env.Append(LINKFLAGS=["-Og"]) else: - env.Append(LINKFLAGS=["-O2" if env.lto_mode and not nuitka_python else "-O3"]) + # For LTO with static libpython combined, there are crashes with Python core + # being inlined, so we must refrain from that. On Windows there is no such + # thing, and Nuitka-Python is not affected. + env.Append( + LINKFLAGS=[ + "-O3" if nuitka_python or win_target or not static_libpython else "-O2" + ] + ) # Avoid them as appearing to be different files. TODO: Find out which # clang version has this, clang-8 does not.
OpenOLT reconciliation state machine and logical device reconciliation
@@ -116,6 +116,10 @@ class OpenoltDevice(object): self.heartbeat_signature = None self.heartbeat_thread.start() + if is_reconciliation: + # Put state machine in state up + reactor.callFromThread(self.olt_up, reconciliation=True) + self.log.debug('openolt-device-created', device_id=self.device_id) def process_indications(self): @@ -171,7 +175,9 @@ class OpenoltDevice(object): def olt_indication_up(self, event): olt_indication = event.kwargs.get('ind', None) - self.log.debug("olt indication", olt_ind=olt_indication) + is_reconciliation = event.kwargs.get('reconciliation', False) + self.log.debug("olt indication", olt_ind=olt_indication, + reconciliation=is_reconciliation) device = self.adapter_agent.get_device(self.device_id) @@ -200,6 +206,9 @@ class OpenoltDevice(object): else: # logical device already exists self.logical_device_id = device.parent_id + if is_reconciliation: + self.adapter_agent.reconcile_logical_device( + self.logical_device_id) # Update phys OF device device.parent_id = self.logical_device_id
Add test for operating reserves: check that sr_provided is greater than or equal to sr_required
@@ -209,6 +209,11 @@ class TestOffGridSystem(ResourceTestCaseMixin, TestCase): inputs['LoadProfile']["min_load_met_pct"], "Load met pct is less than required pct.") + # Check that SR provided is greater than SR required + self.assertGreaterEqual(sum(outputs['LoadProfile']['total_sr_provided']), + sum(outputs['LoadProfile']['total_sr_required']), + "Total SR provided is less than required SR.") + # TODO: check that lcoe components add up to 100% except Exception as e: error_msg = None
ENH: use new utility Use new column formatting utility in Instrument string output.
@@ -1104,23 +1104,8 @@ class Instrument(object): output_str += 'Number of Times: {:d}\n'.format(len(self.index)) output_str += 'Number of variables: {:d}\n'.format(num_vars) - if num_vars <= max_vars: output_str += '\nVariable Names:\n' - num = len(self.variables) // 3 - - # Print out groups of three variables at a time on one line - for i in np.arange(num): - output_str += self.variables[3 * i].ljust(30) - output_str += self.variables[3 * i + 1].ljust(30) - output_str += self.variables[3 * i + 2].ljust(30) + '\n' - - # Print out remaining variables one at a time on one line - for i in np.arange(len(self.variables) - 3 * num): - output_str += self.variables[i + 3 * num].ljust(30) - output_str += '\n' - else: - output_str += "\nSee variable names using " - output_str += "print(inst.variables)\n" + output_str += utils._core.fmt_output_in_cols(self.variables) else: output_str += 'No loaded data.\n'
Don't use mmap on local files by default Fixes
@@ -71,9 +71,17 @@ def futures_executor(items, function, accumulator, workers=1, status=True, unit= return accumulator -def _work_function(item, flatten=False, savemetrics=False, **_): +def _work_function(item, flatten=False, savemetrics=False, mmap=False, **_): dataset, fn, treename, chunksize, index, processor_instance = item - file = uproot.open(fn) + if mmap: + localsource = {} + else: + opts = dict(uproot.FileSource.defaults) + opts.update({'parallel': None}) + def localsource(path): + return uproot.FileSource(path, **opts) + + file = uproot.open(fn, localsource=localsource) tree = file[treename] df = LazyDataFrame(tree, chunksize, index, flatten=flatten) df['dataset'] = dataset
silence unreachable code warnings Summary: Stack: :black_circle: **#15036 silence unreachable code warnings** [:yellow_heart:](https://our.intern.facebook.com/intern/diff/D13411100/) Pull Request resolved:
@@ -901,7 +901,6 @@ TypePtr getTypePtr() { " could not be converted to any of the known types { ", C10_FORALL_TYPES(TYPE_STR) "}"); #undef TYPE_STR - return nullptr; } template<> inline TypePtr getTypePtr<at::Tensor>() { return DynamicType::get(); }
change divide_polyline & divide_polyline_by_length
@@ -298,6 +298,7 @@ class Polyline(Primitive): else: looped_pts = [points[i] for i in range(id1, len(points))] + points[1:id2+1] split_polylines.append(Polyline(looped_pts)) + if self.is_closed() and not corner_ids: return [Polyline(self.points)] @@ -332,33 +333,7 @@ if self.is_closed() and not corner_ids: """ segment_length = self.length/num_segments - num_pts = int(round(self.length/segment_length)) - total_length = [0, 0] - if hasattr(self, 'points'): - division_pts = [self.points[0]] - new_polyline = self - else: - division_pts = [self.start] - new_polyline = Polyline([self.start, self.end]) - - for i in range(num_pts): - for i_ln, line in enumerate(new_polyline.lines): - total_length.append(total_length[-1] + line.length) - if total_length[-1] > segment_length: - amp = (segment_length - total_length[-2]) / line.length - new_pt = line.start + line.vector.scaled(amp) - division_pts.append(new_pt) - total_length = [0, 0] - remaining_pts = new_polyline.points[i_ln+2:] - new_polyline = Polyline([new_pt, line.end] + remaining_pts) - break - if len(division_pts) == num_pts+1: - break - - if len(division_pts) != num_pts+1: - division_pts.append(new_polyline.points[-1]) - - return division_pts + return self.divide_polyline_by_length(segment_length) def divide_polyline_by_length(self, length): """Splits a polyline in segments of a given length @@ -374,8 +349,13 @@ if self.is_closed() and not corner_ids: """ num_pts = int(round(self.length/length)) total_length = [0, 0] + if hasattr(self, 'points'): division_pts = [self.points[0]] new_polyline = self + else: + division_pts = [self.start] + new_polyline = Polyline([self.start, self.end]) + new_polyline = self for i in range(num_pts): for i_ln, line in enumerate(new_polyline.lines): total_length.append(total_length[-1] + line.length)
fix url bug for word2vec
@@ -43,8 +43,6 @@ class TokenEmbedding(nn.Embedding): unknown_token_vector=None, extended_vocab_path=None, trainable=True): - - embedding_name = embedding_name.lower() vector_path = osp.join(EMBEDDING_HOME, embedding_name + ".npz") if not osp.exists(vector_path): # download