message: string, length 13–484
diff: string, length 38–4.63k
Fix example in README Fixes
@@ -46,10 +46,10 @@ and in the documentation. .. code:: python - #!/usr/bin/python3.5 + #!/usr/bin/python3 - import asyncio import logging + import sys from juju import loop from juju.model import Model @@ -63,26 +63,28 @@ and in the documentation. # Connect to the currently active Juju model await model.connect_current() - # Deploy a single unit of the ubuntu charm, using revision 0 from the - # stable channel of the Charm Store. + try: + # Deploy a single unit of the ubuntu charm, using the latest revision + # from the stable channel of the Charm Store. ubuntu_app = await model.deploy( - 'ubuntu-0', + 'ubuntu', application_name='ubuntu', series='xenial', channel='stable', ) + if '--wait' in sys.argv: + # optionally block until the application is ready + await model.block_until(lambda: ubuntu_app.status == 'active') + finally: # Disconnect from the api server and cleanup. - model.disconnect() + await model.disconnect() def main(): - # Set logging level to debug so we can see verbose output from the - # juju library. - logging.basicConfig(level=logging.DEBUG) + logging.basicConfig(level=logging.INFO) - # Quiet logging from the websocket library. If you want to see - # everything sent over the wire, set this to DEBUG. + # If you want to see everything sent over the wire, set this to DEBUG. ws_logger = logging.getLogger('websockets.protocol') ws_logger.setLevel(logging.INFO)
remove 'unique' from delay_before/after; the 'unique' was not working as expected
@@ -123,11 +123,9 @@ mapping: delay_before: type: float required: false - unique: true delay_after: type: float required: false - unique: true mqtt_publish: type: map
fix: correct typo to spell "convention" Fixes
@@ -49,7 +49,7 @@ and communicating it (using the cli provided by commitizen). The reasoning behind it is that is easier to read, and enforces writing descriptive commits. -Besides that, having a convetion on your commits, makes it possible to +Besides that, having a convention on your commits, makes it possible to parse them and use them for something else, like generating automatically the version or a changelog.
Remove relative parameter from export The relativeSDF parameter is not used anymore as the whole export relies on the individual requirements for visual, collision objects etc. Thus, (and as Gazebo does not support relative pathing yet) it is not needed at the moment.
@@ -205,7 +205,7 @@ def frame(framedata, indentation, relative): return "".join(tagger.get_output()) -def inertial(inertialobj, inertialdata, indentation, relative): +def inertial(inertialobj, inertialdata, indentation): """ Simple wrapper for link inertial data. The inertial object is required to determine the position (pose) of the object. @@ -215,10 +215,12 @@ def inertial(inertialobj, inertialdata, indentation, relative): phobos.utils.editing.getCombinedTransform). :param inertialobj: object to be used for absolute pose + :type inertialobj: Blender object. :param inertialdata: data as provided by dictionary (should contain mass and inertia) + :type inertialdata: dict. :param indentation: indentation at current level - :param relative: True for usage of sdf relative pathing + :type indentation: int. :return: str -- writable xml line """ @@ -255,7 +257,7 @@ def inertial(inertialobj, inertialdata, indentation, relative): return "".join(tagger.get_output()) -def collision(collisionobj, collisiondata, indentation, relative, modelname): +def collision(collisionobj, collisiondata, indentation, modelname): """ Simple wrapper for link collision data. The collision object is required to determine the position (pose) of the object. @@ -448,7 +450,7 @@ def geometry(geometrydata, indentation, modelname): return "".join(tagger.get_output()) -def visual(visualobj, linkobj, visualdata, indentation, relative, modelname): +def visual(visualobj, linkobj, visualdata, indentation, modelname): """ Simple wrapper for visual data of links. The visual object is required to determine the position (pose) of the object. @@ -541,7 +543,7 @@ def material(materialdata, indentation): return "".join(tagger.get_output()) -def exportSdf(model, filepath, relativeSDF=False): +def exportSdf(model, filepath): log("Export SDF to " + filepath, "INFO", "exportSdf") filename = os.path.join(filepath, model['name'] + '.sdf') errors = False @@ -651,7 +653,7 @@ def exportSdf(model, filepath, relativeSDF=False): inertialname = link['inertial']['name'] inertialobj = bpy.context.scene.objects[inertialname] xml.write(inertial(inertialobj, link['inertial'], - xml.get_indent(), relativeSDF)) + xml.get_indent())) else: log('No inertial data for "{0}"...'.format(link['name'], "WARNING", @@ -664,8 +666,7 @@ def exportSdf(model, filepath, relativeSDF=False): collisionobj = bpy.context.scene.objects[colliname] xml.write(collision(collisionobj, link['collision'][colkey], - xml.get_indent(), relativeSDF, - modelname)) + xml.get_indent(), modelname)) else: log('No collision data for "{0}"...'.format(link['name'], "WARNING", @@ -682,7 +683,7 @@ def exportSdf(model, filepath, relativeSDF=False): material = model['materials'][visualdata['material']] visualdata['material'] = material xml.write(visual(visualobj, linkobj, visualdata, - xml.get_indent(), relativeSDF, modelname)) + xml.get_indent(), modelname)) else: log('No visual data for "{0}"...'.format(link['name'], "WARNING",
Fix some typos I found one, and flake8 found the rest
@@ -83,7 +83,7 @@ class VirtualArray(awkward.array.base.AwkwardArray): return out def deepcopy(self, generator=None, args=None, kwargs=None, cache=None, persistentkey=None, type=None, nbytes=None, persistvirtual=None): - out = self.copy(generator=generator, args=arge, kwargs=kwargs, cache=cache, persistentkey=persistentkey, type=type, nbytes=nbytes, persistvirtual=persistvirtual) + out = self.copy(generator=generator, args=args, kwargs=kwargs, cache=cache, persistentkey=persistentkey, type=type, nbytes=nbytes, persistvirtual=persistvirtual) out._array = self._util_deepcopy(out._array) if out._setitem is not None: for n in list(out._setitem): @@ -92,19 +92,19 @@ class VirtualArray(awkward.array.base.AwkwardArray): def empty_like(self, **overrides): if isinstance(self.array, self.numpy.ndarray): - return self.numpy.empty_like(array) + return self.numpy.empty_like(self.array) else: return self.array.empty_like(**overrides) def zeros_like(self, **overrides): if isinstance(self.array, self.numpy.ndarray): - return self.numpy.zeros_like(array) + return self.numpy.zeros_like(self.array) else: return self.array.zeros_like(**overrides) def ones_like(self, **overrides): if isinstance(self.array, self.numpy.ndarray): - return self.numpy.ones_like(array) + return self.numpy.ones_like(self.array) else: return self.array.ones_like(**overrides) @@ -371,7 +371,7 @@ class VirtualArray(awkward.array.base.AwkwardArray): def __setitem__(self, where, what): self.array[where] = what if self._type is not None: - self._type = awkward.type.fromarray(array) + self._type = awkward.type.fromarray(self.array) if self._setitem is None: self._setitem = OrderedDict() self._setitem[where] = what @@ -379,7 +379,7 @@ class VirtualArray(awkward.array.base.AwkwardArray): def __delitem__(self, where): del self.array[where] if self._type is not None: - self._type = awkward.type.fromarray(array) + self._type = awkward.type.fromarray(self.array) if self._setitem is not None and where in self._setitem: del self._setitem if self._delitem is None:
[modules/memory] Add parameter to only show used memory No change to default behaviour, but adds boolean to only display used rather than used, total and percentage. To only show used memory: p memory.usedonly=1
"""Displays available RAM, total amount of RAM and percentage available. Parameters: - * cpu.warning : Warning threshold in % of memory used (defaults to 80%) - * cpu.critical: Critical threshold in % of memory used (defaults to 90%) + * ram.warning : Warning threshold in % of memory used (defaults to 80%) + * ram.critical: Critical threshold in % of memory used (defaults to 90%) + * ram.usedonly: Only show the amount of RAM in use. """ try: @@ -28,6 +29,8 @@ class Module(bumblebee.engine.Module): def memory_usage(self, widget): used = self._mem.total - self._mem.available + if bool(self.parameter("usedonly", 0)) == 1: + return bumblebee.util.bytefmt(used) return "{}/{} ({:05.02f}%)".format( bumblebee.util.bytefmt(used), bumblebee.util.bytefmt(self._mem.total),
Fixing race condition that caused all pages to be rendered. Prior to encountering this condition, it was handled by only rendering the first page and no others. Also clearing out errors.
props: ['defaultFile'], data: () => ({ supportsPDFs: true, - scale: null, - timeout: null, isFullscreen: false, - error: false, progress: 0, - totalPages: 0, - pageHeight: 0, - pageWidth: 0, + scale: null, + timeout: null, + totalPages: null, + pageHeight: null, + pageWidth: null, }), computed: { fullscreenAllowed() { } else { this.isFullscreen = !this.isFullscreen; } - this.setupInitialPageScale(); }, zoomIn() { this.scale += 0.1; getPage(pageNum) { return this.pdfDocument.getPage(pageNum); }, - // get the page dimensions. By default, uses the first page - setupInitialPageScale() { - const pageMargin = 5; - this.getPage(1).then( - firstPage => { - const pdfPageWidth = firstPage.view[2]; - const isDesktop = this.windowSize.breakpoint >= 5; - - if (isDesktop) { - this.scale = 1; - } else { - this.scale = (this.elSize.width - 2 * pageMargin) / pdfPageWidth; - } - }, - error => { - this.error = true; - } - ); - }, startRender(pdfPage) { // use a promise because this also calls render, allowing us to cancel return new Promise((resolve, reject) => { Object.keys(this.pdfPages).forEach(pageNum => { // toggle between hide and show to re-render the page this.hidePage(Number(pageNum)); - this.showPage(Number(pageNum)); }); - this.checkPages(); } this.checkPages(); }, }); } - this.loadPdfPromise = PDFJSLib.getDocument(this.defaultFile.storage_url); + const loadPdfPromise = PDFJSLib.getDocument(this.defaultFile.storage_url); // pass callback to update loading bar - this.loadPdfPromise.onProgress = loadingProgress => { + loadPdfPromise.onProgress = loadingProgress => { this.progress = loadingProgress.loaded / loadingProgress.total; }; - this.loadPdfPromise.then(pdfDocument => { - if (PDFJSLib.FormatError) { - this.error = true; - return; - } - + this.prepComponentData = loadPdfPromise.then(pdfDocument => { this.pdfDocument = pdfDocument; this.totalPages = pdfDocument.numPages; this.pdfPages = {}; - this.setupInitialPageScale(); + return this.getPage(1).then(firstPage => { + const pageMargin = 5; + const pdfPageWidth = firstPage.view[2]; + const isDesktop = this.windowSize.breakpoint >= 5; + + if (isDesktop) { + // if desktop, use default page's default scale size + this.scale = 1; + } else { + // if anything else, use max width + this.scale = (this.elSize.width - 2 * pageMargin) / pdfPageWidth; + } + + // set default height and width properties, used in checkPages + const initialViewport = firstPage.getViewport(this.scale); + this.pageHeight = initialViewport.height; + this.pageWidth = initialViewport.width; + }); }); }, mounted() { // Retrieve the document and its corresponding object + this.prepComponentData.then(() => { this.$emit('startTracking'); this.checkPages(); + }); // progress tracking const self = this;
Regenerate requirements/prod.txt Based on advice from Brandon I only re-compiled the requirements for robot.
@@ -29,7 +29,6 @@ cryptography==38.0.1 # -r requirements/prod.in # authlib # pyjwt - # secretstorage defusedxml==0.7.1 # via -r requirements/prod.in docutils==0.16 @@ -54,10 +53,6 @@ idna==3.4 # snowfakery importlib-metadata==5.0.0 # via keyring -jeepney==0.8.0 - # via - # keyring - # secretstorage jinja2==3.1.2 # via # -r requirements/prod.in @@ -112,7 +107,7 @@ requests-futures==1.0.0 # via -r requirements/prod.in rich==12.6.0 # via -r requirements/prod.in -robotframework==4.1.3 +robotframework==6.0 # via # -r requirements/prod.in # robotframework-lint @@ -138,8 +133,6 @@ salesforce-bulk==2.2.0 # via -r requirements/prod.in sarge==0.1.7.post1 # via -r requirements/prod.in -secretstorage==3.3.3 - # via keyring selenium==3.141.0 # via # -r requirements/prod.in
specify python3 when creating virtualenv Lemur is developed against Python3.5. If you do not specify the Python version it is possible the virtualenv will be built on a different version.
@@ -59,7 +59,7 @@ Create the virtual environment, activate it and enter the Lemur's directory: .. code-block:: bash - $ virtualenv lemur + $ virtualenv -p python3 lemur $ source /www/lemur/bin/activate $ cd lemur
Update desktop-guide.rst Instructions for Mac are different than Windows/Linux. Added instructions for Mac.
@@ -31,6 +31,13 @@ To access a new server from your desktop app environment: 4. In the **URL** field, enter the complete URL of the server that you want to connect to. Must begin with either ``http://`` or ``https://``. 5. Click **Add**. +To access a new server from your Mac desktop app environment: + +1. On the menu bar, go to **Mattermost > Sign into Another Server**. +2. In the **Server Display Name** field, enter the name that you want for the tab. +4. In the **Server URL** field, enter the complete URL of the server that you want to connect to. Must begin with either ``http://`` or ``https://``. +5. Click **Add**. + Editing Servers ~~~~~~~~~~~~~~~
Fixed the link in 'pinned dependencies' page. >There is additional documentation on this pinning scheme in [the conda docs](https://docs.conda.io/projects/conda-build/en/latest/resources/variants.html#build-variants) 'the conda docs' now point to
@@ -64,7 +64,7 @@ If a package is not pinned in `conda-forge-pinning <https://github.com/conda-for ignore_run_exports: - gmp -There is additional documentation on this pinning scheme in `the conda docs <https://docs.conda.io/projects/conda-build/en/latest/source/variants.html#build-variants>`_. +There is additional documentation on this pinning scheme in `the conda docs <https://docs.conda.io/projects/conda-build/en/latest/resources/variants.html#build-variants>`_. Specifying run_exports
Fixed typo Renamed comando to command, which was a mistype.
@@ -10,18 +10,18 @@ def main(): elif x == "3": exit() else: - print("Invalid!") + print("Invalid! option") def cancel(): - comando("shutdown -a") + command("shutdown -a") def shutdown(): t = int(input("how long to shut down the computer:\n")) t = str(t * 60) - cmd = "shutdown -s -f -t " + (t) - comando(cmd) + cmd = "shutdown -s -f -t " + t + command(cmd) def command(cmd):
Accelerate Tests fix circleci config.yml
@@ -37,10 +37,6 @@ jobs: - run: command: | mkdir screenshots - - run: - command: | - . venv/bin/activate - python -m pytest test/local - run: command: | . venv/bin/activate
adding IoT resources Adding IoT references to tools, blogs, videos, and testing guides.
- [OWASP Internet of Things Project](https://www.owasp.org/index.php/OWASP_Internet_of_Things_Project) - [OWASP IoT Testing Guides](https://www.owasp.org/index.php/IoT_Testing_Guides) +## IoT Hacking Communities + +- [IoT Village](https://www.iotvillage.org/) +- [BuildItSecure.ly](http://builditsecure.ly/) +- [Secure Internet of Things Project (Stanford)](http://iot.stanford.edu/people.html) + ## Interesting Blogs - <http://iotpentest.com/>
Update data-drift.md updated image names
@@ -46,7 +46,7 @@ The default report includes 4 components. All plots are interactive. The report returns **the share of drifting features** and an aggregate **Dataset Drift** result. For example: -![](<../.gitbook/assets/Screenshot%202021-09-08%20at%2011.13.40.png>) +![](<../.gitbook/assets/reports_data_drift_summary.png>) Dataset Drift sets a rule on top of the results of the statistical tests for individual features. By default, Dataset Drift is detected if at least 50% of features drift at a 0.95 confidence level.&#x20; @@ -58,7 +58,7 @@ To set different Dataset Drift conditions, you can define [custom options](../.. The table shows the drifting features first, sorting them by P-value. You can also choose to sort the rows by the feature name or type. -![](../.gitbook/assets/data_drift.png) +![](../.gitbook/assets/reports_data_drift_table.png) ### 3. Data Drift by Feature @@ -67,13 +67,13 @@ By clicking on each feature, you can explore the values mapped in a plot.&#x20; * The dark green line is the **mean**, as seen in the reference dataset.&#x20; * The green area covers **one standard deviation** from the mean.&#x20; -![](../.gitbook/assets/data_drift_by_feature.png) +![](../.gitbook/assets/reports_data_drift_drift_by_feature.png) ### 4. Data Distribution by Feature You can also zoom on distributions to understand what has changed. -![](../.gitbook/assets/data_distr_by_feature.png) +![](../.gitbook/assets/reports_data_drift_distr_by_feature.png) {% hint style="info" %} To change the bins displayed, you can define [custom options](../customization/options-for-data-target-drift.md).
Version bump ERROR 404, plugin version not found
@@ -7,7 +7,7 @@ NO_BPY = int(os.environ.get('NO_BPY', '0')) bl_info = { "name": "SourceIO", "author": "RED_EYE, ShadelessFox, Syborg64", - "version": (4, 0, 3), + "version": (4, 0, 4), "blender": (2, 80, 0), "location": "File > Import-Export > SourceEngine assets", "description": "GoldSrc/Source1/Source2 Engine assets(.mdl, .bsp, .vmt, .vtf, .vmdl_c, .vwrld_c, .vtex_c)"
update update on about page Why is it *the* NHS BSA and not *the* NHS Digital?!
@@ -37,7 +37,7 @@ cross-platform testing.</p> <h3 id="sources">Data sources</h3> -<p><strong>Prescribing data</strong> is from the monthly files published by <a href="https://www.nhsbsa.nhs.uk/information-services-portal-isp">NHS Business Service Authority</a>, used under the terms of the Open Government Licence.</p> +<p><strong>Prescribing data</strong> is from the monthly files published by the <a href="https://www.nhsbsa.nhs.uk/information-services-portal-isp">NHS Business Service Authority</a>, used under the terms of the Open Government Licence.</p> <p><strong>Practice list sizes</strong> for August 2010 - September 2016 are from the <a href="https://apps.nhsbsa.nhs.uk/infosystems/welcome">NHS Business Service Authority's Information Portal</a>, used under the terms of the Open Government Licence. From October 2016, practice list sizes are from <a href="http://content.digital.nhs.uk/article/2021/Website-Search?q=number+of+patients+registered+at+a+gp+practice&go=Go&area=both">NHS Digital</a>, used under the terms of the Open Government Licence. ASTRO-PU and STAR-PUs are calculated from list sizes, based on standard formulas.</p>
we already have a localized timezone aside: I'm pretty sure we can drop `tz_abbrev` and just use `timezone` directly everywhere, assuming get_timezone_for_user(...) is the same as get_timezone_for_user(...).localize(datetime.utcnow()).tzinfo which I suspect is the case.
@@ -209,9 +209,6 @@ class CaseDataView(BaseProjectReportSectionView): else: dynamic_properties = None - the_time_is_now = datetime.utcnow() - tz_abbrev = timezone.localize(the_time_is_now).tzname() - product_name_by_id = { product['product_id']: product['name'] for product in SQLProduct.objects.filter(domain=self.domain).values('product_id', 'name').all() @@ -252,7 +249,7 @@ class CaseDataView(BaseProjectReportSectionView): "dynamic_properties_as_table": dynamic_properties, "show_properties_edit": show_properties_edit, "timezone": timezone, - "tz_abbrev": tz_abbrev, + "tz_abbrev": timezone.zone, "ledgers": ledger_map, "show_transaction_export": show_transaction_export, "xform_api_url": reverse('single_case_forms', args=[self.domain, self.case_id]),
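A quick illustration of the value being swapped in above, assuming `timezone` is a pytz timezone object (which the old `localize(...)` call implies); the zone name is just an example:

```python
from datetime import datetime
import pytz

timezone = pytz.timezone("America/New_York")

# new value: the canonical zone name, no localization step required
print(timezone.zone)                                  # America/New_York

# old value: an abbreviation derived from "now", e.g. EST or EDT
print(timezone.localize(datetime.utcnow()).tzname())
```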
export/html: Fix table view (media queries) Closes
padding: 0px; box-sizing: border-box; /* overflow: hidden; */ -} - -.view-table .requirement-statement { - margin: 0px; -} - -.view-table .requirement-statement p:first-child { - margin-top: 0; + /* table-layout: fixed; */ } .view-table tr, border: 1px solid var(--color-table-border); } +/* +.view-table .cell-number {} .view-table .cell-uid {} - -.view-table .cell-title { - width: 15%; -} +.view-table .cell-title {} +.view-table .cell-statement {} +.view-table .cell-comment {} +*/ .view-table_requirement .cell-title { color: var(--color-accent-dark); line-height: 1.2; } -.view-table .cell-statement { - width: 40%; +.view-table .requirement-statement { + margin: 0px; } -.view-table .cell-comment { - width: 40%; + +.view-table .requirement-statement p:first-child { + margin-top: 0; } .view-table pre.code { - width: 600px; + width: 200px; margin: 0; + font-size: 0.75em; + line-height: 1.2; + padding: 10px 10px 10px 15px; +} + +@media (min-width: 768px) { + .view-table pre.code { + width: 250px; + } } -.tag { - background: #aaa; - border-radius: 20px; - margin: 2px 0px; - padding: 3px 10px; - display: inline-block; +@media (min-width: 1024px) { + .view-table pre.code { + width: 350px; + } +} + +@media (min-width: 1200px) { + .view-table pre.code { + width: 500px; + } } \ No newline at end of file
fix: prune devices removed from amazon Devices that are no longer reported by Amazon are now removed from HA automatically. closes
@@ -585,6 +585,23 @@ async def setup_alexa(hass, config_entry, login_obj: AlexaLogin): ) hass.data[DATA_ALEXAMEDIA]["accounts"][email]["new_devices"] = False + # prune stale devices + device_registry = await dr.async_get_registry(hass) + for device_entry in dr.async_entries_for_config_entry( + device_registry, config_entry.entry_id + ): + for (_, identifier) in device_entry.identifiers: + if ( + identifier + in hass.data[DATA_ALEXAMEDIA]["accounts"][email]["devices"][ + "media_player" + ].keys() + ): + break + else: + device_registry.async_remove_device(device_entry.id) + _LOGGER.debug("Removing stale device %s", device_entry.name) + await login_obj.save_cookiefile() if login_obj.access_token: hass.config_entries.async_update_entry(
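The pruning loop above leans on Python's for/else: the else branch runs only when the loop finishes without hitting break. A self-contained sketch of that idiom with made-up device names and identifiers:

```python
known_identifiers = {"alexa-media-player-123"}

def prune_if_stale(identifiers, name):
    for identifier in identifiers:
        if identifier in known_identifiers:
            break                                # still reported by Amazon - keep it
    else:
        print(f"Removing stale device {name}")   # no identifier matched - prune it

prune_if_stale(["alexa-media-player-123"], "Kitchen Echo")  # kept
prune_if_stale(["alexa-media-player-999"], "Old Echo")      # pruned
```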
Changed some command-line options for naomi-setup.sh Changed --primary to --system. Changed --local to --local-compile
@@ -55,10 +55,10 @@ for var in "$@"; do if [ "$var" = "--virtualenv" ]; then OPTION="1" fi - if [ "$var" = "--local" ]; then + if [ "$var" = "--local-compile" ]; then OPTION="2" fi - if [ "$var" = "--primary" ]; then + if [ "$var" = "--system" ]; then OPTION="3" fi if [ "$var" = "--help" ]; then @@ -69,10 +69,10 @@ for var in "$@"; do echo " 'workon Naomi' before installing additional libraries" echo " for Naomi)" echo - echo " --local - download, compile and install a special copy of Python 3" + echo " --local-compile - download, compile and install a special copy of Python 3" echo " for Naomi (does not work for all distros)" echo - echo " --primary - use your primary Python 3 environment" + echo " --system - use your primary Python 3 environment" echo " (this can be dangerous, it can lead to a broken python" echo " environment on your system, which other software may" echo " depend on. This is the simplest setup, but only recommended" @@ -231,7 +231,7 @@ if [ $OPTION = "2" ]; then if [ ! -f $TARFILE.asc ]; then wget $URL.asc fi - gpg --list-keys $KEYID || gpg --keyserver keys.gnupg.net --recv-keys $KEYID || gpg --keyserver pgp.mit.edu --recv-keys $KEYID + gpg --list-keys $KEYID || gpg --keyserver pgp.mit.edu --recv-keys $KEYID || gpg --keyserver keys.gnupg.net --recv-keys $KEYID gpg --verify $TARFILE.asc if [ $? -eq 0 ]; then echo "Python tarball signature verified"
Fix type hint for value_chain args Fixes
@@ -465,7 +465,7 @@ def nth_product(index: int, *args: Iterable[_T]) -> Tuple[_T, ...]: ... def nth_permutation( iterable: Iterable[_T], r: int, index: int ) -> Tuple[_T, ...]: ... -def value_chain(*args: Iterable[Any]) -> Iterable[Any]: ... +def value_chain(*args: Union[_T, Iterable[_T]]) -> Iterable[_T]: ... def product_index(element: Iterable[_T], *args: Iterable[_T]) -> int: ... def combination_index( element: Iterable[_T], iterable: Iterable[_T]
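For context on the annotation change: `value_chain` accepts bare values as well as iterables, which `Iterable[Any]` alone did not express. A minimal check, assuming more-itertools is installed:

```python
from more_itertools import value_chain

# bare values and iterables can be mixed freely
print(list(value_chain(1, [2, 3], 4)))        # [1, 2, 3, 4]

# strings count as single values, not iterables of characters
print(list(value_chain("ab", ["cd", "ef"])))  # ['ab', 'cd', 'ef']
```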
Remove duplicate `any-known-element` context `inside-dict-key` is terminated by that context already.
@@ -180,7 +180,7 @@ contexts: 1: punctuation.definition.tag.begin.xml 2: entity.name.tag.localname.xml 3: punctuation.definition.tag.end.xml - push: [any-known-element, inside-dict-key] + push: inside-dict-key - include: scope:text.xml.plist#whitespace-or-tag inside-dict-key:
Fix line tracing test in Py2.6. See
# mode: run # tag: trace +from cpython.ref cimport PyObject, Py_INCREF, Py_XINCREF, Py_XDECREF + cdef extern from "frameobject.h": ctypedef struct PyFrameObject: - pass - -from cpython.ref cimport PyObject + PyObject *f_trace from cpython.pystate cimport ( Py_tracefunc, @@ -38,20 +38,23 @@ cdef int trace_trampoline(PyObject* _traceobj, PyFrameObject* _frame, int what, if what == PyTrace_CALL: callback = traceobj else: - callback = frame.f_trace + callback = <object>_frame.f_trace if callback is None: return 0 result = callback(frame, what, arg) - frame.f_trace = result + # A bug in Py2.6 prevents us from calling the Python-level setter here, + # or otherwise we would get miscalculated line numbers. Was fixed in Py2.7. + cdef PyObject *tmp = _frame.f_trace + Py_INCREF(result) + _frame.f_trace = <PyObject*>result + Py_XDECREF(tmp) if result is None: PyEval_SetTrace(NULL, None) return 0 - else: - return 0 def _create_trace_func(trace):
tests: quieten signal logger Set signal logger level to CRITICAL to prevent it from spewing out log messages on the console for expected errors during tests.
# limitations under the License. # +import logging import unittest from nose.tools import assert_equal, assert_true, assert_false @@ -31,6 +32,11 @@ class Callable(object): class TestPriorityDispatcher(unittest.TestCase): + def setUp(self): + # Stop logger output interfering with nose output in the console. + logger = logging.getLogger('signal') + logger.setLevel(logging.CRITICAL) + def test_ConnectNotify(self): one = Callable(1) two = Callable(2)
Add support for BoingBoing Adds support for the BoingBoing comments system
"urlMain": "https://last.fm/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" + }, + "boingboing.net": { + "errorType": "status_code", + "urlMain": "https://boingboing.net/", + "url": "https://bbs.boingboing.net/u/{}", + "username_claimed": "boingboing", + "username_unclaimed": "noonewouldeverusethis7" } } \ No newline at end of file
osd: bind mount /var/run/udev/ without this, the command `ceph-volume lvm list --format json` hangs and takes a very long time to complete.
@@ -92,6 +92,7 @@ fi -v /var/lib/ceph:/var/lib/ceph:z \ -v /etc/ceph:/etc/ceph:z \ -v /var/run/ceph:/var/run/ceph:z \ + -v /var/run/udev/:/var/run/udev/:z \ {% if ansible_distribution == 'Ubuntu' -%} --security-opt apparmor:unconfined \ {% endif -%}
Should set config even if running populate.py Need to set the value of self.config even if you just ran populate.py
@@ -29,7 +29,6 @@ class Naomi(object): self._logger = logging.getLogger(__name__) if repopulate: populate.run() - else: self.config = profile.get_profile() language = profile.get_profile_var(['language']) if(not language):
Update Readme.rst Clarifies description of what developer can expect when creating error messages. Previous wording was unclear.
@@ -378,7 +378,7 @@ Save and deploy these changes:: "Message": "BadRequestError: Unknown city 'vancouver', valid choices are: portland, seattle" } -We can see now that we can a ``Code`` and ``Message`` key, with the message +We can see now that we have received a ``Code`` and ``Message`` key, with the message being the value we passed to ``BadRequestError``. Whenever you raise a ``BadRequestError`` from your view function, the framework will return an HTTP status code of 400 along with a JSON body with a ``Code`` and ``Message``.
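For readers without the surrounding tutorial, a minimal sketch of the view-function pattern the passage describes; the app name, route, and city list are illustrative assumptions rather than the tutorial's actual code:

```python
from chalice import Chalice, BadRequestError

app = Chalice(app_name="cities")
VALID_CITIES = {"portland": "Oregon", "seattle": "Washington"}

@app.route("/cities/{city}")
def get_city(city):
    if city not in VALID_CITIES:
        # raising BadRequestError returns HTTP 400 with a Code/Message JSON body
        raise BadRequestError(
            f"Unknown city '{city}', valid choices are: "
            + ", ".join(sorted(VALID_CITIES))
        )
    return {"city": city, "state": VALID_CITIES[city]}
```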
Remove redundant lines of code. These are set in expand_template, so no need to set them here.
@@ -157,8 +157,6 @@ def _expand_template(template_name, **kwargs): def create_dockerfile(template_name, data, dockerfile_dir, verbose=True): - data['template_name'] = template_name - data['wrapper_scripts'] = get_wrapper_scripts() content = expand_template(template_name, data) dockerfile = os.path.join(dockerfile_dir, 'Dockerfile') print("Generating Dockerfile '%s':" % dockerfile)
test_classes: Modify functions to deal with web-public streams. Modify common_subscribe_to_streams to perform subscription in web-public streams as well, and make_stream function to create web-public streams.
@@ -752,6 +752,7 @@ class ZulipTestCase(TestCase): def make_stream(self, stream_name: str, realm: Optional[Realm]=None, invite_only: bool=False, + is_web_public: bool=False, history_public_to_subscribers: Optional[bool]=None) -> Stream: if realm is None: realm = get_realm('zulip') @@ -764,6 +765,7 @@ class ZulipTestCase(TestCase): realm=realm, name=stream_name, invite_only=invite_only, + is_web_public=is_web_public, history_public_to_subscribers=history_public_to_subscribers, ) except IntegrityError: # nocoverage -- this is for bugs in the tests @@ -807,9 +809,11 @@ class ZulipTestCase(TestCase): # Subscribe to a stream by making an API request def common_subscribe_to_streams(self, user: UserProfile, streams: Iterable[str], extra_post_data: Dict[str, Any]={}, invite_only: bool=False, + is_web_public: bool=False, allow_fail: bool=False, **kwargs: Any) -> HttpResponse: post_data = {'subscriptions': ujson.dumps([{"name": stream} for stream in streams]), + 'is_web_public': ujson.dumps(is_web_public), 'invite_only': ujson.dumps(invite_only)} post_data.update(extra_post_data) result = self.api_post(user, "/api/v1/users/me/subscriptions", post_data, **kwargs)
Add systest for round-trip of NULL INT64. Include NULL values in ARRAY<INT64>.
@@ -371,10 +371,11 @@ class TestSessionAPI(unittest.TestCase, _TestData): BYTES_1 = b'Ymlu' BYTES_2 = b'Ym9vdHM=' ALL_TYPES_ROWDATA = ( + ([], False, None, None, 0.0, None, None, None), ([1], True, BYTES_1, SOME_DATE, 0.0, 19, u'dog', SOME_TIME), ([5, 10], True, BYTES_1, None, 1.25, 99, u'cat', None), ([], False, BYTES_2, None, float('inf'), 107, u'frog', None), - ([], False, None, None, float('-inf'), 207, None, None), + ([3, None, 9], False, None, None, float('-inf'), 207, None, None), ([], False, None, None, float('nan'), 1207, None, None), ([], False, None, None, OTHER_NAN, 2000, None, NANO_TIME), ) @@ -903,7 +904,7 @@ class TestSessionAPI(unittest.TestCase, _TestData): params={'lower': 0.0, 'upper': 1.0}, param_types={ 'lower': Type(code=FLOAT64), 'upper': Type(code=FLOAT64)}, - expected=[(19,)], + expected=[(None,), (19,)], ) # Find -inf
Fix Attribute Error for email_address in smallest_subnet['emails'].split("\n"): AttributeError: 'list' object has no attribute 'split'
@@ -42,7 +42,7 @@ class NetworkWhois(OneShotAnalytics): # Link it to every email address referenced if smallest_subnet['emails']: - for email_address in smallest_subnet['emails'].split("\n"): + for email_address in smallest_subnet['emails']: email = Email.get_or_create(value=email_address) links.update(company.link_to(email, None, 'Network Whois'))
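The traceback in the message is easy to reproduce once the whois data comes back as a list instead of a newline-joined string; iterating the list directly is the fix. A tiny reproduction with made-up addresses:

```python
emails = ["abuse@example.net", "noc@example.net"]   # now a list, not a "\n"-joined string

try:
    emails.split("\n")                              # the old code path
except AttributeError as exc:
    print(exc)                                      # 'list' object has no attribute 'split'

for email_address in emails:                        # the fixed code path
    print(email_address)
```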
Update buildinfo.json bump metronome to 0.6.65
], "single_source": { "kind": "url_extract", - "url": "https://s3.amazonaws.com/downloads.mesosphere.io/metronome/builds/0.6.63-b7b4a2c/metronome-0.6.63-b7b4a2c.tgz", - "sha1": "e9790e183b5a902f80fb1951c5cc17f384238230" + "url": "https://s3.amazonaws.com/downloads.mesosphere.io/metronome/builds/0.6.65-50400db/metronome-0.6.65-50400db.tgz", + "sha1": "2fc7e215a94a02acaa975ec04ebbbb81b0077467" }, "username": "dcos_metronome", "state_directory": true
Update mkvtomp4.py Error checking to make sure ffprobe and subsequently generateOptions return something valid, and catches with error messages when that does not occur
@@ -203,6 +203,9 @@ class MkvtoMp4: if self.needProcessing(inputfile): options, preopts, postopts = self.generateOptions(inputfile, original=original) + if not options: + self.log.error("Error converting, inputfile had a valid extension but returned no data. Either the file does not exist, was unreadable, or was an incorrect format.") + return False try: self.log.info("Output Data") @@ -340,6 +343,10 @@ class MkvtoMp4: input_dir, filename, input_extension = self.parseFile(inputfile) info = self.converter.probe(inputfile) + if not info: + self.log.error("FFProbe returned no value, either the file does not exist or is not a format FFPROBE can read.") + return None, None, None + self.log.info("Input Data") self.log.info(json.dumps(info.toJson(), sort_keys=False, indent=4)) # Video stream
click_handlers: Use e.button instead of deprecated e.which. The middle button click is represented by 1. Tested by clicking middle button on zulip logo and making sure it is not focused.
@@ -758,7 +758,7 @@ export function initialize() { // Don't focus links on middle click. $("body").on("mouseup", "a", (e) => { - if (e.which === 2) { + if (e.button === 1) { // middle click e.target.blur(); }
Remove JSON wording we support more than just JSON endpoints here
# Public APIs [![Build Status](https://api.travis-ci.org/toddmotto/public-apis.svg)](https://travis-ci.org/toddmotto/public-apis) -A collective list of free JSON APIs for use in web development. +A collective list of free APIs for use in web development. A JSON encoding of all entries can be found [here](json).
Label plot axes in LineProfile plugin plus some improvements to error handling
@@ -43,6 +43,8 @@ class LineProfile(GingaPlugin.LocalPlugin): self.tw = None self.mark_data_x = [None] self.mark_data_y = [None] + self.y_lbl = 'Flux' + self.x_lbl = '' self.gui_up = False @@ -247,15 +249,17 @@ Use MultiDim to change step values of axes.""") naxes = mddata.ndim if self.selected_axis: - axis_data = self.get_axis(self.selected_axis) - if axis_data is None: + plot_x_axis_data = self.get_axis(self.selected_axis) + if plot_x_axis_data is None: # image may lack the required keywords, or some trouble # building the axis return slice_obj = self._slice(naxes, mk=mark) + plot_y_axis_data = mddata[slice_obj] self.clear_plot() - self.plot.plot(axis_data, mddata[slice_obj]) + self.plot.plot(plot_x_axis_data, plot_y_axis_data, + xtitle=self.x_lbl, ytitle=self.y_lbl) else: self.fv.show_error("Please select an axis") @@ -266,12 +270,12 @@ Use MultiDim to change step values of axes.""") # For axes 1 and 2 if mk is not None: - slice_obj[0] = self.mark_data_x[mk] - slice_obj[1] = self.mark_data_y[mk] + slice_obj[0] = int(round(self.mark_data_x[mk])) + slice_obj[1] = int(round(self.mark_data_y[mk])) # For axis > 3 for i in range(2, naxes): - slice_obj[i] = self.image.revnaxis[i-2] + 1 + slice_obj[i] = int(round(self.image.revnaxis[i-2] + 1)) # Slice selected axis slice_obj[self.selected_axis-1] = slice(None, None, None) @@ -280,13 +284,24 @@ Use MultiDim to change step values of axes.""") def get_axis(self, i): try: + self.x_lbl = self.image.get_keyword('CTYPE%d' % i, None) try: kwds = ['CRVAL%d' % i, 'NAXIS%d' % i, 'CDELT%d' % i] crval_i, naxis_i, cdelt_i = self.image.get_keywords_list(*kwds) + except KeyError as e: raise ValueError("Missing FITS keyword: %s" % str(e)) axis = crval_i + np.arange(0, naxis_i, 1) * cdelt_i + + if self.x_lbl is not None: + units = self.image.get_keyword('CUNIT%d' % i, None) + if units is not None: + self.x_lbl += (' (%s)' % str(units)) + else: + self.x_lbl = '' + # Assume Y label should always be flux? + self.y_lbl = 'Flux' return axis except Exception as e:
Update ilap_artifacts.py Fix path
@@ -144,7 +144,7 @@ tosearch = {'lastBuild': ('IOS Build', '*LastBuildInfo.plist'), 'aggDictpasscodetype': ('Aggregate Dictionary', '*/AggregateDictionary/ADDataStore.sqlitedb'), 'alarms': ('Alarms', '*private/var/mobile/Library/Preferences/com.apple.mobiletimerd.plist'), 'appConduit': ('App Conduit', '**/AppConduit.log.*'), - 'appGrouplisting': ('Installed Apps', ('**/Containers/Shared/AppGroup/*/.com.apple.mobile_container_manager.metadata.plist', '**/PluginKitPlugin/*.metadata.plist')), + 'appGrouplisting': ('Installed Apps', ('*/Containers/Shared/AppGroup/*/.com.apple.mobile_container_manager.metadata.plist', '**/PluginKitPlugin/*.metadata.plist')), 'appItunesmeta': ('Installed Apps', ('**/iTunesMetadata.plist', '**/BundleMetadata.plist')), 'appleMapsApplication': ('Locations', '**/Data/Application/*/Library/Preferences/com.apple.Maps.plist'), 'appleMapsGroup': ('Locations', '**/Shared/AppGroup/*/Library/Preferences/group.com.apple.Maps.plist'), @@ -228,7 +228,7 @@ tosearch = {'lastBuild': ('IOS Build', '*LastBuildInfo.plist'), 'sms': ('SMS & iMessage', '**/sms.db'), 'slack': ('Slack', '*/var/mobile/Containers/Data/Application/*/Library/Application Support/Slack/*/Database/main_db*'), 'tcc': ('App Permissions', '*TCC.db*'), - 'teamsSegment': ('Teams Logs', '*TCC.db*'), + 'teamsSegment': ('Teams Logs', '*/var/mobile/Containers/Data/Application/*/Library/DriveIQ/segments/current/*.*'), 'tikTok': ('TikTok', ('*/Application/*/Library/Application Support/ChatFiles/*/db.sqlite*', '*AwemeIM.db*')), 'tileApp': ('Locations', '*private/var/mobile/Containers/Data/Application/*/Library/log/com.thetileapp.tile*'), 'tileAppDb': ('Locations', '*private/var/mobile/Containers/Shared/AppGroup/*/com.thetileapp.tile-TileNetworkDB.sqlite*'),
fix broken tft.md link Broken link in tft.md file and updated with the correct [link](https://www.tensorflow.org/tfx/transform/api_docs/python/tft)
Transform is available as a standalone library. - [Getting Started with TensorFlow Transform](/tfx/transform/get_started) -- [TensorFlow Transform API Reference](/tfx/transform/api_docs/python/tft) +- [TensorFlow Transform API Reference](https://www.tensorflow.org/tfx/transform/api_docs/python/tft) The `tft` module documentation is the only module that is relevant to TFX users. The `tft_beam` module is relevant only when using Transform as a standalone library. Typically, a TFX user constructs a `preprocessing_fn`, and the rest of the
Only verify if context menus pass a length check According to Discord this is the check that is actually done
@@ -126,7 +126,6 @@ else: CheckInputParameter = Union['Command[Any, ..., Any]', 'ContextMenu', CommandCallback, ContextMenuCallback] VALID_SLASH_COMMAND_NAME = re.compile(r'^[\w-]{1,32}$') -VALID_CONTEXT_MENU_NAME = re.compile(r'^[?!\w\s-]{1,32}$') CAMEL_CASE_REGEX = re.compile(r'(?<!^)(?=[A-Z])') @@ -157,7 +156,7 @@ def validate_name(name: str) -> str: def validate_context_menu_name(name: str) -> str: - if VALID_CONTEXT_MENU_NAME.match(name) is None: + if not name or len(name) > 32: raise ValueError('context menu names must be between 1-32 characters') return name
Follow up to console port allocation Addressed a comment in Story: Task: 38135
@@ -843,9 +843,8 @@ def _allocate_port(task): def _release_allocated_port(task): node = task.node dii = node.driver_internal_info or {} - allocated_port = dii.get('allocated_ipmi_terminal_port') + allocated_port = dii.pop('allocated_ipmi_terminal_port', None) if allocated_port: - dii.pop('allocated_ipmi_terminal_port') node.driver_internal_info = dii node.save() console_utils.release_port(allocated_port)
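The simplification above relies on `dict.pop(key, default)`, which reads and removes the key in one step and returns the default instead of raising when the key is absent:

```python
dii = {"allocated_ipmi_terminal_port": 8023}

port = dii.pop("allocated_ipmi_terminal_port", None)
print(port)   # 8023
print(dii)    # {} - the key has been removed

print(dii.pop("allocated_ipmi_terminal_port", None))   # None, no KeyError
```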
Update README.md Adding di to Maintainers
## Maintainers -- [Sarah Aoun](https://github.com/saoun) @ [Open Technology Fund](https://www.opentech.fund/) +- [Di Luong](https://www.opentech.fund/about/people/di-luong/) @ [Open Technology Fund](https://www.opentech.fund/) - [Dan Blah](https://github.com/danblah) @ [Reset](https://www.reset.tech/) - [Fredrik Jonsson](https://github.com/frjo) @ [Combonetwork](https://www.combonet.se/)
tools/chainload.py: Default to copying SEPFW This doesn't really take any significant time and is the correct thing to do. Use --no-sepfw for e.g. serial bring-up where doing the ADT dance costs measurable time.
@@ -7,7 +7,7 @@ import argparse, pathlib, time parser = argparse.ArgumentParser(description='Mach-O loader for m1n1') parser.add_argument('-q', '--quiet', action="store_true", help="Disable framebuffer") -parser.add_argument('-x', '--xnu', action="store_true", help="Load XNU") +parser.add_argument('-n', '--no-sepfw', action="store_true", help="Do not preserve SEPFW") parser.add_argument('-c', '--call', action="store_true", help="Use call mode") parser.add_argument('payload', type=pathlib.Path) parser.add_argument('boot_args', default=[], nargs="*") @@ -31,10 +31,10 @@ entry += new_base if args.quiet: p.iodev_set_usage(IODEV.FB, 0) -if args.xnu: - sepfw_start, sepfw_length = u.adt["chosen"]["memory-map"].SEPFW -else: +if args.no_sepfw: sepfw_start, sepfw_length = 0, 0 +else: + sepfw_start, sepfw_length = u.adt["chosen"]["memory-map"].SEPFW image_size = align(len(image)) sepfw_off = image_size @@ -50,13 +50,15 @@ print(f"Loading kernel image (0x{len(image):x} bytes)...") u.compressed_writemem(image_addr, image, True) p.dc_cvau(image_addr, len(image)) -if args.xnu: +if not args.no_sepfw: print(f"Copying SEPFW (0x{sepfw_length:x} bytes)...") p.memcpy8(image_addr + sepfw_off, sepfw_start, sepfw_length) print(f"Adjusting addresses in ADT...") u.adt["chosen"]["memory-map"].SEPFW = (new_base + sepfw_off, sepfw_length) u.adt["chosen"]["memory-map"].BootArgs = (image_addr + bootargs_off, bootargs_size) + u.push_adt() + print("Setting secondary CPU RVBARs...") rvbar = entry & ~0xfff @@ -65,16 +67,10 @@ if args.xnu: print(f" {cpu.name}: [0x{addr:x}] = 0x{rvbar:x}") p.write64(addr, rvbar) - u.push_adt() - print("Setting up bootargs...") tba = u.ba.copy() -if args.xnu: tba.top_of_kernel_data = new_base + image_size -else: - # SEP firmware is in here somewhere, keep top_of_kdata high so we hopefully don't clobber it - tba.top_of_kernel_data = max(tba.top_of_kernel_data, new_base + image_size) if len(args.boot_args) > 0: boot_args = " ".join(args.boot_args)
Match.construct: rename "scope" to "outer_scope" TN:
@@ -843,10 +843,11 @@ class Match(AbstractExpression): :rtype: ResolvedExpression """ + outer_scope = PropertyDef.get_scope() + # Add the variables created for this expression to the current scope - scope = PropertyDef.get_scope() for _, var, _ in self.matchers: - scope.add(var.local_var) + outer_scope.add(var.local_var) matched_expr = construct(self.matched_expr) check_source_language(issubclass(matched_expr.type, ASTNode) @@ -865,7 +866,7 @@ class Match(AbstractExpression): type=matched_expr.type, create_local=True ) - PropertyDef.get_scope().add(matched_abstract_var.local_var) + outer_scope.add(matched_abstract_var.local_var) matched_var = construct(matched_abstract_var) constructed_matchers = []
Fix minor typo Change uia_phone_number to uia_phone_mapper
@@ -320,7 +320,7 @@ These configuration keys are used globally across all features. [ {"email": {"mapper": uia_email_mapper, "case_insensitive": True}}, - {"us_phone_number": {"mapper": uia_phone_number}}, + {"us_phone_number": {"mapper": uia_phone_mapper}}, ] @@ -1056,7 +1056,7 @@ Unified Signin If you select ``sms`` then make sure you add this to :py:data:`SECURITY_USER_IDENTITY_ATTRIBUTES`:: - {"us_phone_number": {"mapper": uia_phone_number}}, + {"us_phone_number": {"mapper": uia_phone_mapper}}, Default: ``["password", "email", "authenticator", "sms"]`` - which are the only supported options.
Add local_get extra checks TODO This is also a prerequisite for simplified get key, which is a recommended step for .filter(pk_in=...) / prefetch_related optimization.
@@ -317,6 +317,13 @@ class QuerySetMixin(object): # which is very fast, but not invalidated. # Don't bother with Q-objects, select_related and previous filters, # simple gets - thats what we are really up to here. + # + # TODO: this checks are far from adequete, at least these are missed: + # - settings.CACHEOPS_ENABLED + # - self._for_write + # - self._fields (values, values_list) + # - annotations + # - ... if self._cacheprofile['local_get'] \ and not args \ and not self.query.select_related \
tests: reduce the amount of time we wait This sleep 120 looks a bit long, let's reduce this to 30sec and see if things go faster.
@@ -229,16 +229,16 @@ commands= copy_admin_key={env:COPY_ADMIN_KEY:False} \ " - # wait 2 minutes for services to be ready - sleep 120 + # wait 30sec for services to be ready + sleep 30 # test cluster state using ceph-ansible tests testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests # reboot all vms ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/reboot.yml - # wait 2 minutes for services to be ready - sleep 120 + # wait 30sec for services to be ready + sleep 30 # retest to ensure cluster came back up correctly after rebooting testinfra -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
localtest: Easier access to DockerInDocker environment Adding an environment variable for the Docker socket that 'runtests.sh' uses, makes it easier to access the DinD instance from other parent container processes.
@@ -46,5 +46,6 @@ sudo docker run -ti \ -v /tmp/faucet-pip-cache:/var/tmp/pip-cache \ -v /lib/modules:/lib/modules \ -v /var/local/lib/docker:/var/lib/docker \ + -e DOCKER_HOST=unix:///var/local/run/docker.sock \ -e FAUCET_TESTS="$FAUCET_TESTS" \ faucet/tests $CMD
Make celery monitoring heartbeat tasks use ignore_result=True To avoid unnecessary reads and writes to the postgres result backend.
@@ -97,5 +97,5 @@ class Heartbeat(object): heartbeat.__name__ = str(self.periodic_task_name) - heartbeat = periodic_task(run_every=HEARTBEAT_FREQUENCY, queue=self.queue)(heartbeat) + heartbeat = periodic_task(run_every=HEARTBEAT_FREQUENCY, queue=self.queue, ignore_result=True)(heartbeat) return heartbeat
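Beyond the heartbeat wrapper in the diff, the same option applies to any Celery task; a minimal sketch with a placeholder broker URL and task body:

```python
from celery import Celery

app = Celery("monitoring", broker="redis://localhost:6379/0")

@app.task(ignore_result=True)
def heartbeat():
    # nothing is written to the result backend for this task,
    # saving a read/write round trip per run
    pass
```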
Update README.md added newsletter link in readme
| <a href="https://discord.gg/xZjKRaNp8b">Join Discord</a> | + <a href="https://evidentlyai.com/sign-up">Newsletter</a> + | <a href="https://evidentlyai.com/blog">Blog</a> | <a href="https://twitter.com/EvidentlyAI">Twitter</a>
Removed "View Larger" button for images. To save space, make the image itself a link.
</article> <script type="text/html" id="image-preview-template"> - <img data-bind="attr: { src: thumb_url }" />&nbsp;&nbsp; - <a target="_blank" - class="btn btn-default preview-media" - data-bind="attr: { href: url }" - data-toggle="tooltip" data-title="{% trans 'Opens image in new tab' %}.">{% trans 'View Larger' %}</a> + <a target="_blank" data-bind="attr: { href: url }"> + <img class="preview-media" + data-bind="attr: { src: thumb_url }" + data-toggle="tooltip" + data-title="{% trans 'Open image in new tab' %}." /> + </a> </script> <script type="text/html" id="audio-preview-template"> <a target="_blank" class="btn btn-default preview-media" data-bind="attr: { href: url }" - data-toggle="tooltip" data-title="{% trans 'Opens file in new tab' %}.">{% trans 'Hear Audio' %}</a> + data-toggle="tooltip" data-title="{% trans 'Open file in new tab' %}.">{% trans 'Hear Audio' %}</a> </script> <script type="text/html" id="video-preview-template"> <a target="_blank" class="btn btn-default preview-media" data-bind="attr: { href: url }" - data-toggle="tooltip" data-title="{% trans 'Opens file in new tab' %}.">{% trans 'View Video' %}</a> + data-toggle="tooltip" data-title="{% trans 'Open file in new tab' %}.">{% trans 'View Video' %}</a> </script> {% for uploader in uploaders %}
Return always `uniqueNonce` even if count is 0 Closes
@@ -454,9 +454,7 @@ class SafeMultisigTransactionListView(ListAPIView): ) response = super().get(request, *args, **kwargs) - response.data["count_unique_nonce"] = ( - self.get_unique_nonce(address) if response.data["count"] else 0 - ) + response.data["count_unique_nonce"] = self.get_unique_nonce(address) return response @swagger_auto_schema(
ch_tests_tool: prefer SerialConsole for kernel logs Use SerialConsole for fetching kernel logs. It is more versatile than dmesg. Fallback to dmesg if SerialConsole feature is not supported.
@@ -10,6 +10,7 @@ from assertpy.assertpy import assert_that, fail from lisa import Environment, notifier from lisa.executable import Tool +from lisa.features import SerialConsole from lisa.messages import SubTestMessage, TestStatus, create_test_result_message from lisa.operating_system import CBLMariner from lisa.testsuite import TestResult @@ -83,7 +84,7 @@ class CloudHypervisorTests(Tool): r.status, ) - self._save_dmesg_logs(log_path) + self._save_kernel_logs(log_path) has_failures = len(failures) > 0 if result.is_timeout and has_failures: @@ -156,7 +157,7 @@ class CloudHypervisorTests(Tool): with open(testcase_log_file, "w") as f: f.write(result.stdout) - self._save_dmesg_logs(log_path) + self._save_kernel_logs(log_path) assert_that( failed_testcases, f"Failed Testcases: {failed_testcases}" @@ -298,7 +299,14 @@ class CloudHypervisorTests(Tool): return result.group(0) return "" - def _save_dmesg_logs(self, log_path: Path) -> None: + def _save_kernel_logs(self, log_path: Path) -> None: + # Use serial console if available. Serial console logs can be obtained + # even if the node goes down (hung, panicked etc.). Whereas, dmesg + # can only be used if node is up and LISA is able to connect via SSH. + if self.node.features.is_supported(SerialConsole): + serial_console = self.node.features[SerialConsole] + serial_console.get_console_log(log_path, force_run=True) + else: dmesg_str = self.node.tools[Dmesg].get_output(force_run=True) dmesg_path = log_path / "dmesg" with open(str(dmesg_path), "w") as f:
Deprecate TrainerProperties Mixin and move property definitions directly into `trainer.py`
@@ -139,10 +139,9 @@ class ModelManager: rank = get_rank() if rank == 0: - trainer_logger = lightning_trainer.logger # pyre-ignore + trainer_logger = lightning_trainer.logger logger_data = trainer_logger.line_plot_aggregated - # pyre-ignore trainer_logger.clear_local_data() if reporter is None: training_report = None
Corrects model serving command line in a tutorial The command line without this fix would give the error below: `Error: Missing option "--model-path" / "-m".`
@@ -373,11 +373,11 @@ in MLflow saved the model as an artifact within the run. In this example, you can use this MLmodel format with MLflow to deploy a local REST server that can serve predictions. - To deploy the server, run: + To deploy the server, run (replace the path with your model's actual path): .. code:: - mlflow pyfunc serve /Users/mlflow/mlflow-prototype/mlruns/0/7c1a0d5c42844dcdb8f5191146925174/artifacts/model -p 1234 + mlflow pyfunc serve -m /Users/mlflow/mlflow-prototype/mlruns/0/7c1a0d5c42844dcdb8f5191146925174/artifacts/model -p 1234 .. note::
removes use_instances from function signature because it doesn't do anything anymore
@@ -436,7 +436,7 @@ class OpenAPIConverter(object): def fields2parameters( self, fields, schema=None, use_refs=True, default_in='body', name='body', required=False, - use_instances=False, description=None, **kwargs + description=None, **kwargs ): """Return an array of OpenAPI parameters given a mapping between field names and :class:`Field <marshmallow.Field>` objects. If `default_in` is "body", then return an array @@ -456,7 +456,7 @@ class OpenAPIConverter(object): openapi_default_in = __location_map__.get(default_in, default_in) if self.openapi_version.major < 3 and openapi_default_in == 'body': if schema is not None: - prop = self.resolve_schema_dict(schema, use_instances=use_instances) + prop = self.resolve_schema_dict(schema) else: prop = self.fields2jsonschema(fields, use_refs=use_refs) @@ -683,15 +683,13 @@ class OpenAPIConverter(object): } return ref_paths[self.openapi_version.major] - def resolve_schema_dict(self, schema, use_instances=False): + def resolve_schema_dict(self, schema): if isinstance(schema, dict): if schema.get('type') == 'array' and 'items' in schema: - schema['items'] = self.resolve_schema_dict( - schema['items'], use_instances=use_instances, - ) + schema['items'] = self.resolve_schema_dict(schema['items']) if schema.get('type') == 'object' and 'properties' in schema: schema['properties'] = { - k: self.resolve_schema_dict(v, use_instances=use_instances) + k: self.resolve_schema_dict(v) for k, v in schema['properties'].items() } return schema
Refactor Changed listener to expect only the Event model
@@ -35,5 +35,5 @@ class EventListener(StoppableThread): while not self.wait(0.1): while not self.events_queue.empty(): - event_type, event = self.events_queue.get() - self.process_next_message(event_type, event) + event = self.events_queue.get() + self.process_next_message(event)
Remove dependency on matplotlib Matplotlib was used just to include the colormap viridis; this has been changed to import it from pyqtgraph.
# import logging -import sys import numpy as np import pyqtgraph as pg @@ -32,16 +31,6 @@ from .Qt import QtCore log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) -try: - from matplotlib.cm import viridis -except ImportError: - log.warning("Matplotlib not found. Images will be greyscale") - - -def _greyscale_colormap(x): - """Simple greyscale colormap. Assumes x is already normalized.""" - return np.array([x, x, x, 1]) - class ResultsCurve(pg.PlotDataItem): """ Creates a curve loaded dynamically from a file through the Results object. The data can @@ -87,10 +76,7 @@ class ResultsImage(pg.ImageItem): self.ysize = int(np.ceil((self.yend - self.ystart) / self.ystep)) + 1 self.img_data = np.zeros((self.ysize, self.xsize, 4)) self.force_reload = force_reload - if 'matplotlib.cm' in sys.modules: - self.colormap = viridis - else: - self.colormap = _greyscale_colormap + self.cm = pg.colormap.get('viridis') super().__init__(image=self.img_data) @@ -138,6 +124,10 @@ class ResultsImage(pg.ImageItem): else: return int(x) + def colormap(self, x): + """ Return mapped color as 0.0-1.0 floats RGBA """ + return self.cm.map(x, mode='float') + # TODO: colormap selection
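The replacement assumes pyqtgraph ships its own colormaps; a short sketch of the calls used above, assuming a reasonably recent pyqtgraph release:

```python
import numpy as np
import pyqtgraph as pg

cm = pg.colormap.get("viridis")

# mode='float' returns RGBA values in the 0.0-1.0 range, matching what the
# old matplotlib-based colormap call produced
rgba = cm.map(np.linspace(0.0, 1.0, 5), mode="float")
print(rgba.shape)   # (5, 4)
```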
org_settings: Remove print statements. This commit deletes the redundant `print` statement from the `test_upload.py` file.
@@ -1703,9 +1703,7 @@ class S3Test(ZulipTestCase): zerver.lib.upload.upload_backend.upload_realm_logo_image(image_file, user_profile, night) original_path_id = os.path.join(str(user_profile.realm.id), "realm", "%s.original" % (file_name)) - print(original_path_id) original_key = bucket.get_key(original_path_id) - print(original_key) image_file.seek(0) self.assertEqual(image_file.read(), original_key.get_contents_as_string())
for audio - hide the VLC instance window. The window is sized at 1x1 pixel.
@@ -211,7 +211,7 @@ class DialogCodeAV(QtWidgets.QDialog): self.ddialog.setWindowFlags(self.ddialog.windowFlags() | QtCore.Qt.CustomizeWindowHint) # Disable close button, only close through closing the Ui_Dialog_code_av self.ddialog.setWindowFlags(self.ddialog.windowFlags() & ~QtCore.Qt.WindowCloseButtonHint) - self.ddialog.resize(640, 480) + self.ddialog.resize(1, 1) self.ddialog.gridLayout = QtWidgets.QGridLayout(self.ddialog) self.ddialog.dframe = QtWidgets.QFrame(self.ddialog) self.ddialog.dframe.setObjectName("frame") @@ -492,6 +492,10 @@ class DialogCodeAV(QtWidgets.QDialog): mb.exec_() self.closeEvent() return + #TODO sizes + if self.media_data['mediapath'][0:7] != "/audio/": + self.ddialog.resize(640, 480) + # clear comboBox tracks options and reload when playing/pausing self.ui.comboBox_tracks.clear() # Put the media in the media player @@ -2089,7 +2093,7 @@ class DialogViewAV(QtWidgets.QDialog): # disable close button, only close through closing the Ui_Dialog_view_av self.ddialog.setWindowFlags(self.ddialog.windowFlags() & ~QtCore.Qt.WindowCloseButtonHint) self.ddialog.setWindowTitle(self.media_data['mediapath']) - self.ddialog.resize(640, 480) + self.ddialog.resize(1,1) self.ddialog.gridLayout = QtWidgets.QGridLayout(self.ddialog) self.ddialog.dframe = QtWidgets.QFrame(self.ddialog) self.ddialog.dframe.setObjectName("frame") @@ -2127,6 +2131,9 @@ class DialogViewAV(QtWidgets.QDialog): mb.exec_() self.closeEvent() return + if self.media_data['mediapath'][0:7] != "/audio/": + #TODO sizes + self.ddialog.resize(640, 480) # Put the media in the media player self.mediaplayer.set_media(self.media)
hierachy_layout: Move number of via arg to add_power_pins(). This allows custom modules to state how many vias they need for power rails.
@@ -1015,7 +1015,7 @@ class layout(): - def add_power_pin(self, name, loc, vertical=False, start_layer="m1"): + def add_power_pin(self, name, loc, size=[1,1], vertical=False, start_layer="m1"): """ Add a single power pin from M3 down to M1 at the given center location. The starting layer is specified to determine which vias are needed. @@ -1027,12 +1027,14 @@ class layout(): if start_layer=="m1": self.add_via_center(layers=self.m1_stack, + size=size, offset=loc, directions=direction) if start_layer=="m1" or start_layer=="m2": via=self.add_via_center(layers=self.m2_stack, + size=size, offset=loc, directions=direction)
go: update build tags check from latest go sources Update the build tags check from the latest `go` sources: Support `unix` build tag. Support `boringcrypto` build tag.
@@ -16,8 +16,27 @@ import ( // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // -// This file was adapted from Go src/go/build/build.go at commit 7c694fbad1ed6f2f825fd09cf7a86da3be549cea -// on 2022-02-25. +// This file was adapted from Go toolchain: +// https://github.com/golang/go/blob/2da8a55584aa65ce1b67431bb8ecebf66229d462/src/go/build/build.go + +// unixOS is the set of GOOS values matched by the "unix" build tag. +// This is not used for filename matching. +// This list also appears in cmd/dist/build.go and +// cmd/go/internal/imports/build.go. +var unixOS = map[string]bool{ + "aix": true, + "android": true, + "darwin": true, + "dragonfly": true, + "freebsd": true, + "hurd": true, + "illumos": true, + "ios": true, + "linux": true, + "netbsd": true, + "openbsd": true, + "solaris": true, +} // matchTag reports whether the name is one of: // @@ -27,6 +46,9 @@ import ( // ctxt.Compiler // linux (if GOOS = android) // solaris (if GOOS = illumos) +// darwin (if GOOS = ios) +// unix (if this is a Unix GOOS) +// boringcrypto (if GOEXPERIMENT=boringcrypto is enabled) // tag (if tag is listed in ctxt.BuildTags or ctxt.ReleaseTags) // // It records all consulted tags in allTags. @@ -51,6 +73,12 @@ func matchTag(ctxt *build.Context, name string, allTags map[string]bool) bool { if ctxt.GOOS == "ios" && name == "darwin" { return true } + if name == "unix" && unixOS[ctxt.GOOS] { + return true + } + if name == "boringcrypto" { + name = "goexperiment.boringcrypto" // boringcrypto is an old name for goexperiment.boringcrypto + } // other tags for _, tag := range ctxt.BuildTags {
DOC: Fix a mistake in the 1.7.0 release notes. The function that was added to the C API is `PyArray_FailUnlessWriteable`.
@@ -162,7 +162,7 @@ Added experimental support for the AArch64 architecture. C API ----- -New function ``PyArray_RequireWriteable`` provides a consistent interface +New function ``PyArray_FailUnlessWriteable`` provides a consistent interface for checking array writeability -- any C code which works with arrays whose WRITEABLE flag is not known to be True a priori, should make sure to call this function before writing.
Fix format in live-migration-usage.rst It is :command:`some_command` instead of :command:``some_command``.
@@ -11,8 +11,8 @@ non-live-migration options. The instructions below cover shared-storage and volume-backed migration. To block-migrate instances, add the command-line option -:command:``--block-migrate`` to the :command:``nova live-migration`` command, -and :command:``--block-migration`` to the :command:``openstack server migrate`` +``-block-migrate`` to the :command:`nova live-migration` command, +and ``--block-migration`` to the :command:`openstack server migrate` command. .. _section-manual-selection-of-dest:
Move closing parenthesis to the end of the previous line I think the parenthesis is less distracting when placed there.
@@ -358,8 +358,7 @@ flow is zero (conversion_factor_single_flow). inputs={b_gas: solph.Flow(nominal_value=10e10)}, outputs={b_el: solph.Flow(), b_th: solph.Flow()}, conversion_factors={b_el: 0.3, b_th: 0.5}, - conversion_factor_single_flow={b_el: 0.5} - ) + conversion_factor_single_flow={b_el: 0.5}) For :py:class:`~oemof.solph.components.ExtractionTurbineCHP` instances, the following constraints are created:
Fix release-strategy link [ci skip-rust-tests] [ci skip-jvm-tests]
@@ -24,7 +24,7 @@ how to get your change committed. + [[Pants Style Guide|https://pants.readme.io/docs/style-guide]] + Releasing Pants + [[Release Process|https://pants.readme.io/docs/release-process]] - + [[Release Strategy|https://pants.readme.io/docs/releases-strategy]] + + [[Release Strategy|https://pants.readme.io/docs/release-strategy]] + [[Deprecation Policy|https://pants.readme.io/docs/deprecation-policy]] + [[Updating the Docs|pants('src/docs:docs')]] + [[Pants Committers|https://pants.readme.io/docs/committers]]
Improve Python version detection Ubuntu 16.04 no longer includes a 'python' executable. If none of {$PYTHON,python,python2,python3} are usable, we advise the user to either set PYTHON or install a usable python.
#!/usr/bin/env bash -# Mininet install script for Ubuntu (and Debian Wheezy+) -# Brandon Heller ([email protected]) +# Mininet install script for Ubuntu and Debian +# Original author: Brandon Heller # Fail on error set -e @@ -102,14 +102,26 @@ function version_ge { [ "$1" == "$latest" ] } -# Attempt to identify Python version +# Attempt to detect Python version PYTHON=${PYTHON:-python} -if $PYTHON --version |& grep 'Python 2' > /dev/null; then - PYTHON_VERSION=2; PYPKG=python -else - PYTHON_VERSION=3; PYPKG=python3 +PRINTVERSION='import sys; print(sys.version_info)' +PYTHON_VERSION=unknown +for python in $PYTHON python2 python3; do + if $python -c "$PRINTVERSION" |& grep 'major=2'; then + PYTHON=$python; PYTHON_VERSION=2; PYPKG=python + break + elif $python -c "$PRINTVERSION" |& grep 'major=3'; then + PYTHON=$python; PYTHON_VERSION=3; PYPKG=python3 + break fi -echo "${PYTHON} is version ${PYTHON_VERSION}" +done +if [ "$PYTHON_VERSION" == unknown ]; then + echo "Can't find a working python command ('$PYTHON' doesn't work.)" + echo "You may wish to export PYTHON or install a working 'python'." + exit 1 +fi + +echo "Detected Python (${PYTHON}) version ${PYTHON_VERSION}" # Kernel Deb pkg to be removed: KERNEL_IMAGE_OLD=linux-image-2.6.26-33-generic
wrap LSTMCell in DropoutWrapper if dropout param is given Adds a `dropout=_` param to `lstm()`, which if provided wraps `lstm_cell` in a `rnn.DropoutWrapper` for regularization. Feel free to close if there's a smarter way; this is more for consideration of providing a dropout option for these layers where applicable.
@@ -234,12 +234,13 @@ def conv2d(x, size, window=3, stride=1, padding='SAME', bias=False, activation=' return x -def lstm(x, size=None, summary_level=0): +def lstm(x, size=None, summary_level=0, dropout=None): """ Args: x: Input tensor. size: Layer size, defaults to input size. + dropout: dropout_keep_prob (eg 0.5) for regularization, applied via rnn.DropoutWrapper Returns: @@ -254,6 +255,8 @@ def lstm(x, size=None, summary_level=0): with tf.variable_scope('lstm'): internal_input = tf.placeholder(dtype=tf.float32, shape=(None, 2, size)) lstm_cell = tf.contrib.rnn.LSTMCell(num_units=size) + if dropout: + lstm_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell, output_keep_prob=dropout) c = internal_input[:, 0, :] h = internal_input[:, 1, :] state = tf.contrib.rnn.LSTMStateTuple(c=c, h=h)
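A minimal sketch of the wrapping pattern this patch introduces, using the same TF 1.x `tf.contrib.rnn` calls that appear in the diff; the helper name `make_cell` and the concrete sizes are illustrative only, and the snippet assumes a TensorFlow 1.x environment where `tf.contrib` exists. One point worth noting is that `output_keep_prob` is a keep probability, so a value like `0.8` retains 80% of the outputs rather than dropping 80%.

```python
import tensorflow as tf  # assumes TensorFlow 1.x, where tf.contrib.rnn is available

def make_cell(size, dropout=None):
    """Build an LSTM cell, optionally wrapped for output dropout (illustrative helper)."""
    cell = tf.contrib.rnn.LSTMCell(num_units=size)
    if dropout:
        # output_keep_prob is a *keep* probability: 0.8 keeps 80% of the
        # cell outputs at each step rather than dropping 80% of them.
        cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=dropout)
    return cell

cell = make_cell(size=64, dropout=0.8)
```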
Fix in ManagedObject search by prefix HG-- branch : feature/microservices
@@ -36,6 +36,7 @@ from noc.main.models.notificationgroup import NotificationGroup from noc.inv.models.networksegment import NetworkSegment from noc.core.profile.loader import loader as profile_loader from noc.core.model.fields import INETField, TagsField, DocumentReferenceField +from noc.lib.db import SQL from noc.lib.app.site import site from noc.lib.stencil import stencil_registry from noc.lib.validators import is_ipv4, is_ipv4_prefix @@ -985,9 +986,7 @@ class ManagedObject(Model): elif is_ipv4_prefix(query): # Match by prefix p = IP.prefix(query) - if p.mask >= 16: - return Q(address__gte=p.first.address, - address__lte=p.last.address) + return SQL("address::inet <<= '%s'" % p) else: try: mac = MACAddressParameter().clean(query)
Change InsightsEvaluator to pass stream along to its superclass during construction. Fixes
@@ -118,7 +118,7 @@ class SingleEvaluator(Evaluator): class InsightsEvaluator(SingleEvaluator): def __init__(self, broker=None, system_id=None, stream=sys.stdout, incremental=False): - super(InsightsEvaluator, self).__init__(broker, stream=sys.stdout, incremental=incremental) + super(InsightsEvaluator, self).__init__(broker, stream=stream, incremental=incremental) self.system_id = system_id self.branch_info = {} self.product = "rhel"
Update __init__.py add missing `-map 0` to include all streams
@@ -200,7 +200,7 @@ class Converter(object): i += 1 os.rename(outfile, infile) - opts = ['-i', infile, '-c', 'copy'] + opts = ['-i', infile, '-c', 'copy', '-map', '0'] info = self.ffmpeg.probe(infile) i = len(info.attachment)
Extend contribution doc with IRC contact details Add details on how to contact Trove members over the IRC channel.
@@ -323,3 +323,13 @@ If you want to run only the tests in one file you can use testtools e.g. Note that some unit tests can use an existing database. The script ``tools/test-setup.sh`` sets up the database for CI jobs and can be used for local setup. + +Is there something missing? +--------------------------- + +Do not hesitate to chat and clear your doubts about Trove on +IRC: #openstack-trove <http://webchat.freenode.net/?channels=openstack-trove>`_ +on freenode.net. Also, we meet every week at #openstack-meeting-alt +<http://webchat.freenode.net/?channels=openstack-trove>`_ to discuss +ongoing issues. +
[airflow] fix error bug Test Plan: this is tricky to test since getting PythonErrors is unexpected behavior - open to ideas Reviewers: nate, max
message pipelineName } + ... on PythonError { + message + stack + } ... on ExecutePlanSuccess { pipeline { name
Remove Vagrant mount options Originally applied to account for Cygwin. Not an issue on WSL, and was responsible for masking challenge development errors (e.g. leaving off executable permissions).
# vi: set ft=ruby : # TODO: -# - mount_options looks really fishy # - use double quote correctly require 'etc' @@ -20,9 +19,7 @@ Vagrant.configure("2") do |config| shell.vm.network "private_network", ip: (ENV['SIP'] || '192.168.2.3'), nic_type: "virtio" shell.vm.synced_folder ".", "/vagrant", disabled: true - shell.vm.synced_folder ".", "/picoCTF", - owner: "vagrant", group: "vagrant", - mount_options: ["dmode=775", "fmode=775"] + shell.vm.synced_folder ".", "/picoCTF", owner: "vagrant", group: "vagrant" # uses ansible_local so that a user does not need to have ansible installed shell.vm.provision :ansible_local do |ansible| @@ -55,9 +52,7 @@ Vagrant.configure("2") do |config| web.vm.network "private_network", ip: (ENV['WIP'] || '192.168.2.2'), nic_type: "virtio" web.vm.synced_folder ".", "/vagrant", disabled: true - web.vm.synced_folder ".", "/picoCTF", - owner: "vagrant", group: "vagrant", - mount_options: ["dmode=775", "fmode=775"] + web.vm.synced_folder ".", "/picoCTF", owner: "vagrant", group: "vagrant" # uses ansible_local so that a user does not need to have ansible installed web.vm.provision :ansible_local do |ansible|
Add mypy fallback class for TypedDict methods to mypy_extensions This class is not defined at runtime but it's used by mypy internally to support TypedDict methods. Use NoReturn in argument types for better type safety when the related mypy plugin hook is not active.
-from typing import Dict, Type, TypeVar, Optional, Union, Any, Generic +import abc +import sys +from typing import ( + Dict, Type, TypeVar, Optional, Union, Any, Generic, Mapping, ItemsView, KeysView, ValuesView +) _T = TypeVar('_T') _U = TypeVar('_U') +# Internal mypy fallback type for all typed dicts (does not exist at runtime) +class _TypedDict(Mapping[str, object], metaclass=abc.ABCMeta): + def copy(self: _T) -> _T: ... + # Using NoReturn so that only calls using mypy plugin hook that specialize the signature + # can go through. + def setdefault(self, k: NoReturn, default: object) -> object: ... + # Mypy plugin hook for 'pop' expects that 'default' has a type variable type. + def pop(self, k: NoReturn, default: _T = ...) -> object: ... + def update(self: _T, __m: _T) -> None: ... + if sys.version_info < (3, 0): + def has_key(self) -> bool: ... + def viewitems(self) -> ItemsView[str, object]: ... + def viewkeys(self) -> KeysView[str]: ... + def viewvalues(self) -> ValuesView[object]: ... + def __delitem__(self, k: NoReturn) -> None: ... + def TypedDict(typename: str, fields: Dict[str, Type[_T]], total: bool = ...) -> Type[dict]: ... def Arg(type: _T = ..., name: Optional[str] = ...) -> _T: ...
parent: avoid needless quoting in Argv. This just makes debug output a little more readable.
@@ -457,10 +457,16 @@ class Argv(object): def __init__(self, argv): self.argv = argv + must_escape = frozenset('\\$"`!') + must_escape_or_space = must_escape | frozenset(' ') + def escape(self, x): + if not self.must_escape_or_space.intersection(x): + return x + s = '"' for c in x: - if c in '\\$"`': + if c in self.must_escape: s += '\\' s += c s += '"'
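A standalone sketch of the quoting rule the patch introduces, so the behavior change is easy to see; the class body is reduced to just the escape logic and the sample arguments are invented. Plain words now pass through untouched, while anything containing a space or a shell-special character still gets quoted and escaped.

```python
class Argv:
    must_escape = frozenset('\\$"`!')
    must_escape_or_space = must_escape | frozenset(' ')

    def escape(self, x):
        if not self.must_escape_or_space.intersection(x):
            return x  # plain words are returned unquoted, keeping debug output readable
        s = '"'
        for c in x:
            if c in self.must_escape:
                s += '\\'
            s += c
        s += '"'
        return s

a = Argv()
print(a.escape('ls'))           # ls            (left alone)
print(a.escape('hello world'))  # "hello world" (space forces quoting)
print(a.escape('a"b'))          # "a\"b"        (embedded quote gets escaped)
```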
Fix a minor indentation issue in the generated C API binding TN: minor
with Export => True, Convention => C, External_name => "${accessor_name}"; - ${ada_doc(field, lang='c')} + ${ada_doc(field, 3, lang='c')} </%def>
Fix bug in test_pkg Grains were being referenced but not required.
@@ -728,6 +728,7 @@ class PkgTest(ModuleCase, SaltReturnAssertsMixin): ret = self.run_state('pkg.removed', name=target) self.assertSaltTrueReturn(ret) + @requires_system_grains def test_pkg_014_installed_missing_release(self, grains=None): # pylint: disable=unused-argument ''' Tests that a version number missing the release portion still resolves
Update sharing-files.rst Updated Channels > Work with Messages > Share Files > Attachment Limits and Sizes to: add a config settings link for configuring the maximum file attachment size; correct the config settings link for the maximum image resolution setting
@@ -69,6 +69,6 @@ Other document previews (such as Word, Excel, or PPT) are not yet supported. Attachment Limits and Sizes --------------------------- -Up to 10 files can be attached per post. The default maximum file size is 100 MB, but this can be changed by the System Admin. +Up to 10 files can be attached per post. The default maximum file size is 100 MB, but this can be changed by the System Admin. See our `Configuration Settings <https://docs.mattermost.com/configure/configuration-settings.html#maximum-file-size>`__ product documentation for details. -Image files can be a maximum size of 7680 pixels x 4320 pixels, with a maximum image resolution of 33 MP (mega pixels) or 8K resolution, and a maximum raw image file size of approximately 253 MB. System Admins can customize the maximum image resolution size within the ``config.json`` file. See our `Configuration Settings <https://docs.mattermost.com/configure/configuration-settings.html#maximum-file-size>`__ product documentation for details. +Image files can be a maximum size of 7680 pixels x 4320 pixels, with a maximum image resolution of 33 MP (mega pixels) or 8K resolution, and a maximum raw image file size of approximately 253 MB. System Admins can customize the maximum image resolution size within the ``config.json`` file. See our `Configuration Settings <https://docs.mattermost.com/configure/configuration-settings.html#maximum-image-resolution>`__ product documentation for details.
fix: Stream feature view meta undefined created_timestamp issue Fix the stream feature view meta undefined created_timestamp issue.
@@ -28,8 +28,8 @@ const FeastSFVSchema = z.object({ }), }), meta: z.object({ - createdTimestamp: z.string().transform((val) => new Date(val)), - lastUpdatedTimestamp: z.string().transform((val) => new Date(val)), + createdTimestamp: z.string().transform((val) => new Date(val)).optional(), + lastUpdatedTimestamp: z.string().transform((val) => new Date(val)).optional(), }), });
Recruitment: reverse nomination order This makes the review appear in chronological order, making it easier to say things like "^^ what they said".
@@ -120,7 +120,8 @@ class Reviewer: opening = f"<@&{Roles.mod_team}> <@&{Roles.admins}>\n{member.mention} ({member}) for Helper!" current_nominations = "\n\n".join( - f"**<@{entry['actor']}>:** {entry['reason'] or '*no reason given*'}" for entry in nomination['entries'] + f"**<@{entry['actor']}>:** {entry['reason'] or '*no reason given*'}" + for entry in nomination['entries'][::-1] ) current_nominations = f"**Nominated by:**\n{current_nominations}"
bi tree function HG-- branch : feature/microservices
@@ -229,21 +229,21 @@ class BIAPI(API): if node and node["id"] == p_id: return node else: - if node and "nodes" in node.keys(): - for child in node["nodes"]: + if node and "children" in node.keys(): + for child in node["children"]: _searched = search_parent(child, p_id) if _searched: return _searched else: return None - def sort_nodes(node): - if "nodes" not in node.keys(): + def sort_children(node): + if "children" not in node.keys(): return else: - node["nodes"] = sorted(node["nodes"], key=lambda x: x["text"]) - for n in node["nodes"]: - sort_nodes(n) + node["children"] = sorted(node["children"], key=lambda x: x["text"]) + for n in node["children"]: + sort_children(n) if "datasource" not in params: raise APIError("No datasource") @@ -312,12 +312,12 @@ class BIAPI(API): parent_id = col[0] if searched: if searched["id"] != col[0]: - if "nodes" not in searched.keys(): - searched["nodes"] = [] - if not col[0] in map(lambda x: x["id"], searched["nodes"]): - searched["nodes"].append({"id": col[0], "text": col[1]}) + if "children" not in searched.keys(): + searched["children"] = [] + if not col[0] in map(lambda x: x["id"], searched["children"]): + searched["children"].append({"id": col[0], "text": col[1]}) else: # start point - tree = {"id": col[0], "text": col[1], "nodes": []} + tree = {"id": col[0], "text": col[1], "children": []} - sort_nodes(tree) + sort_children(tree) return tree
Allow uri searches to use an index Example migration: Add a separate index by dataset. create index ix_agdc_dataset_location_dataset_ref on agdc.dataset_location (dataset_ref); Replace (dataset, uri) index with (uri, dataset) index. alter table agdc.dataset_location add constraint uq_dataset_location_uri_scheme unique (uri_scheme, uri_body, dataset_ref); alter table agdc.dataset_location drop constraint uq_dataset_location_dataset_ref;
@@ -75,7 +75,7 @@ DATASET = Table( DATASET_LOCATION = Table( 'dataset_location', _core.METADATA, Column('id', Integer, primary_key=True, autoincrement=True), - Column('dataset_ref', None, ForeignKey(DATASET.c.id), nullable=False), + Column('dataset_ref', None, ForeignKey(DATASET.c.id), index=True, nullable=False), # The base URI to find the dataset. # @@ -91,7 +91,7 @@ DATASET_LOCATION = Table( Column('added', DateTime(timezone=True), server_default=func.now(), nullable=False), Column('added_by', _sql.PGNAME, server_default=func.current_user(), nullable=False), - UniqueConstraint('dataset_ref', 'uri_scheme', 'uri_body'), + UniqueConstraint('uri_scheme', 'uri_body', 'dataset_ref'), ) # Link datasets to their source datasets.
update roi_objects.md Give more detail regarding default parameter
Find objects within a region of interest, either cut those objects to the region of interest or include objects that overlap with the region of interest. -**plantcv.roi_objects**(*img, roi_contour, roi_hierarchy, object_contour, obj_hierarchy, roi_type*) +**plantcv.roi_objects**(*img, roi_contour, roi_hierarchy, object_contour, obj_hierarchy, roi_type='partial'*) **returns** kept objects, object hierarchy, object mask, object area @@ -16,7 +16,7 @@ completely within the image. - roi_hierarchy = contour of roi, output from one of the pcv.roi subpackage functions - object_contour = contours of objects, output from "find_objects" function - obj_hierarchy = hierarchy of objects, output from "find_objects" function - - roi_type = 'cutto', 'partial' (for partially inside, default), or 'largest' (keep only the largest contour) + - roi_type = 'partial' (for partially inside, default), 'cutto', or 'largest' (keep only the largest contour) - **Context:** - Used to find objects within a region of interest and decide which ones to keep.
fix formatting of presets in util.cli This patch adds braces to the format string to correctly format the available presets in the usage string.
@@ -828,7 +828,7 @@ def cli(f, *, argv=None): usage = [f'USAGE: {progname}'] if doc.presets: - usage.append(f'["|".join(doc.presets)]') + usage.append(f'[{"|".join(doc.presets)}]') usage.extend(('{}' if arg in mandatory else '[{}]').format(f'{arg}={arg[0].upper()}') for arg in serializers) usage = '\n'.join(textwrap.wrap(' '.join(usage), subsequent_indent=' '))
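The bug and the fix come down to f-string evaluation: without braces, the join expression is just literal text inside the string; with braces, it is evaluated. A tiny self-contained illustration — the preset names and variable names are invented for the example.

```python
presets = ['fast', 'accurate']

broken = f'["|".join(presets)]'    # no braces: the expression stays literal text
fixed = f'[{"|".join(presets)}]'   # braces: the join is actually evaluated

print(broken)  # ["|".join(presets)]
print(fixed)   # [fast|accurate]
```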
CompileCtx: move fields types annotation to a compilation pass TN:
@@ -945,26 +945,14 @@ class CompileCtx(object): pass_manager = PassManager() pass_manager.add( GrammarRulePass('compile grammar rule', Parser.compile), + GlobalPass('annotate fields types', + CompileCtx.annotate_fields_types, + disabled=not annotate_fields_types), ) with names.camel_with_underscores: pass_manager.run(self) - if annotate_fields_types: - # Only import lib2to3 if the users needs it - import lib2to3.main - - astnodes_files = { - path.abspath(inspect.getsourcefile(n)) - for n in self.astnode_types - } - - lib2to3.main.main( - "langkit", - ["-f", "annotate_fields_types", - "--no-diff", "-w"] + list(astnodes_files) - ) - for i, astnode in enumerate( (astnode for astnode in self.astnode_types @@ -1293,3 +1281,22 @@ class CompileCtx(object): " not used by the grammar, and their types not annotated:" " {}".format(", ".join(t.name().camel for t in unresolved_types)) ) + + def annotate_fields_types(self): + """ + Modify the Python files where the node types are defined, to annotate + empty Field() definitions. + """ + # Only import lib2to3 if the users needs it + import lib2to3.main + + astnodes_files = { + path.abspath(inspect.getsourcefile(n)) + for n in self.astnode_types + } + + lib2to3.main.main( + "langkit", + ["-f", "annotate_fields_types", + "--no-diff", "-w"] + list(astnodes_files) + )
Test fix - wait for pod to delete before creating new pod Wait for pods to delete before using the PVC on another pod. This avoids a multi-attach error.
@@ -71,6 +71,7 @@ class TestPVCFullWithIORWO(ManageTest): log.info(f"FIO succeeded to fill the PVC with data") log.info(f"Deleting the pod and attaching the full PVC to a new pod") self.pod_obj.delete() + self.pod_obj.ocp.wait_for_delete(resource_name=self.pod_obj.name) log.info(f"Creating a new Pod with the existing full PVC") self.pod_obj = pod_factory(interface=self.interface, pvc=self.pvc_obj) used_space = get_used_space_on_mount_point(self.pod_obj)
[Test] use 100 instead of 1000 labels for labelselect 1000 labels with low folding creates very large, long-running test cases
@@ -83,7 +83,7 @@ def prepare_inputs(input_tensor, idt): @pytest.mark.parametrize("idt", [DataType.UINT8, DataType.UINT16, DataType.INT16]) # labels [email protected]("labels", [10, 1000]) [email protected]("labels", [10, 100]) # folding @pytest.mark.parametrize("fold", [-1, 2, 10]) # number of top labels to select
Use bytearray, not bytes, which is faster. Turns out Alexandros was right that this would be an issue!
@@ -23,7 +23,7 @@ class AlgebraicProtocol(asyncio.Protocol): self.receiveType = receiveType self.sendType = sendType self.transport = None - self.buffer = bytes() + self.buffer = bytearray() self.writelock = threading.Lock() self._logger = logging.getLogger(__name__) @@ -64,7 +64,7 @@ class AlgebraicProtocol(asyncio.Protocol): if toConsume: try: - self.messageReceived(deserialize(self.receiveType, toConsume)) + self.messageReceived(deserialize(self.receiveType, bytes(toConsume))) except Exception: self._logger.error("Error in AlgebraicProtocol: %s", traceback.format_exc()) self.transport.close()
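A rough, self-contained comparison of why accumulating into a `bytearray` tends to be faster: `bytes` is immutable, so building a bigger buffer means constructing a new object each time, while `bytearray` can grow in place and only needs one explicit `bytes()` copy when the data is handed to a consumer (mirroring the `bytes(toConsume)` in the diff). The chunk size and iteration counts below are arbitrary, and exact timings will vary by interpreter.

```python
import timeit

chunk = b'x' * 1024

def grow_bytes(n=1000):
    buf = b''
    for _ in range(n):
        buf = buf + chunk   # immutable bytes: builds a new object every iteration
    return buf

def grow_bytearray(n=1000):
    buf = bytearray()
    for _ in range(n):
        buf += chunk        # mutable bytearray: extends in place
    return bytes(buf)       # one copy when the accumulated data is consumed

print('bytes    ', timeit.timeit(grow_bytes, number=20))
print('bytearray', timeit.timeit(grow_bytearray, number=20))
```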
Update control_heating_cooling_systems.py specify output types of vectorized function
@@ -345,8 +345,8 @@ def calc_simple_temp_control(tsd, bpr, weekday): :rtype: dict """ - tsd['ta_hs_set'] = np.vectorize(get_heating_system_set_point)(tsd['people'], range(HOURS_IN_YEAR), bpr, weekday) - tsd['ta_cs_set'] = np.vectorize(get_cooling_system_set_point)(tsd['people'], range(HOURS_IN_YEAR), bpr, weekday) + tsd['ta_hs_set'] = np.vectorize(get_heating_system_set_point)(tsd['people'], range(HOURS_IN_YEAR), bpr, weekday, otypes=[float]) + tsd['ta_cs_set'] = np.vectorize(get_cooling_system_set_point)(tsd['people'], range(HOURS_IN_YEAR), bpr, weekday, otypes=[float]) return tsd
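A small illustration of why pinning `otypes` matters for `np.vectorize`: without it, the output dtype is inferred by evaluating the function on the first input element, which fails outright on size-0 input and can otherwise guess an unintended dtype. Note that `otypes` is a parameter accepted by the `np.vectorize` constructor. The set-point function below is a simplified, single-argument stand-in for the real ones.

```python
import numpy as np

def set_point(occupancy):
    # stand-in for the real set-point function
    return 21.0 if occupancy > 0 else np.nan

people = np.array([])  # an empty occupancy series

# Without otypes, np.vectorize needs one sample call to infer the output
# dtype, which is impossible for size-0 input.
try:
    np.vectorize(set_point)(people)
except ValueError as err:
    print('dtype inference failed:', err)

# With otypes pinned, empty input is fine and the result dtype is always float.
result = np.vectorize(set_point, otypes=[float])(people)
print(result.dtype, result.shape)  # float64 (0,)
```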
Add BlockStatusStore trait This trait is used to pass the cache of block validation results to the block scheduler.
@@ -42,6 +42,10 @@ pub trait BlockValidator: Sync + Send + Clone { fn process_pending(&self, block: &Block, response_sender: Sender<BlockValidationResult>); } +pub trait BlockStatusStore { + fn status(&self, block_id: &str) -> BlockStatus; +} + #[derive(Clone, Debug)] pub struct BlockValidationResult { pub block_id: String,
File and Pillar roots are dictionaries Fixes test_smtp_return test cases.
@@ -64,8 +64,8 @@ class SMTPReturnerTestCase(TestCase, LoaderModuleMockMixin): 'renderer': 'jinja|yaml', 'renderer_blacklist': [], 'renderer_whitelist': [], - 'file_roots': [], - 'pillar_roots': [], + 'file_roots': {}, + 'pillar_roots': {}, 'cachedir': '/'}), \ patch('salt.returners.smtp_return.gnupg'), \ patch('salt.returners.smtp_return.smtplib.SMTP') as mocked_smtplib: @@ -77,8 +77,8 @@ class SMTPReturnerTestCase(TestCase, LoaderModuleMockMixin): 'renderer': 'jinja|yaml', 'renderer_blacklist': [], 'renderer_whitelist': [], - 'file_roots': [], - 'pillar_roots': [], + 'file_roots': {}, + 'pillar_roots': {}, 'cachedir': '/'}), \ patch('salt.returners.smtp_return.smtplib.SMTP') as mocked_smtplib: self._test_returner(mocked_smtplib)
A minor syntax correction Removed an extra quote - "
@@ -185,7 +185,7 @@ defined in your :meth:`~pytorch_lightning.core.lightning.LightningModule.configu .. warning:: * Before 1.3, Lightning automatically called ``lr_scheduler.step()`` in both automatic and manual optimization. From 1.3, ``lr_scheduler.step()`` is now for the user to call at arbitrary intervals. - * Note that the ``lr_dict`` keys, such as ``"step"`` and ``""interval"``, will be ignored even if they are provided in + * Note that the ``lr_dict`` keys, such as ``"step"`` and ``"interval"``, will be ignored even if they are provided in your :meth:`~pytorch_lightning.core.lightning.LightningModule.configure_optimizers` during manual optimization. Here is an example calling ``lr_scheduler.step()`` every step.
Add link to kaggle tutorial closes
@@ -4,6 +4,11 @@ External Resources Articles -------- +`Grammer of graphics with plotnine <https://www.kaggle.com/residentmario/grammer-of-graphics-with-plotnine-optional/>`_ + A good introductory tutorial on how to use ``plotnine``. It is part + of the `data visualization <https://www.kaggle.com/learn/data-visualisation>`_ + track from kaggle's free online `course <https://www.kaggle.com/learn/overview>`_. + `Comparing plotnine and ggpy <http://pltn.ca/plotnine-superior-python-ggplot/>`_ Compares how ``plotnine`` and ``ggpy`` match up in API and output with ``ggplot2``. If you have used ``ggpy`` and wondering what to
Update Missouri.md Closes
@@ -43,9 +43,9 @@ id: mo-kansascity-1 ### Kansas City police attempt to arrest a man leading the protest then spray the crowd | May 31st +Footage shows a protestor speaking to other protestors on a megaphone. Police grab him and drag him to arrest. Other protestors come to his aid and are pepper-sprayed. - -tags: arrest, tear-gas, protestor +tags: arrest, pepper-spray, spray, protestor id: mo-kansascity-2 @@ -55,6 +55,20 @@ id: mo-kansascity-2 * https://old.reddit.com/r/PublicFreakout/comments/guswxo/he_wasnt_even_addressing_the_police/ +### Police arrest peaceful protestor, pepper-spray others | May 31st + +With tactics remarkably similar to mo-kansascity-2, police grab a protestor standing alone in the street. Other protestors come to aid him and are summarily pepper-sprayed. He is taken behind the police line and arrested. + +tags: arrest, pepper-spray, spray, protestor + +id: mo-kansascity-8 + +**Links** + +* https://www.tiktok.com/@keraclark2/video/6833017428105055494 +* https://twitter.com/lovlitae/status/1268676585269473280 + + ### Police arrest man for speaking and tear-gas nearby protestors | June 1st A line of police stand well apart from a crowd of protestors, one of whom is speaking about the police's use of excessive force. Several officers move in to arrest the speaking man, pepper spraying him and others at point-blank range. The arrested man is dragged/pushed/falls face down onto the road and is pinned there by police.
Transfers: Shuffle equal-weight protocols. Fix Given equal-weight protocols eligible for transfer, shuffle the candidates in order to balance across several endpoints.
import copy import os +import random from urlparse import urlparse @@ -125,6 +126,8 @@ def select_protocol(rse_settings, operation, scheme=None, domain='wan'): raise exception.RSEProtocolDomainNotSupported('Domain %s not supported' % domain) candidates = _get_possible_protocols(rse_settings, operation, scheme, domain) + # Shuffle candidates to load-balance over equal sources + random.shuffle(candidates) return min(candidates, key=lambda k: k['domains'][domain][operation]) @@ -634,6 +637,10 @@ def find_matching_scheme(rse_settings_dest, rse_settings_src, operation_src, ope if not len(src_candidates) or not len(dest_candidates): raise exception.RSEProtocolNotSupported('No protocol for provided settings found : %s.' % str(rse_settings_dest)) + # Shuffle the candidates to load-balance across equal weights. + random.shuffle(dest_candidates) + random.shuffle(src_candidates) + # Select the one with the highest priority dest_candidates = sorted(dest_candidates, key=lambda k: k['domains'][domain][operation_dest]) src_candidates = sorted(src_candidates, key=lambda k: k['domains'][domain][operation_src])
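Why shuffling before `min`/`sorted` balances the load: both are stable with respect to ties, so with a fixed candidate order the same endpoint wins every time priorities are equal; randomising the order first spreads the selections across equal-priority candidates. A small standalone illustration — the protocol dicts are simplified stand-ins for the real RSE protocol structures.

```python
import random
from collections import Counter

candidates = [
    {'endpoint': 'gsiftp', 'priority': 1},
    {'endpoint': 'davs',   'priority': 1},
    {'endpoint': 'root',   'priority': 1},
]

picks = Counter()
for _ in range(10000):
    pool = list(candidates)
    random.shuffle(pool)  # load-balance across equally weighted protocols
    best = min(pool, key=lambda c: c['priority'])
    picks[best['endpoint']] += 1

# Roughly even counts; without the shuffle, 'gsiftp' would win every time.
print(picks)
```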
Fix sign issue The fact that the Dice loss is negative while all other losses are positive caused a problem when assessing improvement
@@ -461,7 +461,7 @@ def cmd_train(context): val_losses.append(val_loss_total_avg) if epoch > 1: - if (val_losses[-2] - val_losses[-1]) * 100 / val_losses[-1] < epsilon: + if (val_losses[-2] - val_losses[-1]) * 100 / abs(val_losses[-1]) < epsilon: patience_count += 1 if patience_count >= patience: print(f"Stopping training due to {patience} epochs without improvements")
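A worked version of the arithmetic behind the fix: when the monitored loss is negative (as this Dice loss is), dividing the improvement by the raw value flips its sign, so genuine improvements look like stagnation to the early-stopping check. The loss values and epsilon below are invented for illustration.

```python
epsilon = 1.0  # minimum relative improvement (%) before patience is incremented

prev_loss, curr_loss = -0.80, -0.85  # Dice-style loss: more negative is better

raw = (prev_loss - curr_loss) * 100 / curr_loss        # 5 / -0.85 ~ -5.9 -> looks like no improvement
fixed = (prev_loss - curr_loss) * 100 / abs(curr_loss)  # 5 /  0.85 ~  5.9 -> correctly counts as improvement

print(raw < epsilon, fixed < epsilon)  # True False
```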
Update README.rst capitalization for Stripe, URL
@@ -92,7 +92,7 @@ Add to the urls.py: url(r'^payments/', include('djstripe.urls', namespace="djstripe")), -Then tell stripe about the webhook (Stripe webhook docs can be found `here <https://stripe.com/docs/webhooks>`_) using the full url of your endpoint from the urls.py step above (e.g. ``https://yourwebsite.com/payments/webhook``). +Then tell Stripe about the webhook (Stripe webhook docs can be found `here <https://stripe.com/docs/webhooks>`_) using the full URL of your endpoint from the urls.py step above (e.g. ``https://yourwebsite.com/payments/webhook``). Run the commands::
Update the paths of keys and certs Also added instructions for supporting multiple switches.
@@ -4,19 +4,22 @@ This document outlines the steps needed to test that a switch supports self-sign ## Prepare the keys and certificates. ### Generate key pairs for the controller. - /usr/bin/openssl genrsa -out /tmp/ctrlr.key 2048 - /usr/bin/openssl req -new -x509 -nodes -days 3650 -subj '/C=US/ST=CA/L=Mountain View/O=Faucet/OU=Faucet/CN=CTRLR_1' -key /tmp/ctrlr.key -out /tmp/ctrlr.cert + /usr/bin/openssl genrsa -out /etc/ryu/ctrlr.key 2048 + /usr/bin/openssl req -new -x509 -nodes -days 3650 -subj '/C=US/ST=CA/L=Mountain View/O=Faucet/OU=Faucet/CN=CTRLR_1' -key /etc/ryu/ctrlr.key -out /etc/ryu/ctrlr.cert ### Generate key pairs for the switch. - /usr/bin/openssl genrsa -out /tmp/sw.key 2048 - /usr/bin/openssl req -new -x509 -nodes -days 3650 -subj '/C=US/ST=CA/L=Mountain View/O=Faucet/OU=Faucet/CN=SW_1' -key /tmp/sw.key -out /tmp/sw.cert + /usr/bin/openssl genrsa -out /etc/ryu/sw.key 2048 + /usr/bin/openssl req -new -x509 -nodes -days 3650 -subj '/C=US/ST=CA/L=Mountain View/O=Faucet/OU=Faucet/CN=SW_1' -key /etc/ryu/sw.key -out /etc/ryu/sw.cert ## Push the key pairs to the switch. -Copy /tmp/ctrlr.cert /tmp/sw.key and /tmp/sw.cert to the switch. Configure the switch to use the keys. For example, the command for OVS would be: +Copy /etc/ryu/ctrlr.cert /etc/ryu/sw.key and /etc/ryu/sw.cert to the switch. Configure the switch to use the keys. For example, the command for OVS would be: - ovs-vsctl set-ssl /tmp/sw.key /tmp/sw.cert /tmp/ctrlr.cert + ovs-vsctl set-ssl /etc/ryu/sw.key /etc/ryu/sw.cert /etc/ryu/ctrlr.cert ovs-vsctl set-controller br0 ssl:<ctrlr_ip>:6653 -## Start Faucet with the keys. -Update [faucet/tests/hw_switch_config.yaml](https://github.com/REANNZ/faucet/blob/master/tests/hw_switch_config.yaml) to match the configurations of the hardware switch. Start the Faucet controller: +## Start Faucet with the keys (make sure the keys are readable by the user that +starts the faucet process) - ryu-manager --ctl-privkey /tmp/ctrlr.key --ctl-cert /tmp/ctrlr.cert --ca-certs /tmp/sw.cert faucet.faucet --verbose + ryu-manager --ctl-privkey /etc/ryu/ctrlr.key --ctl-cert /etc/ryu/ctrlr.cert --ca-certs /etc/ryu/sw.cert faucet.faucet --verbose + +## Support multiple switches +To support multiple switches, generate key pairs for each switch, and concatenate their certificates into one file and use that file as */etc/ryu/sw.cert*.