Columns: message (string, lengths 13 to 484) and diff (string, lengths 38 to 4.63k)
Split long usecase text over multiple lines Fixes
@@ -22,6 +22,7 @@ class UseCaseItem(Classified, ElementPresentation): ), Text( text=lambda: self.subject.name or "", + width=lambda: self.width - 24, style={"font-weight": FontWeight.BOLD}, ), draw=draw_usecase,
0.8.2 changelog Test Plan: inspection Reviewers: alangenfeld
# Changelog -## 0.8.2 (Upcoming) +## 0.8.2 **Bugfix** -- Fixes issues with `dagster instance migrate` +- Fixes issues with `dagster instance migrate`. - Fixes bug in `launch_scheduled_execution` that would mask configuration errors. - Fixes bug in dagit where schedule related errors were not shown. +- Fixes JSON-serialization error in `dagster-k8s` when specifying per-step resources. **New** +- Makes `label` optional parameter for materializations with `asset_key` specified. +- Changes `Assets` page to have a typeahead selector and hierarchical views based on asset_key path. - _dagster-ssh_ - adds SFTP get and put functions to `SSHResource`, replacing sftp_solid. +**Docs** + +- Various docs corrections + ## 0.8.1 **Bugfix**
Updated style of loading indicator We don't have one true standard here, but make it look more like reports and mobile workers instead of the bright blue label that we don't use anywhere else.
</span> </div> - <div class="label label-primary label-lg hide" + <h4 id="loading" class="hide" data-bind="visible: fetchState() === 'pending', css: {hide: false}"> - <i class="fa fa-spin fa-spinner"></i> {% trans "Loading Versions..." %} - </div> + <i class="fa fa-spin fa-spinner"></i> + {% trans "Loading versions..." %} + </h4> <div class="alert alert-danger hide" data-bind="visible: fetchState() === 'error', css: {hide: false}">
add the Navitia API The [Navitia](https://www.navitia.io/) API is an open API built around transport data. You can query for journeys, isochrones, schedules, realtime data, ...
@@ -442,6 +442,7 @@ Please note a passing build status indicates all listed APIs are available since | Community Transit | Transitland API | No | Yes | [Go!](https://github.com/transitland/transitland-datastore/blob/master/README.md#api-endpoints) | | Goibibo | API for travel search | `apiKey` | Yes | [Go!](https://developer.goibibo.com/docs) | | Indian Railways | Indian Railways Information | `token` | No | [Go!](http://api.erail.in/) | +| Navitia | The open API for building cool stuff with transport data | `apiKey` | Yes | [Go!](https://api.navitia.io/) | | The Nomad List | A list of the best places to live/work remotely | No | Yes | [Go!](https://nomadlist.com/faq) | | Schiphol Airport | Schiphol | `apiKey` | Yes | [Go!](https://developer.schiphol.nl/) | | TransitLand | Transit Aggregation | No | Yes | [Go!](https://transit.land/documentation/datastore/api-endpoints.html) |
[Runtime][PipelineExecutor] Polish the name and comments of a variable. Polish comments and the variable name
@@ -515,8 +515,8 @@ class BackendRuntime { std::unordered_map<int, ModuleInputPairList> children_; /*\brief A map including the runtime input index and the notification data structure.*/ std::unordered_map<int, std::shared_ptr<DataNotify>> parents_notify_; - /*\brief The times of using pipeline function. */ - uint32_t statistic_pipeline_execute_times_ = 0; + /*\brief The execution count of the 'RunPipeline' function. */ + uint32_t pipeline_execution_count_ = 0; /*! *\brief In order to transfer data from one backend runtime to another, we need a local * tensor variable as a medium. "input_tensor_local_copy_" is a map including @@ -691,7 +691,7 @@ class BackendRuntime { * \brief Getting the times of using pipeline function. * \return The times of using pipeline function. */ - int GetExecutionCount() const { return statistic_pipeline_execute_times_; } + int GetExecutionCount() const { return pipeline_execution_count_; } /*! * \brief Initializing data structures for the pipeline execution. * \param config The pipeline configueration. @@ -768,7 +768,7 @@ class BackendRuntime { void RunPipeline() { Run(); ForwardingOutputDataToChildren(); - statistic_pipeline_execute_times_++; + pipeline_execution_count_++; } }; /*!
back out part of last change. The library works fine without the `__init__` but it's easier to mock out the properties this way.
@@ -2,6 +2,12 @@ from robot.libraries.BuiltIn import BuiltIn class BaseLibrary: + def __init__(self): + self._builtin = None + self._cumulusci = None + self._salesforce_api = None + self._salesforce = None + @property def salesforce(self): if getattr(self, "_salesforce", None) is None:
Fix example args in def table() Fix arg name from "name" to "table". Issue: TypeError: table() got an unexpected keyword argument 'name'
@@ -425,7 +425,7 @@ def table( Examples -------- >>> import awswrangler as wr - >>> df_table = wr.catalog.table(database='default', name='my_table') + >>> df_table = wr.catalog.table(database='default', table='my_table') """ client_glue: boto3.client = _utils.client(service_name="glue", session=boto3_session)
Add Callable and Mapping Also reorder for mypy
@@ -60,15 +60,25 @@ else: from collections import OrderedDict -if PYTHON_VERSION >= (3, 9): - from collections.abc import Iterable, Sequence +if PYTHON_VERSION < (3, 9): + from typing import ( + Callable, + Dict, + FrozenSet, + Iterable, + List, + Mapping, + Sequence, + Set, + Tuple, + ) +else: + from collections.abc import Callable, Iterable, Mapping, Sequence Dict = dict FrozenSet = frozenset List = list Set = set Tuple = tuple -else: - from typing import Dict, FrozenSet, Iterable, List, Sequence, Set, Tuple # PEP 616 string methods
nagios: Remove some default files. Nagios ships with a bunch of default configuration files that one needs to delete in order to configure it.
@@ -48,6 +48,15 @@ class zulip_ops::nagios { source => 'puppet:///modules/zulip_ops/pagerduty_nagios.pl', } + file { [ '/etc/nagios3/conf.d/extinfo_nagios2.cfg', + '/etc/nagios3/conf.d/services_nagios2.cfg', + '/etc/nagios3/conf.d/contacts_nagios2.cfg', + '/etc/nagios3/conf.d/hostgroups_nagios2.cfg', + '/etc/nagios3/conf.d/localhost_nagios2.cfg', + ]: + ensure => absent, + } + file { '/etc/nagios3/conf.d/zulip_nagios.cfg': ensure => file, mode => 644,
Don't trigger signature help outside completion scopes Fixes
@@ -53,6 +53,7 @@ class SignatureHelpListener(sublime_plugin.ViewEventListener): self.view = view self._initialized = False self._signature_help_triggers = [] # type: List[str] + self._signature_help_selector = view.settings().get("auto_complete_selector", "") or "" # type: str self._visible = False self._help = None # type: Optional[SignatureHelp] self._renderer = ColorSchemeScopeRenderer(self.view) @@ -84,6 +85,9 @@ class SignatureHelpListener(sublime_plugin.ViewEventListener): if not self._initialized: self.initialize() + if not self.view.match_selector(pos, self._signature_help_selector): + return + if self._signature_help_triggers: last_char = self.view.substr(pos - 1) if last_char in self._signature_help_triggers:
Convert docstring param type to type-hint in three_dimensions.py * Convert docstring param type to type-hint in three_dimensions.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see
@@ -691,7 +691,7 @@ class Cone(Surface): Parameters ---------- - direction : :class:`numpy.array` + direction The direction of the apex. """ self.direction = direction
Add AllowDenyLists cog. This includes commands to add, remove and show the items in the whitelists and blacklists for the different list types. Commands are limited to Moderators+.
@@ -53,6 +53,7 @@ bot.load_extension("bot.cogs.verification") # Feature cogs bot.load_extension("bot.cogs.alias") +bot.load_extension("bot.cogs.allow_deny_lists") bot.load_extension("bot.cogs.defcon") bot.load_extension("bot.cogs.dm_relay") bot.load_extension("bot.cogs.duck_pond")
Qt receive widgets: do not show requests that have expired They may still be retrieved from the menu
@@ -67,7 +67,7 @@ from electrum.util import (format_time, AddTransactionException, BITCOIN_BIP21_URI_SCHEME, InvoiceError, parse_max_spend) from electrum.invoices import PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice -from electrum.invoices import PR_PAID, PR_UNPAID, PR_FAILED, pr_expiration_values, Invoice +from electrum.invoices import PR_PAID, PR_UNPAID, PR_FAILED, PR_EXPIRED, pr_expiration_values, Invoice from electrum.transaction import (Transaction, PartialTxInput, PartialTransaction, PartialTxOutput) from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet, @@ -1317,12 +1317,19 @@ class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger): addr = req.get_address() or '' amount_sat = req.get_amount_sat() or 0 address_help = '' if addr else _('Amount too small to be received onchain') + URI_help = '' lnaddr = req.lightning_invoice bip21_lightning = lnaddr if self.config.get('bip21_lightning', False) else None URI = req.get_bip21_URI(lightning=bip21_lightning) lightning_online = self.wallet.lnworker and self.wallet.lnworker.num_peers() > 0 can_receive_lightning = self.wallet.lnworker and amount_sat <= self.wallet.lnworker.num_sats_can_receive() - if lnaddr is None: + has_expired = self.wallet.get_request_status(key) == PR_EXPIRED + if has_expired: + URI_help = ln_help = address_help = _('This request has expired') + URI = lnaddr = address = '' + can_rebalance = False + can_swap = False + elif lnaddr is None: ln_help = _('This request does not have a Lightning invoice.') lnaddr = '' can_rebalance = False @@ -1362,6 +1369,7 @@ class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger): self.receive_address_help_text.setText(address_help) self.receive_URI_e.setText(URI) self.receive_URI_qr.setData(URI) + self.receive_URI_help.setText(URI_help) self.receive_lightning_e.setText(lnaddr) # TODO maybe prepend "lightning:" ?? self.receive_lightning_help_text.setText(ln_help) self.receive_lightning_qr.setData(lnaddr_qr)
Describe usage of Pyccel container image This is a modified version of PR describing Pyccel container image usage based on the generated images. As of now, images are available on Docker Hub (https://hub.docker.com/r/pyccel/pyccel/) and on the GitHub Container Registry (ghcr.io, private at the moment).
@@ -24,6 +24,16 @@ Pyccel comes with a selection of **extensions** allowing you to convert calls to - mpi4py - h5py (not available yet) +Pyccel Installation Methods +*************************** + +Pyccel can be installed on virtually any machine that provides Python 3, the pip package manager, a C/Fortran compiler, and an Internet connection. +Some advanced features of Pyccel require additional non-Python libraries to be installed, for which we provide detailed instructions below. + +Alternatively, Pyccel can be deployed through a **Linux Docker image** that contains all dependencies, and which can be setup with any version of Pyccel. +For more information, please read the section on `Pyccel container images`_. + + Requirements ============ @@ -56,6 +66,8 @@ We recommend using GFortran/Gcc and Open-MPI. Pyccel also depends on several Python3 packages, which are automatically downloaded by pip, the Python Package Installer, during the installation process. In addition to these, unit tests require the *scipy*, *mpi4py*, *pytest* and *coverage* packages, while building the documentation requires Sphinx <http://www.sphinx-doc.org/>. + + Linux Debian/Ubuntu/Mint ************************ @@ -224,6 +236,7 @@ Then build the documentation with:: Then, direct your browser to ``_build/html/index.html``. + Testing ======= @@ -231,6 +244,30 @@ To test your Pyccel installation please run the script *tests/run_tests_py3.sh* Continuous testing runs on Travis CI: <https://travis-ci.com/github/pyccel/pyccel> + +Pyccel Container Images +======================= + +Pyccel container images are available through both Docker Hub (docker.io) and the GitHub Container Registry (ghcr.io). + +The images: + +- are based on ubuntu:latest +- use distro packaged python3, gcc, gfortran, blas and openmpi +- support all pyccel releases except the legacy "0.1" + +Image tags match pyccel releases. + +In order to implement your pyccel accelerated code, you can use a host based volume during the pyccel container creation. + +For example:: + + docker pull pyccel/pyccel:v1.0.0 + docker run -it -v $PWD:/data:rw pyccel/pyccel:v1.0.0 bash + +If you are using SELinux, you will need to set the right context for your host based volume. +Alternatively you may have docker or podman set the context using -v $PWD:/data:rwz instead of -v $PWD:/data:rw . + Known bugs ==========
Pontoon: Update Indonesian (id) localization of AMO Localization authors: Ilham N.P. Timothy Aditya Sutantyo alamanda
@@ -4,8 +4,8 @@ msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2018-10-01 13:10+0000\n" -"PO-Revision-Date: 2016-11-03 22:05+0000\n" -"Last-Translator: eljuno <[email protected]>\n" +"PO-Revision-Date: 2018-08-31 18:55+0000\n" +"Last-Translator: Ilham N.P. <[email protected]>\n" "Language-Team: LANGUAGE <[email protected]>\n" "Language: id\n" "MIME-Version: 1.0\n" @@ -205,7 +205,7 @@ msgid "Use original" msgstr "Gunakan yang asli" msgid "Week of {0}" -msgstr "Minggu ke {0}" +msgstr "Pekan ke {0}" msgid "{0} download" msgid_plural "{0} downloads"
Mark block invalid on BlockValidationError Since we do this whenever block validation fails, do it when we catch the exception.
@@ -316,7 +316,6 @@ class BlockValidator(object): blkw)) if not self._validate_permissions(blkw, prev_state_root): - blkw.status = BlockStatus.Invalid raise BlockValidationError( 'Block {} failed permission validation'.format(blkw)) @@ -336,22 +335,16 @@ class BlockValidator(object): validator_id=public_key) if not consensus_block_verifier.verify_block(blkw): - blkw.status = BlockStatus.Invalid raise BlockValidationError( 'Block {} failed {} consensus validation'.format( blkw, consensus)) if not self._validate_on_chain_rules(blkw, prev_state_root): - blkw.status = BlockStatus.Invalid raise BlockValidationError( 'Block {} failed on-chain validation rules'.format( blkw)) - try: self._validate_batches_in_block(blkw, prev_state_root) - except BlockValidationError: - blkw.status = BlockStatus.Invalid - raise # since changes to the chain-head can change the state of the # blocks in BlockStore we have to revalidate this block. @@ -366,6 +359,7 @@ class BlockValidator(object): blkw.status = BlockStatus.Valid except BlockValidationError as err: + blkw.status = BlockStatus.Invalid raise err except ChainHeadUpdated as e:
add decay parameter in ref_adagrad Summary: Pull Request resolved: Add decay parameter to match the C++ Adagrad implementation.
@@ -16,6 +16,7 @@ def ref_adagrad( using_fp16=False, output_effective_lr=False, output_effective_lr_and_update=False, + decay=1.0, row_wise=False, ): mom_in_f32 = mom_in @@ -25,9 +26,9 @@ def ref_adagrad( param_in_f32 = param_in.astype(np.float32) if row_wise: - mom_out = mom_in_f32 + np.mean(np.square(grad)) + mom_out = decay * mom_in_f32 + np.mean(np.square(grad)) else: - mom_out = mom_in_f32 + np.square(grad) + mom_out = decay * mom_in_f32 + np.square(grad) effective_lr = lr / (np.sqrt(mom_out) + epsilon) grad_adj = effective_lr * grad param_out = param_in_f32 + grad_adj
GlusterFS: add minor README note for Closes
@@ -55,7 +55,7 @@ defined: | Name | Default value | Description | |-------------------|---------------|-----------------------------------------| -| glusterfs_devices | None | A list of block devices that will be completely managed as part of a GlusterFS cluster. There must be at least one device listed. Each device must be bare, e.g. no partitions or LVM PVs. **Example:** '[ "/dev/sdb" ]' +| glusterfs_devices | None | A list of block devices that will be completely managed as part of a GlusterFS cluster. There must be at least one device listed. Each device must be bare, e.g. no partitions or LVM PVs. **Example:** '[ "/dev/sdb" ]' **NOTE:** You MUST set this as a host variable on each node host. For some reason, if you set this as a group variable it gets interpreted as a string rather than an array. See https://github.com/openshift/openshift-ansible/issues/5071 In addition, each host may specify the following variables to further control their configuration as GlusterFS nodes:
Fixed the Hello world code Without webview.start() it does nothing
@@ -27,6 +27,7 @@ _On Linux you need additional libraries. Refer to the [installation](https://pyw ``` python import webview webview.create_window('Hello world', 'https://pywebview.flowrl.com/hello') +webview.start() ``` Explore _pywebview_ further by reading [documentation](https://pywebview.flowrl.com/guide), [examples](https://pywebview.flowrl.com/examples) or [contributing](https://pywebview.flowrl.com/contributing) .
Update obliquerat.txt Full-path detection for compromised domains.
# Reference: https://www.virustotal.com/gui/file/f5b007c75d953ed91595c1b1217d5f5301ef8043ea812472ccdb53472a350de3/detection 81.61.77.92:3344 + +# Reference: https://blog.talosintelligence.com/2021/02/obliquerat-new-campaign.html +# Reference: https://www.virustotal.com/gui/file/0196bc9ac3db6f02cfa97323c8fce6cc7318b8f8fadb3e73bdf7971b3c541964/detection + +185.183.98.182:4701 +drivestransfer.com/myfiles/Dinner%20Invitation.doc/win10/Dinner%20Invitation.doc +iiaonline.in/DefenceLogo/theta.bmp +iiaonline.in/timon.jpeg +iiaonline.in/9999.jpg +iiaonline.in/merj.bmp +iiaonline.in/111.jpg +iiaonline.in/sasha.jpg +iiaonline.in/111.png +iiaonline.in/camela.bmp +larsentobro.com/mbda/goliath1.bmp +larsentobro.com/mbda/mundkol +micrsoft.ddns.net
Update quickstart.md minor 'typo' in command for training the model
@@ -30,7 +30,7 @@ Federal Master Trainer and Senior Instructor of the Italian Federation of Aerobi ### Step 2: Train the model ```bash -python train.py -data data/demo.train.pt -save_model demo-model +python train.py -data data/demo -save_model demo-model ``` The main train command is quite simple. Minimally it takes a data file
Relax matplotlib pin for py38 Test Plan: unit Reviewers: max, alangenfeld
'geopandas', 'google-api-python-client', 'google-cloud-storage', - 'matplotlib==3.0.2; python_version >= "3.5"', + 'matplotlib>=3.0.2; python_version >= "3.5"', 'matplotlib==2.2.4; python_version < "3.5"', 'mock', 'pytest-mock',
[Hexagon] Add hexagon_posix.cc to TVM/RT sources in the right place This file was added before the variable with the TVM/RT sources was initialized. The initialization overwrote the addition.
@@ -209,13 +209,6 @@ if(USE_VM_PROFILER) list(APPEND COMPILER_SRCS ${BACKEND_VM_PROFILER_SRCS}) endif(USE_VM_PROFILER) -if(BUILD_FOR_HEXAGON) - # Add file implementing posix_memalign. - list(APPEND RUNTIME_SRCS src/runtime/hexagon/hexagon_posix.cc) - - add_definitions(-D_MACH_I32=int) -endif() - file(GLOB DATATYPE_SRCS src/target/datatype/*.cc) list(APPEND COMPILER_SRCS ${DATATYPE_SRCS}) @@ -229,6 +222,13 @@ file(GLOB RUNTIME_SRCS src/runtime/vm/*.cc ) +if(BUILD_FOR_HEXAGON) + # Add file implementing posix_memalign. + list(APPEND RUNTIME_SRCS src/runtime/hexagon/hexagon_posix.cc) + + add_definitions(-D_MACH_I32=int) +endif() + # Package runtime rules if(NOT USE_RTTI) add_definitions(-DDMLC_ENABLE_RTTI=0)
Prevent arbitrary test requirement files in pip installations etc. from interfering with the ccache. See
@@ -274,7 +274,7 @@ jobs: uses: hendrikmuhs/[email protected] with: variant: ${{ startsWith(runner.os, 'windows') && 'sccache' || 'ccache' }} # fake ternary - key: ${{ runner.os }}-hendrikmuhs-ccache${{ matrix.extra_hash }}-${{ matrix.python-version }}-${{ matrix.backend == 'c' || matrix.backend == 'c,cpp' }}-${{ contains(matrix.backend, 'cpp') }}-${{ hashFiles('**/test-requirements*.txt', '**/ci.yml', '**/ci-run.sh') }} + key: ${{ runner.os }}-hendrikmuhs-ccache${{ matrix.extra_hash }}-${{ matrix.python-version }}-${{ matrix.backend == 'c' || matrix.backend == 'c,cpp' }}-${{ contains(matrix.backend, 'cpp') }}-${{ hashFiles('test-requirements*.txt', '.github/**/ci.yml', '.github/**/ci-run.sh') }} max-size: ${{ env.CCACHE_MAXSIZE }} - name: Run CI
[skip ci] Fix for ping_reviewers wait time This was set to 1 day instead of 1 week cc
@@ -18,4 +18,4 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | set -eux - python tests/scripts/ping_reviewers.py --wait-time-minutes 1440 + python tests/scripts/ping_reviewers.py --wait-time-minutes 10080
Handling <end-time> of -1 in the segments file Update kaldi.py to support <end-time> equal to -1 in the segments file.
@@ -125,13 +125,18 @@ def load_kaldi_data_dir( genders = load_kaldi_text_mapping(path / "spk2gender") languages = load_kaldi_text_mapping(path / "utt2lang") + # to support <end-time> == -1 in segments file + # https://kaldi-asr.org/doc/extract-segments_8cc.html + # <end-time> of -1 means the segment runs till the end of the WAV file supervision_set = SupervisionSet.from_segments( SupervisionSegment( id=fix_id(segment_id), recording_id=recording_id, start=float(start), duration=add_durations( - float(end), -float(start), sampling_rate=sampling_rate + float(end) if end != "-1" else durations[recording_id], + -float(start), + sampling_rate=sampling_rate, ), channel=0, text=texts[segment_id],
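For context (an editorial note, not part of the patch): a Kaldi segments line has the form `<utterance-id> <recording-id> <start-time> <end-time>`. A minimal sketch of the substitution the patch performs, using hypothetical values:

```python
# Hypothetical values; an <end-time> of "-1" means "until the end of the
# recording", so the recording's duration is substituted for it.
durations = {"rec1": 12.5}               # seconds, keyed by recording id
start, end, recording_id = "3.0", "-1", "rec1"

effective_end = durations[recording_id] if end == "-1" else float(end)
segment_duration = effective_end - float(start)   # 9.5 seconds
```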
refactor: convert doctype transaction log db.sql get_current_index()
@@ -9,6 +9,7 @@ from frappe.model.document import Document from frappe.query_builder import DocType from frappe.utils import cint, now_datetime + class TransactionLog(Document): def before_insert(self): index = get_current_index() @@ -29,18 +30,15 @@ class TransactionLog(Document): def hash_line(self): sha = hashlib.sha256() sha.update( - frappe.safe_encode(str(self.row_index)) + \ - frappe.safe_encode(str(self.timestamp)) + \ - frappe.safe_encode(str(self.data)) + frappe.safe_encode(str(self.row_index)) + + frappe.safe_encode(str(self.timestamp)) + + frappe.safe_encode(str(self.data)) ) return sha.hexdigest() def hash_chain(self): sha = hashlib.sha256() - sha.update( - frappe.safe_encode(str(self.transaction_hash)) + \ - frappe.safe_encode(str(self.previous_hash)) - ) + sha.update(frappe.safe_encode(str(self.transaction_hash)) + frappe.safe_encode(str(self.previous_hash))) return sha.hexdigest()
Fix counter in ST_ForcedResponseResults There was an error in the counter used to build the results matrix. Instead of using counter "j", related to the result itself, it was using counter "i", related to the probe numbering.
@@ -1272,7 +1272,7 @@ class ST_ForcedResponseResults: probe_resp = np.zeros_like(self.magnitude[:, :, 0]) for j, mag in enumerate(self.magnitude): _probe_resp = operator @ np.vstack((mag[:, dofx], mag[:, dofy])) - probe_resp[i] = np.sqrt((_probe_resp[0] * np.cos(angle)) ** 2 + + probe_resp[j] = np.sqrt((_probe_resp[0] * np.cos(angle)) ** 2 + (_probe_resp[1] * np.sin(angle)) ** 2) # fmt: on @@ -1323,7 +1323,7 @@ class ST_ForcedResponseResults: ) color_i += 1 - fig.update_xaxes(title_text="<b>Frequency</b>") + fig.update_xaxes(title_text="<b>Frequency (rad/s)</b>") fig.update_yaxes(title_text=y_axis_label) fig.update_layout(**kwargs) @@ -1372,11 +1372,11 @@ class ST_ForcedResponseResults: probe_phase = np.zeros_like(self.phase[:, :, 0]) for j, phs in enumerate(self.phase): aux_phase = phs[:, p[0] * self.number_dof] - probe_phase[i] = np.array( + probe_phase[j] = np.array( [i + 2 * np.pi if i < 0 else i for i in aux_phase] ) angle = p[1] - probe_phase[i] = probe_phase[i] - angle + probe_phase[j] = probe_phase[j] - angle fig.add_trace( go.Scatter( @@ -1425,8 +1425,8 @@ class ST_ForcedResponseResults: ) color_i += 1 - fig.update_xaxes(title_text="<b>Frequency</b>") - fig.update_yaxes(title_text="<b>Phase Angle</b>") + fig.update_xaxes(title_text="<b>Frequency (rad/s)</b>") + fig.update_yaxes(title_text="<b>Phase Angle (rad)</b>") fig.update_layout(**kwargs), return fig @@ -1500,18 +1500,18 @@ class ST_ForcedResponseResults: probe_resp = np.zeros_like(self.magnitude[:, :, 0]) for j, mag in enumerate(self.magnitude): _probe_resp = operator @ np.vstack((mag[:, dofx], mag[:, dofy])) - probe_resp[i] = np.sqrt((_probe_resp[0] * np.cos(angle)) ** 2 + + probe_resp[j] = np.sqrt((_probe_resp[0] * np.cos(angle)) ** 2 + (_probe_resp[1] * np.sin(angle)) ** 2) # fmt: on probe_phase = np.zeros_like(self.phase[:, :, 0]) for j, phs in enumerate(self.phase): aux_phase = phs[:, p[0] * self.number_dof] - probe_phase[i] = np.array( + probe_phase[j] = np.array( [i + 2 * np.pi if i < 0 else i for i in aux_phase] ) angle = p[1] - probe_phase[i] = probe_phase[i] - angle + probe_phase[j] = probe_phase[j] - angle fig.add_trace( go.Scatterpolar(
allow drug resistance reconciliation to commit when passed since it's confirmed now. Skip logging missing person cases since those should be orphaned cases from migration clean up [skip ci]
@@ -25,13 +25,11 @@ class Command(BaseModelReconciliationCommand): "retain_case_id", "retain_reason", "closed_case_ids", - "closed_extension_case_ids", - "notes" + "closed_extension_case_ids" ] def handle(self, *args, **options): - # self.commit = options.get('commit') - self.commit = False + self.commit = options.get('commit') self.recipient = (options.get('recipient') or '[email protected]') self.recipient = list(self.recipient) if not isinstance(self.recipient, basestring) else [self.recipient] self.result_file_name = self.setup_result_file() @@ -53,10 +51,6 @@ class Command(BaseModelReconciliationCommand): try: person_case = get_person_case_from_occurrence(DOMAIN, occurrence_case_id) except ENikshayCaseNotFound: - self.writerow({ - "occurrence_case_id": occurrence_case_id, - "notes": "person case not found" - }) return False self.person_case_id = person_case.case_id return super(Command, self).public_app_case(person_case)
Replace supervisord with systemd in LDAP troubleshooting Update the LDAP troubleshooting steps so that they are consistent with the rest of the documentation, which nowadays expects us to be running netbox via systemd instead of supervisord. Fixes
@@ -135,7 +135,7 @@ AUTH_LDAP_CACHE_TIMEOUT = 3600 ## Troubleshooting LDAP -`supervisorctl restart netbox` restarts the Netbox service, and initiates any changes made to `ldap_config.py`. If there are syntax errors present, the NetBox process will not spawn an instance, and errors should be logged to `/var/log/supervisor/`. +`systemctl restart netbox` restarts the Netbox service, and initiates any changes made to `ldap_config.py`. If there are syntax errors present, the NetBox process will not spawn an instance, and errors should be logged to `/var/log/messages`. For troubleshooting LDAP user/group queries, add the following lines to the start of `ldap_config.py` after `import ldap`.
Verification: move time constants above messages Allows referencing the constants within the message bodies.
@@ -16,6 +16,16 @@ from bot.utils.checks import InWhitelistCheckFailure, without_role_check log = logging.getLogger(__name__) +UNVERIFIED_AFTER = 3 # Amount of days after which non-Developers receive the @Unverified role +KICKED_AFTER = 30 # Amount of days after which non-Developers get kicked from the guild + +# Number in range [0, 1] determining the percentage of unverified users that are safe +# to be kicked from the guild in one batch, any larger amount will require staff confirmation, +# set this to 0 to require explicit approval for batches of any size +KICK_CONFIRMATION_THRESHOLD = 0 + +BOT_MESSAGE_DELETE_DELAY = 10 + ON_JOIN_MESSAGE = f""" Hello! Welcome to Python Discord! @@ -43,16 +53,6 @@ If you'd like to unsubscribe from the announcement notifications, simply send `! <#{constants.Channels.bot_commands}>. """ -UNVERIFIED_AFTER = 3 # Amount of days after which non-Developers receive the @Unverified role -KICKED_AFTER = 30 # Amount of days after which non-Developers get kicked from the guild - -# Number in range [0, 1] determining the percentage of unverified users that are safe -# to be kicked from the guild in one batch, any larger amount will require staff confirmation, -# set this to 0 to require explicit approval for batches of any size -KICK_CONFIRMATION_THRESHOLD = 0 - -BOT_MESSAGE_DELETE_DELAY = 10 - class Verification(Cog): """User verification and role self-management."""
update test for small contour in analyze_object The correction made to analyze_object for the case where the object is too small causes the test to fail. Really the function should return None if the object has fewer than 5 vertices.
@@ -939,7 +939,7 @@ def test_plantcv_analyze_object_small_contour(): # Test with debug = None pcv.params.debug = None obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask) - assert len(obj_images) != 0 + assert obj_images is None def test_plantcv_apply_mask_white():
Add missing LEXUS_NXH EPS f/w `@wiDa#7648` 2018 Lexus NX 300H DongleID/route 886a703694424882|2021-05-17--23-01-14
@@ -1443,8 +1443,9 @@ FW_VERSIONS = { b'881517804100\x00\x00\x00\x00', ], (Ecu.eps, 0x7a1, None): [ - b'8965B78100\x00\x00\x00\x00\x00\x00', b'8965B78060\x00\x00\x00\x00\x00\x00', + b'8965B78080\x00\x00\x00\x00\x00\x00', + b'8965B78100\x00\x00\x00\x00\x00\x00', ], (Ecu.fwdRadar, 0x750, 0xf): [ b'8821F4702300\x00\x00\x00\x00',
Added a docstring to paulistring module. Fixes:
# See the License for the specific language governing permissions and # limitations under the License. +"""Methods related to optimizing and transforming PauliStrings.""" + from cirq.contrib.paulistring.convert_to_pauli_string_phasors import ( ConvertToPauliStringPhasors,)
Fix typo: find_libcrypto > _find_libcrypto. Fix a small typo to give CI a kick.
@@ -127,7 +127,7 @@ class RSAX931Test(TestCase): @skipIf(not salt.utils.platform.is_windows(), "Host OS is not Windows.") def test_find_libcrypto_win32(self): """ - Test find_libcrypto on Windows hosts. + Test _find_libcrypto on Windows hosts. """ lib_path = _find_libcrypto() self.assertEqual(lib_path, "libeay32") @@ -138,7 +138,7 @@ class RSAX931Test(TestCase): ) def test_find_libcrypto_smartos(self): """ - Test find_libcrypto on a SmartOS host. + Test _find_libcrypto on a SmartOS host. """ lib_path = _find_libcrypto() self.assertTrue( @@ -150,7 +150,7 @@ class RSAX931Test(TestCase): @skipIf(not salt.utils.platform.is_sunos(), "Host OS is not Solaris-like.") def test_find_libcrypto_sunos(self): """ - Test find_libcrypto on a Solaris-like host. + Test _find_libcrypto on a Solaris-like host. """ lib_path = _find_libcrypto() passed = False @@ -163,7 +163,7 @@ class RSAX931Test(TestCase): @skipIf(not salt.utils.platform.is_aix(), "Host OS is not IBM AIX.") def test_find_libcrypto_aix(self): """ - Test find_libcrypto on an IBM AIX host. + Test _find_libcrypto on an IBM AIX host. """ lib_path = _find_libcrypto() if os.path.isdir("/opt/salt/lib"): @@ -176,7 +176,7 @@ class RSAX931Test(TestCase): @skipIf(not salt.utils.platform.is_darwin(), "Host OS is not Darwin-like or macOS.") def test_find_libcrypto_darwin(self): """ - Test find_libcrypto on a Darwin-like or macOS host. + Test _find_libcrypto on a Darwin-like or macOS host. """ lib_path = _find_libcrypto() passed = False @@ -195,7 +195,7 @@ class RSAX931Test(TestCase): @patch.object(sys, "platform", "unknown") def test_find_libcrypto_unsupported(self): """ - Ensure that find_libcrypto works correctly on an unsupported host OS. + Ensure that _find_libcrypto works correctly on an unsupported host OS. """ with self.assertRaises(OSError): _find_libcrypto()
Check environment variable PIPENV_CUSTOM_VENV_NAME Resolves
@@ -365,6 +365,9 @@ class Project: @property def virtualenv_name(self) -> str: + custom_name = os.getenv("PIPENV_CUSTOM_VENV_NAME") + if custom_name: + return custom_name sanitized, encoded_hash = self._get_virtualenv_hash(self.name) suffix = "" if self.s.PIPENV_PYTHON:
Fix in FM alarmdetail report HG-- branch : feature/microservices
@@ -185,7 +185,9 @@ class ReportAlarmDetailApplication(ExtApplication): [("timestamp", 1)]): dt = datetime.datetime.now() - a["timestamp"] duration = dt.days * 86400 + dt.seconds - if duration and (duration < min_duration or duration > max_duration): + if duration and duration < min_duration: + continue + if duration and max_duration and duration > max_duration: continue total_objects = sum( ss["summary"] for ss in a["total_objects"])
Fix default Guild env path Was env but we changed to venv recently and missed this.
@@ -309,8 +309,8 @@ class SSHRemote(remotelib.Remote): if not self.guild_env: return [] cwd = self.guild_env - if cwd.endswith("/env"): - cwd = cwd[:-4] + if cwd.endswith("/venv"): + cwd = cwd[:-5] return ["QUIET=1 source %s/bin/activate" % self.guild_env] def one_run(self, run_id_prefix, attrs):
properties/helpers.mako: minor reformatting TN:
No_${type_name} : constant ${type_name} := (Env => null); - function Convert - (Self : ${type_name}; From : ${sem_n}) return ${sem_n} + function Convert (Self : ${type_name}; From : ${sem_n}) return ${sem_n} with Inline; - function Convert - (Self : ${type_name}; From : ${sem_n}) - return ${sem_n} - is + function Convert (Self : ${type_name}; From : ${sem_n}) return ${sem_n} is % if not conv_prop.has_implicit_env: pragma Unreferenced (Self); % endif <%def name="generate_logic_equal(eq_prop)"> <% struct = eq_prop.struct.name() %> - function Eq_${eq_prop.uid} (L, R : ${T.sem_node.name()}) return Boolean - is + function Eq_${eq_prop.uid} (L, R : ${T.sem_node.name()}) return Boolean is (if L.El.all in ${struct}_Type'Class and then R.El.all in ${struct}_Type'Class then ${eq_prop.name} (${struct} (L.El), ${struct} (R.El)) Free (Self.Dbg_Img); end Free; - package ${package_name} is - new Predicate_${len(formal_node_types)} + package ${package_name} is new Predicate_${len(formal_node_types)} (${T.sem_node.name()}, Eq_Node.Refs.Raw_Logic_Var, ${type_name}, Free => Free);
Try to fetch account key in blob batch upload When an account name is implicitly specified in the destination URI.
@@ -702,9 +702,25 @@ def process_blob_upload_batch_parameters(namespace): raise ValueError('incorrect usage: destination cannot be a blob url') else: namespace.destination_container_name = identifier.container - if not namespace.account_name: + + if namespace.account_name: + if namespace.account_name != identifier.account_name: + raise ValueError( + 'The given storage account name is not consistent with the account name in the destination URI') + else: namespace.account_name = identifier.account_name + if not (namespace.account_key or namespace.sas_token or namespace.connection_string): + validate_client_parameters(namespace) + + # it is possible the account name be overwritten by the connection string + if namespace.account_name != identifier.account_name: + raise ValueError( + 'The given storage account name is not consistent with the account name in the destination URI') + + if not (namespace.account_key or namespace.sas_token or namespace.connection_string): + raise ValueError('Missing storage account credential information.') + # 3. collect the files to be uploaded namespace.source = os.path.realpath(namespace.source) namespace.source_files = [c for c in glob_files_locally(namespace.source, namespace.pattern)]
check if event type is keydown Fix problem where you have to hold f11 to switch fs/windowed due to not checking event type.
@@ -255,7 +255,7 @@ class Window(BaseWindow): ): self.close() - if self._fs_key is not None and event.key.keysym.sym == self._fs_key: + if self._fs_key is not None and event.key.keysym.sym == self._fs_key and event.type == sdl2.SDL_KEYDOWN: self.fullscreen = not self.fullscreen if event.type == sdl2.SDL_KEYDOWN:
Fix KeyError on incorrect genesis hash Fix KeyError that occurs when creating an exception for an incorrect genesis hash.
@@ -315,7 +315,7 @@ class DB(util.LoggedClass): genesis_hash = genesis_hash.decode() if genesis_hash != self.coin.GENESIS_HASH: raise self.DBError('DB genesis hash {} does not match coin {}' - .format(state['genesis_hash'], + .format(genesis_hash, self.coin.GENESIS_HASH)) self.db_height = state['height'] self.db_tx_count = state['tx_count']
fix: Move custom-actions under page-actions The change in custom-actions was affecting custom-actions in the timeline; the issue was introduced with
} } -.custom-actions { - display: flex; - align-items: center; -} - .page-actions { align-items: center; .btn { .custom-btn-group { display: inline-flex; } + + .custom-actions { + display: flex; + align-items: center; + } } .layout-main-section-wrapper {
Variable is not hashable So it is not confused with an ordinary number.
@@ -42,9 +42,6 @@ class Variable: for handler in self._handlers: handler(self) - def __hash__(self): - return object.__hash__(self) - strength = reversible_property(lambda s: s._strength) def dirty(self):
client: do not call items() in get_oldest This causes slowness if self._items is too large.
@@ -158,8 +158,10 @@ class LRUDict(object): Raises KeyError if dict is empty. """ - for item in self._items.items(): - return item + + # self._items.items() is slow if self._items has many items. + for key in self._items: + return (key, self._items[key]) raise KeyError('dictionary is empty') def pop_oldest(self):
tests: Remove test_private_message_policy from test_realm.py. This commit removes test_private_message_policy, which is used to test changing private_message_policy via the 'PATCH /realm' endpoint, as we already do this in do_test_realm_update_api and an invalid value is also tested in test_invalid_integer_attribute_values.
@@ -536,20 +536,6 @@ class RealmTest(ZulipTestCase): result = self.client_patch("/json/realm", req) self.assert_json_error(result, "Invalid user_group_edit_policy") - def test_private_message_policy(self) -> None: - # We need an admin user. - self.login("iago") - req = dict( - private_message_policy=orjson.dumps(Realm.PRIVATE_MESSAGE_POLICY_DISABLED).decode() - ) - result = self.client_patch("/json/realm", req) - self.assert_json_success(result) - - invalid_value = 10 - req = dict(private_message_policy=orjson.dumps(invalid_value).decode()) - result = self.client_patch("/json/realm", req) - self.assert_json_error(result, "Invalid private_message_policy") - def test_invalid_integer_attribute_values(self) -> None: integer_values = [key for key, value in Realm.property_types.items() if value is int]
[MNT] temporarily remove stochastically failing tapnet from tests This PR emergency-removes tapnet-based estimators from tests to avoid CI failures unrelated to the PR. See for the bug.
@@ -29,6 +29,9 @@ EXCLUDE_ESTIMATORS = [ "RandomIntervalClassifier", "MiniRocket", "MatrixProfileTransformer", + # tapnet based estimators fail stochastically for unknown reasons, see #3525 + "TapNetRegressor", + "TapNetClassifier", ]
puppeteer_test/16: Fix "not secure" warning hiding the button. When typing the password in Firefox, it shows a "Not Secure" warning which was hiding the "#get_api_key_button". You can see the screenshot of it in the pull request. This commit fixes that issue by focusing on the button.
@@ -76,6 +76,12 @@ async function test_get_api_key(page: Page): Promise<void> { await common.fill_form(page, "#api_key_form", { password: test_credentials.default_user.password, }); + + // When typing the password in Firefox, it shows "Not Secure" warning + // which was hiding the Get API Key button. + // You can see the screenshot of it in https://github.com/zulip/zulip/pull/17136. + // Focusing on it will remove the warning. + await page.focus(get_api_key_button_selector); await page.click(get_api_key_button_selector); await page.waitForSelector("#show_api_key", {visible: true});
Add "visual reasoning" as a keyword Address meta review
"common sense", "paraphrase", "context-free question answering", - "riddle" + "riddle", + "visual reasoning" ], "preferred_score": "multiple_choice_grade", "metrics": [
Added Factory Tests (Rev 1) Added 3 tests to test out the new logging and CLI seeding features: - test_cli_seed - test_cli_seed_with_repeat - test_cli_verbosity * I had mistakenly added the file to the wrong directory in the Rev 0 commit.
@@ -8,6 +8,8 @@ import string import sys from ipaddress import ip_address, ip_network +import logging + try: from StringIO import StringIO except ImportError: # pragma: no cover @@ -123,6 +125,57 @@ class FactoryTestCase(unittest.TestCase): finally: sys.stdout = orig_stdout + def test_cli_seed(self): + from faker.cli import Command + orig_stdout = sys.stdout + try: + sys.stdout = StringIO() + base_args = ['faker', 'address'] + target_args = ['--seed', '967'] + commands = [Command(base_args + target_args), Command(base_args + target_args)] + cli_output = [None] * 2 + for i in range(2): + commands[i].execute() + cli_output[i] = sys.stdout.getvalue() + cli_output[1] = cli_output[1][len(cli_output[0]):] + self.assertEqual(cli_output[0][:10], cli_output[1][:10]) + finally: + sys.stdout = orig_stdout + + def test_cli_seed_with_repeat(self): + from faker.cli import Command + orig_stdout = sys.stdout + try: + sys.stdout = StringIO() + base_args = ['faker', 'address', '-r', '3'] + target_args = ['--seed', '967'] + commands = [Command(base_args + target_args), Command(base_args + target_args)] + cli_output = [None] * 2 + for i in range(2): + commands[i].execute() + cli_output[i] = sys.stdout.getvalue() + cli_output[1] = cli_output[1][len(cli_output[0]):] + self.assertEqual(cli_output[0], cli_output[1]) + finally: + sys.stdout = orig_stdout + + def test_cli_verbosity(self): + from faker.cli import Command + orig_stdout = sys.stdout + try: + sys.stdout = StringIO() + base_args = ['faker', 'address', '--seed', '769'] + target_args = ['-v'] + commands = [Command(base_args), Command(base_args + target_args)] + cli_output = [None] * 2 + for i in range(2): + commands[i].execute() + cli_output[i] = sys.stdout.getvalue() + simple_output, verbose_output = cli_output + self.assertNotEqual(simple_output, verbose_output) + finally: + sys.stdout = orig_stdout + def test_slugify(self): slug = text.slugify("a'b/c") self.assertEqual(slug, 'abc')
compose.js: Add return true for subscribed streams. This is basically going to fix a regression which was introduced in an earlier commit and made the code return early in the case of subscribed streams.
@@ -530,6 +530,7 @@ exports.validate_stream_message_address_info = function (stream_name) { compose_error(response, $('#stream')); return false; } + return true; }; function validate_stream_message() {
Fix object detector models The ObjectDetection base class has been broken since a commit removed the proto attribute from the NnpLoader class.
@@ -84,8 +84,8 @@ class ObjectDetection(object): return input_var def get_nnp_input_size(self): - nnp_input_size = self.nnp.proto.network[0].variable[0].shape.dim[2:] - return nnp_input_size + inputs = self.nnp.get_network(self.nnp.get_network_names()[0]).inputs + return list(inputs.values())[0].shape[2:] def __call__(self, input_var=None, use_from=None, use_up_to='detection', training=False, returns_net=False, verbose=0): '''
Pass identity signer to _CandidateBlock This simplifies the interface a bit so that the signer associated with the public key does not need to be passed when finalizing.
@@ -132,7 +132,7 @@ class _CandidateBlock(object): max_batches, batch_injectors, settings_view, - signer_public_key, + identity_signer, ): self._pending_batches = [] self._pending_batch_ids = set() @@ -149,7 +149,7 @@ class _CandidateBlock(object): self._batch_injectors = batch_injectors self._settings_view = settings_view - self._signer_public_key = signer_public_key + self._identity_signer = identity_signer def __del__(self): self.cancel() @@ -286,7 +286,7 @@ class _CandidateBlock(object): if not enforce_validation_rules( self._settings_view, - self._signer_public_key, + self._identity_signer.get_public_key().as_hex(), self._pending_batches + batches_to_add): return @@ -308,7 +308,7 @@ class _CandidateBlock(object): return self._consensus.check_publish_block( self._block_builder.block_header) - def _sign_block(self, block, identity_signer): + def _sign_block(self, block): """ The block should be complete and the final signature from the publishing validator(this validator) needs to be added. @@ -316,10 +316,10 @@ class _CandidateBlock(object): :param identity_signer: the singer to sign the block with. """ header_bytes = block.block_header.SerializeToString() - signature = identity_signer.sign(header_bytes) + signature = self._identity_signer.sign(header_bytes) block.set_signature(signature) - def finalize(self, identity_signer, pending_batches): + def finalize(self, pending_batches): """Compose the final Block to publish. This involves flushing the scheduler, having consensus bless the block, and signing the block. @@ -427,7 +427,7 @@ class _CandidateBlock(object): return None builder.set_state_hash(state_hash) - self._sign_block(builder, identity_signer) + self._sign_block(builder) return builder.build_block() @@ -631,7 +631,7 @@ class BlockPublisher(object): max_batches, batch_injectors, SettingsView(state_view), - public_key) + self._identity_signer) for batch in self._pending_batches: if self._candidate_block.can_add_batch(): @@ -751,7 +751,6 @@ class BlockPublisher(object): self._candidate_block.injected_batch_ids last_batch = self._candidate_block.last_batch block = self._candidate_block.finalize( - self._identity_signer, pending_batches) self._candidate_block = None # Update the _pending_batches to reflect what we learned.
Temporary workaround for the main synchronization issue we will address with this task
#!/usr/bin/env python + import jinja2 import tempfile import os @@ -9,6 +10,7 @@ import random import re import traceback import subprocess +import threading from xos.config import Config, XOS_DIR from xos.logger import observer_logger as logger from multiprocessing import Process, Queue @@ -47,7 +49,7 @@ def get_playbook_fn(opts, path): return (opts, os.path.join(pathed_sys_dir,objname)) -def run_playbook(ansible_hosts, ansible_config, fqp, opts, q): +def run_playbook(ansible_hosts, ansible_config, fqp, opts):#, q): try: if ansible_config: os.environ["ANSIBLE_CONFIG"] = ansible_config @@ -80,9 +82,19 @@ def run_playbook(ansible_hosts, ansible_config, fqp, opts, q): stats = None aresults = None - q.put([stats,aresults]) + #q.put([stats,aresults]) + return (stats,aresults) def run_template(name, opts, path='', expected_num=None, ansible_config=None, ansible_hosts=None, run_ansible_script=None, object=None): + global uglylock + try: + if (uglylock): + pass + except NameError: + uglylock = threading.Lock() + + uglylock.acquire() + template = os_template_env.get_template(name) buffer = template.render(opts) @@ -92,11 +104,16 @@ def run_template(name, opts, path='', expected_num=None, ansible_config=None, an f.write(buffer) f.flush() + """ q = Queue() p = Process(target=run_playbook, args=(ansible_hosts, ansible_config, fqp, opts, q,)) p.start() stats,aresults = q.get() p.join() + """ + stats,aresults = run_playbook(ansible_hosts,ansible_config,fqp,opts) + + uglylock.release() output_file = fqp + '.out' try:
fix setupdatabase() to use latest db version ARMui setupdatabase() was still pinned to an old database version
@@ -236,7 +236,7 @@ def setupdatabase(): db.create_all() db.session.commit() # push the database version arm is looking for - user = Alembic_version('e688fe04d305') + user = Alembic_version('9cae4aa05dd7') db.session.add(user) db.session.commit() return True
optimize ragdolls Ragdolls with physics disabled do not init properly, and collect at 0,0,0. Also, set fade time to -1 so that ragdolls can fade as soon as possible.
@@ -609,16 +609,15 @@ alias props_ultra "r_decalstaticprops 1;cl_phys_props_enable 1;r_drawdetailprops //cl_ragdoll_fade_time 5 // Fade out ragdolls in 5 seconds, which is the cutoff for special ragdoll effects //cl_ragdoll_forcefade 1 // Effectively disables ragdolls by instantly fading them //cl_ragdoll_forcefade 0 // Do not instantly fade ragdolls -//cl_ragdoll_physics_enable 1 // Enable ragdolls physics for meaningful ragdolls -//cl_ragdoll_physics_enable 0 // Disable ragdoll physics, where most of the performance from ragdolls comes from +cl_ragdoll_physics_enable 1 // Enable ragdolls physics for meaningful ragdolls, also inits ragdolls properly //cl_ragdoll_collide 1 // Enable ragdoll collisions //cl_ragdoll_collide 0 // Disable ragdoll collisions //ragdoll_sleepaftertime 1.5 // Wait a reasonable time before sleeping ragdoll physics //ragdoll_sleepaftertime 0 // Instantly sleep ragdolls -alias ragdolls_off "cl_ragdoll_fade_time 0;cl_ragdoll_forcefade 1;cl_ragdoll_physics_enable 0;cl_ragdoll_collide 0;ragdoll_sleepaftertime 0" +alias ragdolls_off "cl_ragdoll_fade_time -1;cl_ragdoll_forcefade 1;cl_ragdoll_collide 0;ragdoll_sleepaftertime 0" alias ragdolls_medium "cl_ragdoll_fade_time 5;cl_ragdoll_forcefade 0;cl_ragdoll_physics_enable 1;cl_ragdoll_collide 0;ragdoll_sleepaftertime 1.5" -alias ragdolls_high "cl_ragdoll_fade_time 10;cl_ragdoll_forcefade 0;cl_ragdoll_physics_enable 1;cl_ragdoll_collide 1;ragdoll_sleepaftertime 5.0f" +alias ragdolls_high "cl_ragdoll_fade_time 10;cl_ragdoll_forcefade 0;cl_ragdoll_collide 1;ragdoll_sleepaftertime 5.0f" // --------------- // '-- General --'
Deseasonify: info log on help cog load Keep it consistent with all other cogs.
# Help command from Python bot. All commands that will be added to there in futures should be added to here too. import asyncio import itertools +import logging from collections import namedtuple from contextlib import suppress from typing import Union @@ -30,6 +31,8 @@ REACTIONS = { Cog = namedtuple('Cog', ['name', 'description', 'commands']) +log = logging.getLogger(__name__) + class HelpQueryNotFound(ValueError): """ @@ -537,6 +540,8 @@ def setup(bot: SeasonalBot) -> None: except Exception: unload(bot) raise + else: + log.info("Help cog loaded") def teardown(bot: SeasonalBot) -> None:
Update v_vacuum_summary.sql Added column vac_deleted_rows for the number of deleted rows
@@ -27,6 +27,7 @@ CREATE OR REPLACE VIEW admin.v_vacuum_summary as SELECT a.userid, END AS vac_duration_secs, a."rows" AS vac_start_rows, b."rows" AS vac_end_rows, + a."rows" - b."rows" AS vac_deleted_rows, a.sortedrows AS vac_start_sorted_rows, b.sortedrows AS vac_end_sorted_rows, a."blocks" AS vac_start_blocks,
Bugfix: port not specified when using app run This fixes
@@ -1318,7 +1318,7 @@ class Quart(Scaffold): host = sn_host or "127.0.0.1" if port is None: - port = int(sn_port) or 5000 + port = int(sn_port or "5000") task = self.run_task( host,
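A minimal illustration of the fix, assuming `sn_port` is `None` when the configured server name has no port:

```python
sn_port = None                  # no port in the configured server name

# old: int(sn_port) or 5000    -> raises TypeError, since int(None) is invalid
# new: falls back cleanly to the default port
port = int(sn_port or "5000")   # -> 5000
```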
Update Facter link to not be version-specific Thanks to Graham for the tip.
# Sal [![CircleCI](https://circleci.com/gh/salopensource/sal.svg?style=svg)](https://circleci.com/gh/salopensource/sal) -Sal is a multi-tenanted reporting dashboard for [Munki](https://github.com/munki/munki/) with the ability to display information from [Facter](https://puppet.com/docs/puppet/6.10/facter.html). It has a plugin system allowing you to easily build widgets to display your custom information from Facter, Grains, Munki's [conditional items](https://github.com/munki/munki/wiki/Conditional-Items) etc. +Sal is a multi-tenanted reporting dashboard for [Munki](https://github.com/munki/munki/) with the ability to display information from [Facter](https://puppet.com/docs/puppet/latest/facter.html). It has a plugin system allowing you to easily build widgets to display your custom information from Facter, Grains, Munki's [conditional items](https://github.com/munki/munki/wiki/Conditional-Items) etc. With Sal, you are able to allow access to reports on certain sets of machines to certain people - for example, giving a manager access to the reports on the machines in their department.
TST: added old test back in Added the old xarray test back in.
@@ -509,6 +509,24 @@ class TestLoadNetCDF4XArray(): # Clear the class attributes del self.data_path, self.tempdir, self.testInst, self.stime + def test_basic_write_and_read_netcdf4_default_format(self): + """ Test basic netCDF4 writing and reading.""" + # Write the output test data + outfile = os.path.join(self.testInst.files.data_path, + 'pysat_test_ncdf.nc') + self.testInst.load(date=self.stime) + self.testInst.data.to_netcdf(outfile) + + # Load the written data + self.loaded_inst, meta = pysat.utils.load_netcdf4( + outfile, pandas_format=self.testInst.pandas_format) + + # Compare the initial and loaded data + for key in self.testInst.data.data_vars.keys(): + assert(np.all(self.testInst[key] == self.loaded_inst[key])) + + return + def test_load_netcdf4_pandas_3d_error(self): """ Test load_netcdf4 error with a pandas 3D file """
Add quote_via argument to urlencode Fixes
# Stubs for urllib.parse -from typing import Any, List, Dict, Tuple, AnyStr, Generic, overload, Sequence, Mapping, Union, NamedTuple +from typing import Any, List, Dict, Tuple, AnyStr, Generic, overload, Sequence, Mapping, Union, NamedTuple, Callable +import sys __all__ = ( 'urlparse', @@ -123,6 +124,14 @@ def urldefrag(url: str) -> DefragResult: ... @overload def urldefrag(url: bytes) -> DefragResultBytes: ... +if sys.version_info >= (3, 5): + def urlencode(query: Union[Mapping[Any, Any], + Mapping[Any, Sequence[Any]], + Sequence[Tuple[Any, Any]], + Sequence[Tuple[Any, Sequence[Any]]]], + doseq: bool = ..., safe: AnyStr = ..., encoding: str = ..., errors: str = ..., + quote_via: Callable[[str, AnyStr, str, str], str] = ...) -> str: ... +else: def urlencode(query: Union[Mapping[Any, Any], Mapping[Any, Sequence[Any]], Sequence[Tuple[Any, Any]],
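A small usage sketch of the parameter being stubbed; `quote_via` was added to `urlencode` in Python 3.5, and the visible difference from the default `quote_plus` is how spaces are encoded:

```python
from urllib.parse import urlencode, quote

params = {"q": "hello world"}

urlencode(params)                    # 'q=hello+world'   (default quote_plus)
urlencode(params, quote_via=quote)   # 'q=hello%20world' (spaces become %20)
```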
[jax2tf] Fix lowering for tf.while_loop Sometimes TF infers more specific shapes for the init_carry, and this has led to errors: "enters the loop with shape (1,), but has shape (None,) after one iteration" Unfortunately, I cannot construct a small test.
@@ -2751,7 +2751,12 @@ def _while(*args: TfVal, cond_nconsts: int, cond_jaxpr: core.ClosedJaxpr, body_tf_func = partial(_interpret_jaxpr, body_jaxpr, *body_consts, extra_name_stack="while/body") - return tf.while_loop(cond_tf_func, body_tf_func, init_carry) + # Sometimes TF infers more specific shapes for the init_carry, and this has + # led to errors: "enters the loop with shape (1,), but has shape (None,) after one iteration" + shape_invariants = [tf.TensorShape(_aval_to_tf_shape(_out_aval)) + for _out_aval in body_jaxpr.out_avals] + return tf.while_loop(cond_tf_func, body_tf_func, init_carry, + shape_invariants=shape_invariants) def _batched_cond_while(*args: TfVal, cond_nconsts: int,
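A standalone sketch (not the jax2tf code itself) of the situation `shape_invariants` addresses, where the carried value's shape after one iteration differs from its shape at loop entry:

```python
import tensorflow as tf

x = tf.constant([1.0])                       # enters the loop with shape (1,)
cond = lambda x: tf.shape(x)[0] < 8
body = lambda x: tf.concat([x, x], axis=0)   # shape grows every iteration

# Declaring the carry as (None,) relaxes TF's check that the shape stays
# identical across iterations.
out = tf.while_loop(cond, body, [x],
                    shape_invariants=[tf.TensorShape([None])])
```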
blob backend migrator that checks destination first The idea here is that it should be much faster to check the destination first and only copy data that is missing. This is useful to do after a previous migration, to validate the migration.
@@ -296,6 +296,21 @@ class BlobDbBackendMigrator(BaseDocMigrator): print(self.filename) +class BlobDbBackendCheckMigrator(BlobDbBackendMigrator): + def migrate(self, doc): + meta = doc["_obj_not_json"] + self.total_blobs += 1 + if not self.db.new_db.exists(key=meta.key): + try: + content = self.db.old_db.get(key=meta.key) + except NotFound: + self.save_backup(doc) + else: + with content: + self.db.copy_blob(content, key=meta.key) + return True + + class BlobMetaReindexAccessor(ReindexAccessor): model_class = BlobMeta @@ -406,11 +421,11 @@ class BackendMigrator(Migrator): has_worker_pool = True - def __init__(self, slug): + def __init__(self, slug, doc_migrator_class): reindexer = BlobMetaReindexAccessor() types = [reindexer.model_class] assert not hasattr(types[0], "get_db"), types[0] # not a couch model - super(BackendMigrator, self).__init__(slug, types, BlobDbBackendMigrator) + super(BackendMigrator, self).__init__(slug, types, doc_migrator_class) self.reindexer = reindexer def get_doc_migrator(self, filename, date_range=None, **kw): @@ -493,7 +508,8 @@ def _migrator_with_worker_pool(migrator, reindexer, iterable, max_retry, num_wor MIGRATIONS = {m.slug: m for m in [ - BackendMigrator("migrate_backend"), + BackendMigrator("migrate_backend", BlobDbBackendMigrator), + BackendMigrator("migrate_backend_check", BlobDbBackendCheckMigrator), migrate_metadata, # Kept for reference when writing new migrations. # Migrator("applications", [
new 16Q device map This is for ibmq_guadalupe, the only 16Q system available. Longer-term, it would be nice if the backend could provide its own preferred layout rather than needing to hard-code it here.
@@ -142,9 +142,9 @@ def plot_gate_map(backend, figsize=None, [0, 5], [0, 6], [1, 7], [1, 6], [1, 5], [1, 4], [1, 3], [1, 2], [1, 1], [1, 0]] - mpl_data[16] = [[1, 0], [0, 0], [0, 1], [0, 2], [0, 3], - [0, 4], [0, 5], [0, 6], [0, 7], [1, 7], - [1, 6], [1, 5], [1, 4], [1, 3], [1, 2], [1, 1]] + mpl_data[16] = [[1, 0], [1, 1], [2, 1], [3, 1], [1, 2], + [3, 2], [0, 3], [1, 3], [3, 3], [4, 3], + [1, 4], [3, 4], [1, 5], [2, 5], [3, 5], [1, 6]] mpl_data[27] = [[1, 0], [1, 1], [2, 1], [3, 1], [1, 2], [3, 2], [0, 3], [1, 3], [3, 3], [4, 3],
Update comment I can also remove the `for` loop on this request if the comment here is correct:
@@ -60,7 +60,7 @@ tmp_dir=$(mktemp -d "/tmp/verify-published-package.XXXXXXXXXXXXXXXX") cd "${tmp_dir}" trap "{ rm -rf ${tmp_dir}; }" EXIT -# Test both the python 2 and python 3 versions. +# Test python 3 versions. for PYTHON_VERSION in python3; do # Prepare. RUNTIME_DEPS_FILE="${REPO_ROOT}/requirements.txt"
Fix ReportUnclassifiedOIDs and ReportMissedMIBs HG-- branch : feature/microservices
@@ -17,21 +17,13 @@ from noc.core.translation import ugettext as _ class ReportUnclassifiedOIDs(SimpleReport): title = _("Unclassified Trap OIDs") - c_f = """ - function() { - var c = {} - db[collection].find(query, {"vars": 1}).forEach(function(doc) { - var oid = doc.vars.trap_oid; - c[oid] = (c[oid] || 0) + 1; - }); - return c; - } - """ - def get_data(self, **kwargs): c = EventClass.objects.filter(name="Unknown | SNMP Trap").first() - oids = ActiveEvent.objects.filter(event_class=c.id).exec_js(self.c_f) - data = [(o, MIB.get_name(o), c) for o, c in oids.items()] + pipeline = [{"$match": {"event_class": c.id}}, + {"$project": {"vars": 1}}, + {"$group": {"_id": "$vars.trap_oid", "count": {"$sum": 1}}}] + oids = ActiveEvent._get_collection().aggregate(pipeline) + data = [(e["_id"], MIB.get_name(e["_id"]), e["count"]) for e in oids] data = sorted(data, key=lambda x: -x[2]) return self.from_dataset(title=self.title, columns=["OID", "Name",
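A hedged sketch of the same grouping run directly with pymongo, showing how the aggregation pipeline replaces the old map-reduce JavaScript (database, collection, and class-id values below are placeholders, not NOC's real ones):

```python
from bson import ObjectId
from pymongo import MongoClient

coll = MongoClient()["noc"]["noc.events.active"]          # placeholder DB/collection names
event_class_id = ObjectId("0123456789ab0123456789ab")      # placeholder event-class id

pipeline = [
    {"$match": {"event_class": event_class_id}},
    {"$project": {"vars": 1}},
    {"$group": {"_id": "$vars.trap_oid", "count": {"$sum": 1}}},
]

# Each result document carries the trap OID in _id and its occurrence count.
for doc in coll.aggregate(pipeline):
    print(doc["_id"], doc["count"])
```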
Add lus to aww_no_submissions.txt Added mizo translations to aww_no_submissions.txt
{% if more_than_one_week %} -AWC {{ awc }} has not submitted any form or done any activity in ICDS CAS application in more than one week. Please follow up with Block Level Helpdesk or Supervisor for assistance +AWC {{ awc }} hian kar khat chhungin application ah hian eng form mah a thehlut lo va, engmah hmalakna a neilo a. Block Level Tanpuitu emaw Circlre Officer hnenah puibawm turin ngen ang che {% endif %} {% if more_than_one_month %} -AWC {{ awc }} has not submitted any form or done any activity in ICDS CAS application in more than one month. Please follow up with Block Level Helpdesk or Supervisor for assistance +AWC {{ awc }} hian thla khat chuang application ah hian engmah hmalakna a/an neilo a. Block Level Tanpuitu emaw Circle Officer hnenah emaw puibawm turin ngen ang che {% endif %}
Add error handlers for more command exceptions MissingPermissions, CheckFailure, DisabledCommand, and CommandOnCooldown will now have a simple message logged. * Log BotMissingPermissions and remove sending missing permissions as a message
@@ -4,9 +4,13 @@ import logging from discord.ext.commands import ( BadArgument, BotMissingPermissions, + CheckFailure, CommandError, CommandInvokeError, CommandNotFound, + CommandOnCooldown, + DisabledCommand, + MissingPermissions, NoPrivateMessage, UserInputError, ) @@ -58,10 +62,12 @@ class ErrorHandler: elif isinstance(e, NoPrivateMessage): await ctx.send("Sorry, this command can't be used in a private message!") elif isinstance(e, BotMissingPermissions): - await ctx.send( - f"Sorry, it looks like I don't have the permissions I need to do that.\n\n" - f"Here's what I'm missing: **{e.missing_perms}**" - ) + await ctx.send(f"Sorry, it looks like I don't have the permissions I need to do that.") + log.warning(f"The bot is missing permissions to execute command {command}: {e.missing_perms}") + elif isinstance(e, MissingPermissions): + log.debug(f"{ctx.message.author} is missing permissions to invoke command {command}: {e.missing_perms}") + elif isinstance(e, (CheckFailure, CommandOnCooldown, DisabledCommand)): + log.debug(f"Command {command} invoked by {ctx.message.author} with error {e.__class__.__name__}: {e}") elif isinstance(e, CommandInvokeError): if isinstance(e.original, ResponseCodeError): if e.original.response.status == 404: @@ -77,7 +83,6 @@ class ErrorHandler: "Got an unexpected status code from the " f"API (`{e.original.response.code}`)." ) - else: await ctx.send( f"Sorry, an unexpected error occurred. Please let us know!\n\n```{e}```"
Update h3x_feeds.py You are currently trying to hit the full day export every hour while he provides an hourly export.
@@ -13,7 +13,7 @@ class MalwareCorpusTracker(Feed): default_values = { "frequency": timedelta(hours=1), "name": "MalwareCorpusTracker", - "source": "http://tracker.h3x.eu/api/sites_1day.php", + "source": "http://tracker.h3x.eu/api/sites_1hour.php", "description": "This feed contains known Malware C2 servers", } @@ -45,9 +45,7 @@ class MalwareCorpusTracker(Feed): context['last_seen'] = last_seen context['source'] = self.name - tags = [] - tags.append(family.lower()) - tags.append(type_.lower()) + tags = [family.lower(), type_.lower()] try: url = Url.get_or_create(value=url)
Fixed issue with automated transcript generation via "history -t" There is now a post-processing step which escapes all "/" characters, which transcript testing treats as a regex escape if there isn't a "\" to escape it.
@@ -1913,6 +1913,13 @@ a..b, a:b, a:, ..b items by indices (inclusive) # Set echo back to its original state self.echo = saved_echo + # Post-process the file to escape un-escaped "/" regex escapes + with open(args.transcript, 'r') as fin: + data = fin.read() + post_processed_data = data.replace('/', '\/') + with open(args.transcript, 'w') as fout: + fout.write(post_processed_data) + plural = 's' if len(history) > 1 else '' self.pfeedback('{} command{} and outputs saved to transcript file {!r}'.format(len(history), plural, args.transcript))
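The post-processing step boils down to a plain string replace; a tiny illustration:

```python
data = "cat /etc/hosts"
# Prefix every "/" with a backslash so the transcript tester reads it literally
# instead of treating it as the start of a regex section.
escaped = data.replace("/", "\\/")
print(escaped)  # cat \/etc\/hosts
```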
Consolidate client usage in MinBwAllocationPlacementTest Let's only use admin client for the operations that are really needing it. blueprint: qos-minimum-guaranteed-packet-rate
@@ -68,7 +68,7 @@ class NetworkQoSPlacementTestBase(manager.NetworkScenarioTest): cls.qos_client = cls.os_admin.qos_client cls.qos_min_bw_client = cls.os_admin.qos_min_bw_client cls.flavors_client = cls.os_adm.flavors_client - cls.servers_client = cls.os_adm.servers_client + cls.servers_client = cls.os_primary.servers_client def _create_flavor_to_resize_to(self): old_flavor = self.flavors_client.show_flavor( @@ -189,7 +189,7 @@ class MinBwAllocationPlacementTest(NetworkQoSPlacementTestBase): server = self.create_server(networks=[{'port': port['id']}], wait_until=wait_until) waiters.wait_for_server_status( - client=self.os_primary.servers_client, server_id=server['id'], + client=self.servers_client, server_id=server['id'], status=status, ready_wait=False, raise_on_error=False) return server, port @@ -290,9 +290,9 @@ class MinBwAllocationPlacementTest(NetworkQoSPlacementTestBase): self._assert_allocation_is_as_expected(server['id'], [valid_port['id']]) - self.servers_client.migrate_server(server_id=server['id']) + self.os_adm.servers_client.migrate_server(server_id=server['id']) waiters.wait_for_server_status( - client=self.os_primary.servers_client, server_id=server['id'], + client=self.servers_client, server_id=server['id'], status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False) # TODO(lajoskatona): Check that the allocations are ok for the @@ -300,9 +300,10 @@ class MinBwAllocationPlacementTest(NetworkQoSPlacementTestBase): self._assert_allocation_is_as_expected(server['id'], [valid_port['id']]) - self.servers_client.confirm_resize_server(server_id=server['id']) + self.os_adm.servers_client.confirm_resize_server( + server_id=server['id']) waiters.wait_for_server_status( - client=self.os_primary.servers_client, server_id=server['id'], + client=self.servers_client, server_id=server['id'], status='ACTIVE', ready_wait=False, raise_on_error=True) self._assert_allocation_is_as_expected(server['id'], [valid_port['id']]) @@ -332,7 +333,7 @@ class MinBwAllocationPlacementTest(NetworkQoSPlacementTestBase): self.servers_client.resize_server( server_id=server['id'], flavor_ref=new_flavor['id']) waiters.wait_for_server_status( - client=self.os_primary.servers_client, server_id=server['id'], + client=self.servers_client, server_id=server['id'], status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False) # TODO(lajoskatona): Check that the allocations are ok for the @@ -342,7 +343,7 @@ class MinBwAllocationPlacementTest(NetworkQoSPlacementTestBase): self.servers_client.confirm_resize_server(server_id=server['id']) waiters.wait_for_server_status( - client=self.os_primary.servers_client, server_id=server['id'], + client=self.servers_client, server_id=server['id'], status='ACTIVE', ready_wait=False, raise_on_error=True) self._assert_allocation_is_as_expected(server['id'], [valid_port['id']])
MAINT: Correct ContrastResults Ensure pvalue is defined
@@ -57,6 +57,8 @@ class ContrastResults: self.pvalue = np.full_like(value, np.nan) not_nan = ~np.isnan(value) self.pvalue[not_nan] = self.dist.sf(np.abs(value[not_nan])) * 2 + else: + self.pvalue = np.nan # cleanup # should we return python scalar?
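A standalone sketch of the two-sided p-value computation with NaN handling, using scipy's standard normal as a stand-in for `self.dist`:

```python
import numpy as np
from scipy import stats

value = np.array([1.5, np.nan, -2.3])   # hypothetical test statistics
pvalue = np.full_like(value, np.nan)    # stays NaN wherever the statistic is NaN
not_nan = ~np.isnan(value)

# Two-sided p-value: twice the upper-tail survival function of |statistic|.
pvalue[not_nan] = stats.norm.sf(np.abs(value[not_nan])) * 2
print(pvalue)
```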
Fire requests through Environment.events Example Grpc client not reporting stats when Locust is running as a library. Fixed by firing requests through Environment.events.request.fire instead of locust.events.request.fire Related Issue:
@@ -23,7 +23,8 @@ def run_grpc_server(environment, **_kwargs): class GrpcClient: - def __init__(self, stub): + def __init__(self, environment, stub): + self.env = environment self._stub_class = stub.__class__ self._stub = stub @@ -47,7 +48,7 @@ class GrpcClient: except grpc.RpcError as e: request_meta["exception"] = e request_meta["response_time"] = (time.perf_counter() - start_perf_counter) * 1000 - events.request.fire(**request_meta) + self.env.events.request.fire(**request_meta) return request_meta["response"] return wrapper @@ -66,7 +67,7 @@ class GrpcUser(User): self._channel = grpc.insecure_channel(self.host) self._channel_closed = False stub = self.stub_class(self._channel) - self.client = GrpcClient(stub) + self.client = GrpcClient(environment, stub) class HelloGrpcUser(GrpcUser):
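Why the change matters: when Locust runs as a library you construct an `Environment` yourself, and stats only show up if events are fired on that instance rather than on the module-level `locust.events`. A hedged sketch (the user and request names are made up):

```python
import time
from locust import User, task
from locust.env import Environment

class GreeterUser(User):
    @task
    def say_hello(self):
        start = time.perf_counter()
        # ... the real gRPC call would happen here ...
        # Fire on self.environment.events so the stats reach the Environment below.
        self.environment.events.request.fire(
            request_type="grpc",
            name="SayHello",
            response_time=(time.perf_counter() - start) * 1000,
            response_length=0,
            response=None,
            context={},
            exception=None,
        )

env = Environment(user_classes=[GreeterUser])
runner = env.create_local_runner()
```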
[docs] Use better method to mock ObjectRef Actually fix
@@ -161,12 +161,25 @@ MOCK_MODULES = [ ] +def make_typing_mock(module, name): + class Object: + pass + + Object.__module__ = module + Object.__qualname__ = name + Object.__name__ = name + + return Object + + def mock_modules(): for mod_name in MOCK_MODULES: mock_module = mock.MagicMock() mock_module.__spec__ = mock.MagicMock() sys.modules[mod_name] = mock_module + sys.modules["ray._raylet"].ObjectRef = make_typing_mock("ray", "ObjectRef") + sys.modules["tensorflow"].VERSION = "9.9.9"
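The idea behind the helper is that a plain class with a hand-set `__module__`/`__qualname__` renders as a clean `ray.ObjectRef` reference, whereas a `MagicMock` attribute stringifies as a mock repr. A small sketch of the pattern:

```python
import sys
from unittest import mock

def make_typing_mock(module, name):
    # A real (empty) class, so tools that read __module__/__qualname__
    # see "ray.ObjectRef" instead of a MagicMock repr.
    cls = type(name, (), {})
    cls.__module__ = module
    cls.__qualname__ = name
    return cls

fake_raylet = mock.MagicMock()
fake_raylet.__spec__ = mock.MagicMock()
fake_raylet.ObjectRef = make_typing_mock("ray", "ObjectRef")
sys.modules["ray._raylet"] = fake_raylet

print(fake_raylet.ObjectRef)  # <class 'ray.ObjectRef'>
```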
fix(example): asyncpg==0.23.0 installs from wheels deb-packages aren't needed
FROM python:3.8.7-slim COPY requirements.txt ./ - -RUN apt-get update \ - # Can't find wheels for asyncpg for some reason :( - && apt-get install --no-install-recommends -y libpq-dev gcc \ - && pip install -r requirements.txt \ - && apt remove -y libpq-dev gcc \ - && apt -y autoremove \ - && rm -rf /var/lib/apt/lists/* \ - && apt-get clean +RUN pip install -r requirements.txt
Update correct list of default NTP hosts The host starting with 0 was missing from this documentation. After seeing it blocked, and checking the agent code, this update would correct it. * [V5 agent code](https://github.com/DataDog/dd-agent/blob/33afda662aade99500f454b33f208e8289818d7b/utils/ntp.py#L32) * [V6 agent code](https://github.com/DataDog/datadog-agent/blob/53b8106642411f2e992acc52bf6303de22c072a8/pkg/collector/corechecks/net/ntp_test.go#L300)
@@ -10,6 +10,7 @@ The Network Time Protocol (NTP) integration is enabled by default and reports th Default NTP servers reached: +* `0.datadog.pool.ntp.org` * `1.datadog.pool.ntp.org` * `2.datadog.pool.ntp.org` * `3.datadog.pool.ntp.org`
import: Narrow the id-window to just the current realm. On multi-realm systems this results in traversal of all messages in all realms and returns a massive payload of 1 row per stream on the server, not the intended one row per realm.
@@ -1328,7 +1328,8 @@ def do_import_realm(import_dir: Path, subdomain: str, processes: int = 1) -> Rea bulk_import_model(data, Reaction) # Similarly, we need to recalculate the first_message_id for stream objects. - update_first_message_id_query = """ + update_first_message_id_query = SQL( + """ UPDATE zerver_stream SET first_message_id = subquery.first_message_id FROM ( @@ -1336,13 +1337,15 @@ def do_import_realm(import_dir: Path, subdomain: str, processes: int = 1) -> Rea FROM zerver_message m JOIN zerver_recipient r ON r.id = m.recipient_id - WHERE r.type = 2 + WHERE r.type = 2 AND m.realm_id = %(realm_id)s GROUP BY r.type_id ) AS subquery WHERE zerver_stream.id = subquery.id """ + ) + with connection.cursor() as cursor: - cursor.execute(update_first_message_id_query) + cursor.execute(update_first_message_id_query, {"realm_id": realm.id}) if "zerver_userstatus" in data: fix_datetime_fields(data, "zerver_userstatus")
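A hedged sketch of the pattern being used here (inside a configured Django project with the psycopg2 backend; the table and realm id are illustrative):

```python
from django.db import connection
from psycopg2.sql import SQL

query = SQL(
    """
    SELECT COUNT(*)
    FROM zerver_message m
    WHERE m.realm_id = %(realm_id)s
    """
)

# The realm id travels as a bound parameter, so only that realm's rows are
# considered and nothing is interpolated into the SQL text by hand.
with connection.cursor() as cursor:
    cursor.execute(query, {"realm_id": 42})
    (count,) = cursor.fetchone()
```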
modified: infrastructure-provisioning/src/general/lib/os/fab.py Added a condition to "def install_ungit" that selects which ungit.service file will be copied to the deeplearning notebook
@@ -682,6 +682,9 @@ def install_ungit(os_user, notebook_name, edge_ip): if not exists(conn, '/home/{}/.ensure_dir/ungit_ensured'.format(os_user)): try: manage_npm_pkg('npm -g install ungit@{}'.format(os.environ['notebook_ungit_version'])) + if os.environ['conf_deeplearning_cloud_ami'] =='true' and os.environ['conf_cloud_provider'] =='azure' and os.environ['application'] =='deeplearning': + conn.put('/root/templates/ungit.service.18_04', '/tmp/ungit.service') + else: conn.put('/root/templates/ungit.service', '/tmp/ungit.service') conn.sudo("sed -i 's|OS_USR|{}|' /tmp/ungit.service".format(os_user)) http_proxy = conn.run('''bash -l -c 'echo $http_proxy' ''').stdout.replace('\n', '')
Improve add-admin tool Add description, epilog, help text, put contents into a main() function
@@ -8,21 +8,32 @@ from app.models import User app = create_app() app.app_context().push() -pass_length = 16 +PASS_LENGTH = 16 +def main(): + parser_desc = "Server-side utility to facilitate creating admins and toggling existing users to admins." + parser_epil = "Be sure that you're running this from within the virtual environment for the server." parser = argparse.ArgumentParser() -parser.add_argument("email") + parser.add_argument("email", metavar="[email protected]", help="email address to make admin") args = parser.parse_args() user = User.query.filter_by(email=args.email).first() if user is not None: + if user.is_admin: + print("User %s is already an admin" % args.email) + return user.is_admin = True db.session.add(user) db.session.commit() print("User %s is now an admin" % args.email) + return else: - password=''.join(random.SystemRandom().choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(pass_length)) + password=''.join(random.SystemRandom().choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(PASS_LENGTH)) user = User(email=args.email, is_admin=True) user.set_password(password) db.session.add(user) db.session.commit() print("User %s has been created as an admin with password %s" % (args.email, password)) + return + +if __name__ == "__main__": + main() \ No newline at end of file
Accounts CSV fixes Don't return the accounts dict before adding to it when a pickle isn't found, convert rows to a normal dict (instead of ordered), do a normal truth test instead of just checking for None.
@@ -333,8 +333,8 @@ def accounts_from_csv(new_accounts, pickled_accounts): del pickled_account['password'] account.update(pickled_account) else: - account['provider'] = account.get('provider') or 'ptc' - if not all(account.get(x) is not None for x in ('model', 'iOS', 'id')): + account['provider'] = account.get('provider', 'ptc') + if not all(account.get(x) for x in ('model', 'iOS', 'id')): account = generate_device_info(account) account['time'] = 0 account['captcha'] = False @@ -405,9 +405,7 @@ def load_accounts(): if config.ACCOUNTS_CSV: accounts = load_accounts_csv() - if not pickled_accounts: - return accounts - elif set(pickled_accounts) == set(accounts): + if set(pickled_accounts) == set(accounts): return pickled_accounts else: accounts = accounts_from_csv(accounts, pickled_accounts) @@ -429,7 +427,7 @@ def load_accounts_csv(): accounts = {} reader = DictReader(f) for row in reader: - accounts[row['username']] = row + accounts[row['username']] = dict(row) return accounts
tox-update: set the ansible.cfg path before update During an upgrade we're installing the platform with the stable-3.2 branch. But the ansible configuration is still using the file from the current branch, which could have some differences. Instead we can override the ANSIBLE_CONFIG environment variable for the stable-3.2 commands.
@@ -50,13 +50,13 @@ commands= git clone -b stable-3.2 --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt - ansible-playbook -vv -i {changedir}/{env:INVENTORY} {envdir}/tmp/ceph-ansible/tests/functional/setup.yml + bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {changedir}/{env:INVENTORY} {envdir}/tmp/ceph-ansible/tests/functional/setup.yml' # configure lvm - ansible-playbook -vv -i {changedir}/{env:INVENTORY} {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml --extra-vars "osd_scenario=lvm" + bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {changedir}/{env:INVENTORY} {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml --extra-vars "osd_scenario=lvm"' # deploy the cluster - ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/{env:INVENTORY} {envdir}/tmp/ceph-ansible/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ + bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/{env:INVENTORY} {envdir}/tmp/ceph-ansible/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \ fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \ ceph_stable_release={env:CEPH_STABLE_RELEASE:mimic} \ @@ -64,7 +64,7 @@ commands= ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \ ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-mimic} \ copy_admin_key={env:COPY_ADMIN_KEY:False} \ - " + "' pip install -r {toxinidir}/tests/requirements.txt non_container: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
Handle multiresult exception entries without a success field Tested-by: Build Bot
@@ -200,7 +200,8 @@ class CouchbaseError(Exception): if not key: key = nokey_prefix + ":nokey:" + str(count) count += 1 - if v.success: + success = getattr(v,'success', True) + if success: ret_ok[key] = v else: ret_fail[key] = v
Fix indentation in training_features.sql Change formatting in training_features.sql to match styleguide.
@@ -52,5 +52,10 @@ SELECT FROM Training JOIN GroupSize ON Training.company_response_to_consumer = GroupSize.company_response_to_consumer -WHERE Training.company_response_to_consumer IN ('Untimely response', 'Closed', 'Closed with monetary relief', - 'Closed with non-monetary relief', 'Closed with explanation'); +WHERE Training.company_response_to_consumer IN ( + 'Untimely response', + 'Closed', + 'Closed with monetary relief', + 'Closed with non-monetary relief', + 'Closed with explanation' +);
Carbon tempest CI fix: let ODL create br-int To avoid this bug until it is fixed: Note that it would be better to run with devstack creating br-int, because that is how most customers will be using this, but until the above bug is fixed let's have a working gate.
@@ -80,7 +80,6 @@ if is_service_enabled odl-compute; then if is_service_enabled nova; then create_nova_conf_neutron fi - sudo ovs-vsctl --may-exist add-br $OVS_BR bind_opendaylight_controller wait_for_active_bridge $OVS_BR $ODL_RETRY_SLEEP_INTERVAL $ODL_BOOT_WAIT
DOC: have notes in histogram_bin_edges match parameter style bins only accepts lowercase strings, but the notes are capitalized.
@@ -555,14 +555,14 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None): using the `ptp` of the data. The final bin count is obtained from ``np.round(np.ceil(range / h))``. - 'Auto' (maximum of the 'Sturges' and 'FD' estimators) + 'auto' (maximum of the 'sturges' and 'fd' estimators) A compromise to get a good value. For small datasets the Sturges value will usually be chosen, while larger datasets will usually default to FD. Avoids the overly conservative behaviour of FD and Sturges for small and large datasets respectively. Switchover point is usually :math:`a.size \approx 1000`. - 'FD' (Freedman Diaconis Estimator) + 'fd' (Freedman Diaconis Estimator) .. math:: h = 2 \frac{IQR}{n^{1/3}} The binwidth is proportional to the interquartile range (IQR) @@ -570,7 +570,7 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None): conservative for small datasets, but is quite good for large datasets. The IQR is very robust to outliers. - 'Scott' + 'scott' .. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}} The binwidth is proportional to the standard deviation of the @@ -580,14 +580,14 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None): outliers. Values are very similar to the Freedman-Diaconis estimator in the absence of outliers. - 'Rice' + 'rice' .. math:: n_h = 2n^{1/3} The number of bins is only proportional to cube root of ``a.size``. It tends to overestimate the number of bins and it does not take into account data variability. - 'Sturges' + 'sturges' .. math:: n_h = \log _{2}n+1 The number of bins is the base 2 log of ``a.size``. This @@ -595,7 +595,7 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None): larger, non-normal datasets. This is the default method in R's ``hist`` method. - 'Doane' + 'doane' .. math:: n_h = 1 + \log_{2}(n) + \log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}}) @@ -607,7 +607,7 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None): estimates for non-normal datasets. This estimator attempts to account for the skew of the data. - 'Sqrt' + 'sqrt' .. math:: n_h = \sqrt n The simplest and fastest estimator. Only takes into account the
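Usage note: the estimator names are passed as lowercase strings, matching the corrected docstring:

```python
import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=1000)

# 'fd', 'auto', 'sturges', ... — lowercase, as documented above.
edges = np.histogram_bin_edges(data, bins='fd')
print(len(edges) - 1, "bins")
```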
help center: Add a detail to /help/mute-a-stream. Indicate that muted streams are sorted to the bottom of their section.
@@ -5,7 +5,8 @@ notifications, unless you are [mentioned](/help/mention-a-user-or-group). Messages from muted streams do not generate [alert word](/help/pm-mention-alert-notifications#alert-words) notifications. -Muted streams still appear in the left sidebar, though they are grayed out. +Muted streams still appear in the left sidebar, but they are grayed out and +sorted to the bottom of their section. !!! warn ""
Update index.md Resolves broken link mentioned in
@@ -16,7 +16,7 @@ group](https://delphi.cmu.edu/). The Epidata API includes: other epidemics tracked by Delphi through a variety of data streams. The Delphi group is extremely grateful for Pedrito Maynard-Zhang for all his -help with the Epidata API [documentation](api/index.md). +help with the Epidata API [documentation](api/README.md). Developers interested in modifying or extending this project are directed to the [Epidata API Development Guide](epidata_development.md).
Fix: star import was not detecting when a value was not present. * This could happen if the "__all__" value contained names that were not actually present in the module.
@@ -1396,8 +1396,7 @@ bool IMPORT_MODULE_STAR( PyObject *target, bool is_module, PyObject *module ) break; } - // TODO: Not yet clear, what happens with __all__ and "_" of its - // contents. + // When we are not using the "__all__", we should skip private variables. if ( all_case == false ) { if ( Nuitka_String_AsString_Unchecked( item )[0] == '_' ) @@ -1406,9 +1405,15 @@ bool IMPORT_MODULE_STAR( PyObject *target, bool is_module, PyObject *module ) } } - // TODO: What if it isn't there, because of e.g. wrong __all__ value. PyObject *value = LOOKUP_ATTRIBUTE( module, item ); + // Might not exist, because of e.g. wrong "__all__" value. + if (unlikely( value == NULL )) + { + Py_DECREF( item ); + break; + } + // TODO: Check if the reference is handled correctly if ( is_module ) {
[API] Fix 500 error in /storage/raw/{hash} Fixed a 500 error that occurred when providing an invalid hash to the /storage/raw/{hash} endpoint. The code was raising the wrong type of exception. Moved code around to reduce indentation.
@@ -2,7 +2,6 @@ import base64 import logging from aiohttp import web -from aiohttp.http_exceptions import HttpBadRequest from aleph.exceptions import AlephStorageException, UnknownHashError from aleph.handlers.forget import count_file_references @@ -95,12 +94,14 @@ app.router.add_get("/api/v0/storage/{hash}", get_hash) async def get_raw_hash(request): item_hash = request.match_info.get("hash", None) + if item_hash is None: + raise web.HTTPBadRequest(text="No hash provided") + try: engine = ItemType.from_hash(item_hash) except UnknownHashError: - raise HttpBadRequest(message="Invalid hash") + raise web.HTTPBadRequest(text="Invalid hash") - if item_hash is not None: try: content = await get_hash_content( item_hash, @@ -110,13 +111,11 @@ async def get_raw_hash(request): store_value=False, ) except AlephStorageException as e: - raise web.HTTPNotFound(text="not found") from e + raise web.HTTPNotFound(text="Not found") from e response = web.Response(body=content.value) response.enable_compression() return response - else: - raise web.HTTPBadRequest(text="no hash provided") app.router.add_get("/api/v0/storage/raw/{hash}", get_raw_hash)
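For context, aiohttp's `web.HTTP*` exceptions double as responses and can be raised straight from a handler, which is why the low-level `HttpBadRequest` was the wrong type. A minimal, self-contained sketch (route and handler names are illustrative):

```python
from aiohttp import web

async def get_raw_hash(request: web.Request) -> web.Response:
    item_hash = request.match_info.get("hash")
    if item_hash is None:
        # The correct way to reject the request: raise the web-level exception.
        raise web.HTTPBadRequest(text="No hash provided")
    return web.Response(text=f"would fetch {item_hash}")

app = web.Application()
app.router.add_get("/api/v0/storage/raw/{hash}", get_raw_hash)

if __name__ == "__main__":
    web.run_app(app)
```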
Change kappa doctests This adds some rounding to the kappa values calculated in the docstrings, since different values were being encountered depending on the environment. An issue has been raised to discuss this (issue
@@ -802,9 +802,9 @@ class Rotor(object): -------- >>> rotor = rotor_example() >>> # H matrix for the 0th node - >>> rotor.H_kappa(0, 0) # doctest: +ELLIPSIS - array([[1.04039379e-27, 4.55965906e-17], - [4.55965906e-17, 1.99856891e-06]]) + >>> rotor.H_kappa(0, 0).round(6) # doctest: +ELLIPSIS + array([[0.e+00, 0.e+00], + [0.e+00, 2.e-06]]) """ # get vector of interest based on freqs vector = self.evectors[4 * node : 4 * node + 2, w] @@ -872,8 +872,8 @@ class Rotor(object): >>> rotor.kappa(0, 0)['Major axes'] # doctest: +ELLIPSIS 0.00141... >>> # kappa for node 2 and natural frequency (mode) 3. - >>> rotor.kappa(2, 3)['kappa'] # doctest: +ELLIPSIS - -3.720...e-13 + >>> rotor.kappa(2, 3)['kappa'].round(2) # doctest: +ELLIPSIS + -0.0 """ if wd: nat_freq = self.wd[w] @@ -938,8 +938,8 @@ class Rotor(object): -------- >>> rotor = rotor_example() >>> # kappa for each node of the first natural frequency - >>> rotor.kappa_mode(0) # doctest: +ELLIPSIS - [0.0, 0.0, 1.300...e-08, 0.0, 1.300...e-08, 0.0, 1.455...e-08] + >>> list(map(round, rotor.kappa_mode(0))) # doctest: +ELLIPSIS + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] """ kappa_mode = [self.kappa(node, w)["kappa"] for node in self.nodes] return kappa_mode
REF: extract as function `_copy_options_from_ts` to reuse it within the function `synthesize_many`.
@@ -1205,16 +1205,7 @@ def _spec_plus_sys( bool_states=False, bool_actions=False, statevar=statevar) - # consider sys just a formula, - # not a synthesis problem - # so overwrite settings - if hasattr(sys, 'moore'): - cp = sys - else: - cp = specs - sys_formula.moore = cp.moore - sys_formula.plus_one = cp.plus_one - sys_formula.qinit = cp.qinit + _copy_options_from_ts(sys_formula, sys, specs) specs = specs | sys_formula logger.debug('sys TS:\n' + str(sys_formula.pretty()) + _hl) if env is not None: @@ -1229,19 +1220,27 @@ def _spec_plus_sys( bool_states=False, bool_actions=False, statevar=statevar) - if hasattr(env, 'moore'): - cp = env - else: - cp = specs - env_formula.moore = cp.moore - env_formula.plus_one = cp.plus_one - env_formula.qinit = cp.qinit + _copy_options_from_ts(env_formula, env, specs) specs = specs | env_formula logger.debug('env TS:\n' + str(env_formula.pretty()) + _hl) logger.info('Overall Spec:\n' + str(specs.pretty()) + _hl) return specs +def _copy_options_from_ts(ts_spec, ts, specs): + """Copy `moore, qinit, plus_one` from `ts`, if set. + + Otherwise copy the values of those attributes from `specs`. + """ + if hasattr(ts, 'moore'): + cp = ts + else: + cp = specs + ts_spec.moore = cp.moore + ts_spec.plus_one = cp.plus_one + ts_spec.qinit = cp.qinit + + def strategy2mealy(A, spec): """Convert strategy to Mealy transducer.
test_altlinux: Remove print statement The commit introduced an unnecessary print statement in the altlinux tests.
@@ -4,7 +4,6 @@ from ceph_deploy.hosts.alt.install import map_components, NON_SPLIT_PACKAGES class TestALTMapComponents(object): def test_valid(self): pkgs = map_components(NON_SPLIT_PACKAGES, ['ceph-osd', 'ceph-common', 'ceph-radosgw']) - print(pkgs) assert 'ceph' in pkgs assert 'ceph-common' in pkgs assert 'ceph-radosgw' in pkgs
Add comments to explain how MultiProcessTestCase works Summary: Pull Request resolved: Test Plan: Imported from OSS
@@ -155,6 +155,16 @@ def simple_sparse_reduce_tests(rank, world_size, num_inputs=1): ] +# [How does MultiProcessTestCase work?] +# Each MultiProcessTestCase instance uses 1 + `world_size()` processes, by +# default `world_size()` returns 4. Let's take `test_rpc_spawn.py` as an +# example which inherits from this class. Its `Setup()` methods calls into +# `MultiProcessTestCase._spawn_processes()` which spawns `world_size()` +# subprocesses. During the spawn, the main process passes the test name to +# subprocesses, and the name is acquired from self.id(). The subprocesses +# then use the provided test function name to retrieve the function attribute +# from the test instance and run it. The main process simply waits for all +# subprocesses to join. class MultiProcessTestCase(TestCase): MAIN_PROCESS_RANK = -1 # This exit code is used to indicate that the test code had an error and
remove commented code in validators.py no longer setting generator_fuel_escalation_pct to escalation_pct for on-grid runs
@@ -546,11 +546,7 @@ class ValidateNestedInput: self.input_dict["Scenario"]["Site"]["LoadProfile"]["outage_end_time_step"] = 17520 else: self.input_dict["Scenario"]["Site"]["LoadProfile"]["outage_end_time_step"] = 8760 - # else: - # Sets diesel fuel escalation to the electricity escalation rate - # TODO: remove with next major UI update - # self.input_dict["Scenario"]["Site"]["Financial"]["generator_fuel_escalation_pct"] = \ - # self.input_dict["Scenario"]["Site"]["Financial"]["escalation_pct"] + @property def isValid(self): if self.input_data_errors or self.urdb_errors or self.ghpghx_inputs_errors:
Improve local mode transformer test Add a lock for the transformer test, similar to the serving tests as the serving container will still run on the same port. Also remove this test from the continuous testing suite.
@@ -368,8 +368,9 @@ def test_mxnet_local_data_local_script(): fcntl.lockf(local_mode_lock, fcntl.LOCK_UN) [email protected]_testing def test_local_transform_mxnet(sagemaker_local_session, tmpdir): + local_mode_lock_fd = open(LOCK_PATH, 'w') + local_mode_lock = local_mode_lock_fd.fileno() data_path = os.path.join(DATA_DIR, 'mxnet_mnist') script_path = os.path.join(data_path, 'mnist.py') @@ -392,7 +393,13 @@ def test_local_transform_mxnet(sagemaker_local_session, tmpdir): output_path = 'file://%s' % (str(tmpdir)) transformer = mx.transformer(1, 'local', assemble_with='Line', max_payload=1, strategy='SingleRecord', output_path=output_path) + + # Since Local Mode uses the same port for serving, we need a lock in order + # to allow concurrent test execution. + fcntl.lockf(local_mode_lock, fcntl.LOCK_EX) transformer.transform(transform_input, content_type='text/csv', split_type='Line') transformer.wait() + time.sleep(5) + fcntl.lockf(local_mode_lock, fcntl.LOCK_UN) assert os.path.exists(os.path.join(str(tmpdir), 'data.csv.out'))
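The locking pattern itself, reduced to a sketch (the lock path is the test suite's own constant; here a placeholder):

```python
import fcntl

LOCK_PATH = "/tmp/sagemaker_local_mode_lock"  # placeholder path

lock_file = open(LOCK_PATH, "w")
try:
    # Exclusive lock: only one local-mode test may bind the serving port at a time.
    fcntl.lockf(lock_file, fcntl.LOCK_EX)
    ...  # run the transform / serving step here
finally:
    fcntl.lockf(lock_file, fcntl.LOCK_UN)
    lock_file.close()
```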
Added options for qtbrowser interaction Pass fig factory instead Don't clear figure returned from factory git add callbacks/best_effort.py Kwarg only on arguments
@@ -22,15 +22,17 @@ from .fitting import PeakStats class BestEffortCallback(CallbackBase): - def __init__(self): + def __init__(self, *, fig_factory=None, table_enabled=True): # internal state self._start_doc = None self._descriptors = {} self._table = None self._heading_enabled = True - self._table_enabled = True + self._table_enabled = table_enabled self._baseline_enabled = True self._plots_enabled = True + # axes supplied from outside + self._fig_factory = fig_factory # maps descriptor uid to dict which maps data key to LivePlot instance self._live_plots = {} self._live_grids = {} @@ -231,6 +233,9 @@ class BestEffortCallback(CallbackBase): # we need 1 or 2 dims to do anything, do not make empty figures return + if self._fig_factory: + fig = self._fig_factory(fig_name) + else: fig = plt.figure(fig_name) if not fig.axes: # This is apparently a fresh figure. Make axes.
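A hedged usage sketch: handing the callback a figure factory so plots land in figures you manage (e.g. embedded in a Qt browser) instead of ones created via plt.figure():

```python
import matplotlib.pyplot as plt
from bluesky.callbacks.best_effort import BestEffortCallback

def my_fig_factory(fig_name):
    # Illustrative factory: return any Figure you control; the callback adds
    # axes to it itself, since the returned figure is not cleared.
    return plt.figure(fig_name)

bec = BestEffortCallback(fig_factory=my_fig_factory, table_enabled=False)
# RE.subscribe(bec)  # hypothetical RunEngine subscription
```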
Update __init__.py Added temp screen for claiming lnurl
@@ -53,6 +53,10 @@ def deletewallet():
     return redirect(url_for("home"))
 
+@app.route("/lnurl")
+def lnurl():
+    lnurl = request.args.get("lightning")
+    return render_template("lnurl.html", lnurl=lnurl)
 
 @app.route("/lnurlwallet")
compose: Remove PM recipient box outline. This removes the old blue styled outline around the PM recipient box that was part of the older bootstrap styling in favor of the dark outline on :focus that had been implemented for the rest of the recipient boxes recently.
@@ -298,8 +298,7 @@ textarea.new_message_textarea { } textarea.new_message_textarea, -#subject.recipient_box, -#stream.recipient_box { +.compose_table .recipient_box { border: 1px solid hsl(0, 0%, 86%); box-shadow: none; -webkit-box-shadow: none; @@ -307,9 +306,10 @@ textarea.new_message_textarea, } textarea.new_message_textarea:focus, -#subject.recipient_box:focus, -#stream.recipient_box:focus { +.compose_table .recipient_box:focus { border: 1px solid hsl(0, 0%, 66%); + box-shadow: none; + -webkit-box-shadow: none; } #stream.recipient_box:focus {
Add missing setup_data annotation to test that uses data/ Previously this test could not be run on a pristine checkout on its own; it relied on a previous execution of a test with the setup_data annotation (in CI, parsl/tests/test_data/test_file_apps.py) to pass.
import os +import pytest import parsl from parsl.app.app import App @@ -14,6 +15,7 @@ def cat(inputs=[], outputs=[], stdout=None, stderr=None): """.format(i=infiles, o=outputs[0]) [email protected]('setup_data') def test_files(): if os.path.exists('cat_out.txt'):