message | diff
---|---|
Fail hard with tracebacks if pytest-expect isn't working
Fixes | +from __future__ import print_function
import os.path
+import sys
import pkg_resources
import pytest
@@ -15,6 +17,26 @@ _tokenizer = os.path.join(_testdata, "tokenizer")
_sanitizer_testdata = os.path.join(_dir, "sanitizer-testdata")
+def fail_if_missing_pytest_expect():
+ """Throws an exception halting pytest if pytest-expect isn't working"""
+ try:
+ from pytest_expect import expect # noqa
+ except ImportError:
+ header = '*' * 78
+ print(
+ '\n' +
+ header + '\n' +
+ 'ERROR: Either pytest-expect or its dependency u-msgpack-python is not\n' +
+ 'installed. Please install them both before running pytest.\n' +
+ header + '\n',
+ file=sys.stderr
+ )
+ raise
+
+
+fail_if_missing_pytest_expect()
+
+
def pytest_configure(config):
msgs = []
|
[Fix] Oauth2 fix for handling multiple scopes
Replaced "+" in scopes with space " " in case of Guest is redirected | @@ -59,7 +59,7 @@ def authorize(*args, **kwargs):
if frappe.session['user']=='Guest':
#Force login, redirect to preauth again.
frappe.local.response["type"] = "redirect"
- frappe.local.response["location"] = "/login?redirect-to=/api/method/frappe.integration_broker.oauth2.authorize?" + quote(params)
+ frappe.local.response["location"] = "/login?redirect-to=/api/method/frappe.integration_broker.oauth2.authorize?" + quote(params.replace("+"," "))
elif frappe.session['user']!='Guest':
try:
|
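The reason the replacement works: `urllib`'s `quote()` percent-encodes a literal "+" as `%2B`, so a scope string that uses "+" as its separator survives the login redirect as a literal plus instead of the space the OAuth2 endpoint expects. A minimal sketch of that behaviour (Python 3 `urllib.parse`; the parameter string is illustrative, not frappe's actual one):

```python
# Why "+" in the scope breaks after URL-quoting, and why a space round-trips cleanly.
from urllib.parse import quote, unquote

params = "client_id=abc&scope=openid+all"        # hypothetical redirect params

encoded = quote(params)                          # '+' becomes '%2B'
print(unquote(encoded))                          # scope comes back as 'openid+all'

fixed = quote(params.replace("+", " "))          # what the patch does before redirecting
print(unquote(fixed))                            # scope comes back as 'openid all'
```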
Update octodns/provider/mythicbeasts.py
Seems fair to me! I think a lot of the suggestions you've mentioned are obvious ones that have been lost on me from being very confused trying to understand the available objects | @@ -339,7 +339,7 @@ class MythicBeastsProvider(BaseProvider):
base = '{} {} {} {}'.format(action, hostname, ttl, _type)
- if re.match('[A]{1,4}', _type) is not None:
+ if _type in ('A', 'AAAA'):
for value in values:
commands.append('{} {}'.format(base, value))
|
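The regex being removed only requires the record type to *start* with one to four "A" characters, so any type whose name begins with "A" (for example "ALIAS") also passes. A quick illustration of the difference between the two checks:

```python
# re.match anchors at the start but does not require a full match, so "ALIAS"
# passes the old test; the tuple membership test accepts only A and AAAA.
import re

for _type in ("A", "AAAA", "ALIAS", "TXT"):
    by_regex = re.match("[A]{1,4}", _type) is not None
    by_tuple = _type in ("A", "AAAA")
    print(f"{_type}: regex={by_regex} tuple={by_tuple}")
# A: regex=True tuple=True
# AAAA: regex=True tuple=True
# ALIAS: regex=True tuple=False   <- the false positive the patch removes
# TXT: regex=False tuple=False
```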
Fix time constraint on reprocess_archive_stubs
The logic was wrong so it was not terminating after 4 minutes | @@ -300,7 +300,7 @@ def reprocess_archive_stubs():
cutoff = start + timedelta(minutes=4).total_seconds()
for stub in stubs:
# Exit this task after 4 minutes so that the same stub isn't ever processed in multiple queues.
- if time.time() - start > cutoff:
+ if time.time() > cutoff:
return
xform = FormAccessors(stub.domain).get_form(form_id=stub.xform_id)
# If the history wasn't updated the first time around, run the whole thing again.
|
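The arithmetic behind the fix: `cutoff` already includes `start`, so it is an absolute epoch timestamp, while `time.time() - start` is a small elapsed-seconds value that will essentially never exceed it. A minimal sketch:

```python
# Comparing elapsed seconds against an absolute timestamp never triggers the exit;
# comparing the current time against the cutoff does, after 4 minutes.
import time

start = time.time()
cutoff = start + 240                 # start + timedelta(minutes=4).total_seconds()

elapsed = time.time() - start
print(elapsed > cutoff)              # False (tiny number vs. ~1.7e9) - the old check
print(time.time() > cutoff)          # the fixed check; flips to True after 4 minutes
```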
Update randomtemp on Windows
Summary:
Introduce max retry times to the flaky CUDA build command.
Changes:
Targets
Pull Request resolved: | @@ -131,7 +131,7 @@ if not "%USE_CUDA%"=="0" (
:: in PATH, and then pass the arguments to it.
:: Currently, randomtemp is placed before sccache (%TMP_DIR_WIN%\bin\nvcc)
:: so we are actually pretending sccache instead of nvcc itself.
- curl -kL https://github.com/peterjc123/randomtemp/releases/download/v0.2/randomtemp.exe --output %TMP_DIR_WIN%\bin\randomtemp.exe
+ curl -kL https://github.com/peterjc123/randomtemp/releases/download/v0.3/randomtemp.exe --output %TMP_DIR_WIN%\bin\randomtemp.exe
set RANDOMTEMP_EXECUTABLE=%TMP_DIR_WIN%\bin\nvcc.exe
set CUDA_NVCC_EXECUTABLE=%TMP_DIR_WIN%\bin\randomtemp.exe
set RANDOMTEMP_BASEDIR=%TMP_DIR_WIN%\bin
|
Remove extraneous raise in the integrate step.
Introduced in | @@ -468,7 +468,6 @@ def process_datablock(self, tag, datablock):
try:
integrated = self.integrate(experiments, indexed)
except Exception, e:
- raise
print "Error integrating", tag, str(e)
if not self.params.dispatch.squash_errors: raise
return
|
Fix broken link
Removed period from URL | @@ -68,7 +68,7 @@ Hosting Recommendation for 100,000+ users
The following matrix presents key features for a successful multi-region Mattermost implementation that scales to 100,000 users with support for high availability and geographically based traffic routing in on premises, AWS, and Azure deployments.
-To scale to 100,000 users and above, we recommended using `the Mattermost open source load testing framework <https://github.com/mattermost/mattermost-load-test.>`__ to simulate usage of your system at full scale.
+To scale to 100,000 users and above, we recommended using `the Mattermost open source load testing framework <https://github.com/mattermost/mattermost-load-test>`__ to simulate usage of your system at full scale.
.. csv-table::
:header: "Feature", "On Premises", "Amazon AWS", "Azure"
|
[tests] inspect.getargspec is still available with Python 3.9.0a2
remove skipping TestPythonArgSpec for Python 3.6+
because it is still available
test DeprecationWarning for all Python 3 versions because
tests run on Python 3.5+ only | @@ -749,7 +749,6 @@ class TestArgSpec(DeprecationTestCase):
return tools.getargspec(method)
-@unittest.skipIf(tools.PYTHON_VERSION >= (3, 6), 'removed in Python 3.6')
class TestPythonArgSpec(TestArgSpec):
"""Test the same tests using Python's implementation."""
@@ -759,7 +758,7 @@ class TestPythonArgSpec(TestArgSpec):
def getargspec(self, method):
"""Call inspect's getargspec function."""
with warnings.catch_warnings():
- if tools.PYTHON_VERSION >= (3, 5):
+ if not tools.PY2:
warnings.simplefilter('ignore', DeprecationWarning)
return inspect.getargspec(method)
|
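For reference, the pattern the test keeps using works on every Python 3 release up to 3.10 (`inspect.getargspec` was only removed in 3.11); it just emits a `DeprecationWarning` that has to be silenced. A small standalone sketch of that pattern:

```python
# Silencing the DeprecationWarning while still exercising inspect.getargspec
# (available through Python 3.10; removed in 3.11).
import inspect
import warnings

def sample(a, b=1, *args, **kwargs):
    pass

with warnings.catch_warnings():
    warnings.simplefilter("ignore", DeprecationWarning)
    spec = inspect.getargspec(sample)

print(spec.args)      # ['a', 'b']
```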
There seems to be a UTC time bug around the dao_get_uploads_by_service_id function.
To fix the build tonight I'm freezing the time for the test, but will investigate further tomorrow. | @@ -189,6 +189,7 @@ def test_get_uploads_orders_by_processing_started_desc(sample_template):
assert results[1].id == upload_2.id
+@freeze_time("2020-10-27 16:15") # GMT time
def test_get_uploads_orders_by_processing_started_and_created_at_desc(sample_template):
letter_template = create_uploaded_template(sample_template.service)
|
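`freeze_time` here is assumed to be freezegun's decorator; pinning the clock to a fixed GMT instant keeps the ordering assertions from shifting across the BST/GMT boundary. A minimal sketch of how the decorator behaves:

```python
# freezegun pins datetime.utcnow() (and friends) for the duration of the call.
from datetime import datetime
from freezegun import freeze_time

@freeze_time("2020-10-27 16:15")     # GMT, matching the decorator added above
def current_utc():
    return datetime.utcnow()

print(current_utc())                 # 2020-10-27 16:15:00
```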
Reserve IDs inline, not async
This fixes a long-standing sporadic issue when running the tests; it may also have affected live! | @@ -816,7 +816,7 @@ class FlushCommand(object):
def reserve_id(kind, id_or_name, namespace):
from google.appengine.api.datastore import _GetConnection
key = datastore.Key.from_path(kind, id_or_name, namespace=namespace)
- _GetConnection()._async_reserve_keys(None, [key])
+ _GetConnection()._reserve_keys([key])
class BulkInsertError(IntegrityError, NotSupportedError):
|
babel branch
babel branch | @@ -7,7 +7,7 @@ TOKEN_ATTR="circle-token=$SERVER_CI_TOKEN"
echo "Getting latest build num"
-ARTIFACT_BUILD_NUM=$(curl -s -H "$ACCEPT_TYPE" "$SERVER_API_URI/tree/master?limit=1&filter=successful&$TOKEN_ATTR" | jq '.[0].build_num')
+ARTIFACT_BUILD_NUM=$(curl -s -H "$ACCEPT_TYPE" "$SERVER_API_URI/tree/goja-babel?limit=1&filter=successful&$TOKEN_ATTR" | jq '.[0].build_num')
SERVER_DOWNLOAD_LINK=$(curl -s -H "$ACCEPT_TYPE" ${SERVER_API_URI}/${ARTIFACT_BUILD_NUM}/artifacts?${TOKEN_ATTR} | jq '.[].url' -r | grep demistoserver | grep /0/)
|
Update integration-MISP.yml
update misp integration descriptions | @@ -443,11 +443,11 @@ script:
type: javascript
commands:
- name: internal-misp-upload-sample
- description: "-"
+ description: Internal function, do not use it directly
arguments:
- name: filename
required: true
- description: "-"
+ description: File name
- name: fileContent
required: true
description: File Content in Base64
@@ -563,10 +563,10 @@ script:
description: RelatedEvent
description: Search for events in misp
- name: file
- description: "-"
+ description: Check file in misp
arguments:
- name: file
- description: "-"
+ description: file
required: true
outputs:
- contextPath: File.MD5
@@ -590,7 +590,7 @@ script:
- name: url
arguments:
- name: url
- description: "-"
+ description: Check URL in misp
required: true
outputs:
- contextPath: URL.Data
@@ -609,7 +609,7 @@ script:
description: The actual score
description: Check if IP is in MISP events
- name: ip
- description: "-"
+ description: Check ip in misp
arguments:
- name: ip
required: true
|
doc note on deterministic/non-deterministic gradient for min/max/median
Summary:
An update on the note that the subgradients for min/max are not deterministic.
Pull Request resolved: | @@ -3293,6 +3293,9 @@ add_docstr(torch.max,
Returns the maximum value of all elements in the ``input`` tensor.
+.. warning::
+ This function produces deterministic (sub)gradients unlike ``max(dim=0)``
+
Args:
{input}
@@ -3316,6 +3319,7 @@ value of each row of the :attr:`input` tensor in the given dimension
maximal value found, unless it is unique.
The exact implementation details are device-specific.
Do not expect the same result when run on CPU and GPU in general.
+ For the same reason do not expect the gradients to be deterministic.
If ``keepdim`` is ``True``, the output tensors are of the same size
as ``input`` except in the dimension ``dim`` where they are of size 1.
@@ -3471,6 +3475,9 @@ add_docstr(torch.median,
Returns the median value of all elements in the :attr:`input` tensor.
+.. warning::
+ This function produces deterministic (sub)gradients unlike ``median(dim=0)``
+
Args:
{input}
@@ -3495,6 +3502,14 @@ as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the outputs tensor having 1 fewer dimension than :attr:`input`.
+.. warning::
+ ``indices`` does not necessarily contain the first occurrence of each
+ median value found, unless it is unique.
+ The exact implementation details are device-specific.
+ Do not expect the same result when run on CPU and GPU in general.
+ For the same reason do not expect the gradients to be deterministic.
+
+
Args:
{input}
{dim}
@@ -3519,6 +3534,9 @@ add_docstr(torch.min,
Returns the minimum value of all elements in the :attr:`input` tensor.
+.. warning::
+ This function produces deterministic (sub)gradients unlike ``min(dim=0)``
+
Args:
{input}
@@ -3542,6 +3560,7 @@ value of each row of the :attr:`input` tensor in the given dimension
minimal value found, unless it is unique.
The exact implementation details are device-specific.
Do not expect the same result when run on CPU and GPU in general.
+ For the same reason do not expect the gradients to be deterministic.
If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
|
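A small, hedged experiment that matches the note being added: with tied maxima, the full reduction has a well-defined subgradient, while the `dim=` variant routes the gradient only to whichever tied index the backend happened to select.

```python
# Observing the documented difference between torch.max(x) and torch.max(x, dim=...)
# when the maximum is not unique. Which index the dim-reduction picks is device-specific.
import torch

x = torch.tensor([1.0, 3.0, 3.0], requires_grad=True)
torch.max(x).backward()
print(x.grad)                        # deterministic subgradient for the full reduction

x.grad = None
torch.max(x, dim=0).values.backward()
print(x.grad)                        # gradient lands only on the selected index
```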
Minor reformatting/error message rewording
TN: | @@ -2692,13 +2692,13 @@ class PropertyDef(AbstractNodeData):
if self.external:
check_source_language(
uses_entity_info is not None,
- "Need to specify uses_entity_info for external properties"
+ 'uses_entity_info is required for external properties'
)
self._uses_entity_info = uses_entity_info
else:
check_source_language(
uses_entity_info in (None, False),
- "Cannot specify uses_entity_info=True for internal properties"
+ 'Cannot specify uses_entity_info=True for internal properties'
)
self._uses_entity_info = uses_entity_info
self.optional_entity_info = optional_entity_info
|
Change docker instructions on README.md
Change all references of INPUT_DIR to INPUT_PATH
change all references of OUTPUT_DIR to OUTPUT_PATH
change a paragraph | @@ -74,23 +74,25 @@ python3 -m manim example_scenes.py SquareToCircle -pl
### Using Docker
Since it's a bit tricky to get all the dependencies set up just right, there is a Dockerfile and Compose file provided in this repo as well as [a premade image on Docker Hub](https://hub.docker.com/r/eulertour/manim/tags/). The Dockerfile contains instructions on how to build a manim image, while the Compose file contains instructions on how to run the image.
-In order to do this with the Compose file, you must set the `INPUT_DIR`
+In order to do this with the Compose file, you must set the `INPUT_PATH`
environment variable to the directory containing manim repository and the
-`OUTPUT_DIR` environment variable to the directory where you want media to be written.
+`OUTPUT_PATH` environment variable to the directory where you want media to be written.
1. [Install Docker](https://docs.docker.com)
2. [Install Docker Compose](https://docs.docker.com/compose/install/)
3. Render an animation
```sh
-INPUT_DIR=/path/to/dir/containing/source/code \
-OUTPUT_DIR=/path/to/output/ \
+INPUT_PATH=/path/to/dir/containing/source/code \
+OUTPUT_PATH=/path/to/output/ \
docker-compose run manim example_scenes.py SquareToCircle -l
```
The command needs to be run as root if your username is not in the docker group.
-example_scenes.py or your own project should point to the file on the host machine.
+replace `example_scenes.py` with your own on the host machine for your projects.
-After running the output will say files ready at `tmp/output/`, which is inside the container. Your OUTPUT_DIR is bind mounted to this `/tmp/output` so any changes made by the container to `/tmp/output` will be mirrored on your OUTPUT_DIR. `/media/` will be created in `OUTPUT_DIR`.
+<img src=./manim_docker_diagram.png/>
+
+After running the output will say files ready at `/tmp/output/`, which refers to path inside the container. Your OUTPUT_PATH is bind mounted to this `/tmp/output` so any changes made by the container to `/tmp/output` will be mirrored on your OUTPUT_PATH. `/media/` will be created in `OUTPUT_PATH`.
`-p` won't work as manim would look for video player in the container system, which it does not have.
|
Remove ignore invalid-name flag
Conform to pylint naming requirements | @@ -10,7 +10,6 @@ arg_map = {
"--reports=no",
"--disable=I",
"--disable=duplicate-code",
- "--disable=invalid-name",
"--msg-template='{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}'",
],
"tests/blackbox/stratisd_cert.py": [
|
cosmetic_changes.py: Do not convert text to byte string
This may cause UnicodeEncodeError on Python 2. See | @@ -659,7 +659,8 @@ class CosmeticChangesToolkit(object):
for template in skip_templates[self.site.code]:
skip_regexes.append(
re.compile(r'\{\{\s*%s\s*\}\}' % template, re.I))
- stripped_text = str(text)
+
+ stripped_text = text
for reg in skip_regexes:
stripped_text = reg.sub(r'', stripped_text)
|
Channel tweaks
1. Fix buffer length bug (always 10 less than stated buffer_size)
2. Use `deque` instead of `list` for buffer because it is quicker (and this is exactly what it was designed for)
3. Avoid double application of indent or timestamp on `channel open` | @@ -5,6 +5,7 @@ import os
import re
import threading
import time
+from collections import deque
from threading import Lock, Thread
from typing import Any, Callable, Dict, Generator, Optional, Tuple, Union
@@ -2668,7 +2669,7 @@ class Channel:
if buffer_size == 0:
self.buffer = None
else:
- self.buffer = list()
+ self.buffer = deque()
def __repr__(self):
return "Channel(%s, buffer_size=%s, line_end=%s)" % (
@@ -2677,7 +2678,11 @@ class Channel:
repr(self.line_end),
)
- def __call__(self, message: Union[str, bytes, bytearray], *args, indent=True, **kwargs):
+ def __call__(
+ self, message: Union[str, bytes, bytearray], *args,
+ indent: Optional[bool]=True, **kwargs
+ ):
+ original_msg = message
if self.line_end is not None:
message = message + self.line_end
if indent and not isinstance(message, (bytes, bytearray)):
@@ -2686,11 +2691,15 @@ class Channel:
ts = datetime.datetime.now().strftime("[%H:%M:%S] ")
message = ts + message.replace("\n", "\n%s" % ts)
for w in self.watchers:
+ # Avoid double timestamp and indent
+ if isinstance(w, Channel):
+ w(original_msg)
+ else:
w(message)
if self.buffer is not None:
self.buffer.append(message)
- if len(self.buffer) + 10 > self.buffer_size:
- self.buffer = self.buffer[-self.buffer_size :]
+ while len(self.buffer) > self.buffer_size:
+ self.buffer.popleft()
def __len__(self):
return self.buffer_size
|
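The buffer change in isolation: trimming a `deque` from the left is O(1) per element, and the `while` loop caps the buffer at exactly `buffer_size` messages. A standalone sketch:

```python
# deque + popleft keeps only the newest buffer_size messages with O(1) trims.
from collections import deque

buffer_size = 5
buffer = deque()
for i in range(12):
    buffer.append(f"msg {i}")
    while len(buffer) > buffer_size:
        buffer.popleft()

print(list(buffer))                  # ['msg 7', ..., 'msg 11'] - the last 5 messages
```

`deque(maxlen=buffer_size)` would enforce the same bound automatically; the explicit loop mirrors the patch.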
Update README.md
right-hand drive vehicles are supported | @@ -240,7 +240,6 @@ Many factors can impact the performance of openpilot DM, causing it to be unable
* Low light conditions, such as driving at night or in dark tunnels.
* Bright light (due to oncoming headlights, direct sunlight, etc.).
* The driver's face is partially or completely outside field of view of the driver facing camera.
-* Right hand driving vehicles.
* The driver facing camera is obstructed, covered, or damaged.
The list above does not represent an exhaustive list of situations that may interfere with proper operation of openpilot components. A driver should not rely on openpilot DM to assess their level of attention.
|
api: note get_url doesn't check file existence
per | @@ -58,6 +58,7 @@ def get_url(path, repo=None, rev=None, remote=None):
"""
Returns the full URL to the data artifact specified by its `path` in a
`repo`.
+ NOTE: There is no guarantee that the file actually exists in that location.
"""
try:
with _make_repo(repo, rev=rev) as _repo:
|
Hypothesis tests are not matrix tests
Remove test strategy. | @@ -11,9 +11,6 @@ jobs:
hypothesis:
runs-on: ubuntu-20.04
timeout-minutes: 60
- strategy:
- matrix:
- python_version: ["3.10"]
steps:
- uses: actions/[email protected]
with:
@@ -23,7 +20,7 @@ jobs:
- name: Set up Python
uses: actions/[email protected]
with:
- python-version: ${{ matrix.python_version }}
+ python-version: ${{ env.python_version }}
- name: Use Python Dependency Cache
uses: actions/[email protected]
with:
|
fw/job: add missing log indents
Add "with indentcontext():" to Job stages that were missing them. | @@ -108,15 +108,18 @@ class Job(object):
def configure_target(self, context):
self.logger.info('Configuring target for job {}'.format(self))
+ with indentcontext():
context.tm.commit_runtime_parameters(self.spec.runtime_parameters)
def setup(self, context):
self.logger.info('Setting up job {}'.format(self))
+ with indentcontext():
with signal.wrap('WORKLOAD_SETUP', self, context):
self.workload.setup(context)
def run(self, context):
self.logger.info('Running job {}'.format(self))
+ with indentcontext():
with signal.wrap('WORKLOAD_EXECUTION', self, context):
start_time = datetime.utcnow()
try:
|
Change the character length
Fix the wrong column data size for bot_token in the built-in SQLAlchemy data model | @@ -89,7 +89,7 @@ class SQLAlchemyInstallationStore(InstallationStore):
Column("enterprise_name", String(200)),
Column("team_id", String(32)),
Column("team_name", String(200)),
- Column("bot_token", String(32)),
+ Column("bot_token", String(200)),
Column("bot_id", String(32)),
Column("bot_user_id", String(32)),
Column("bot_scopes", String(1000)),
|
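For context, Slack bot tokens (the `xoxb-...` strings stored in `bot_token`) are typically well over 32 characters, so a 32-character column truncates them or errors out on strict database backends, while the fixed-format IDs still fit comfortably. A hedged sketch of the column shapes (table and example values are illustrative, not the library's actual schema):

```python
# Minimal SQLAlchemy table showing the widened bot_token column next to the
# short fixed-format ID columns.
from sqlalchemy import Column, MetaData, String, Table

metadata = MetaData()
installations = Table(
    "slack_installations_demo",
    metadata,
    Column("bot_id", String(32)),        # short fixed-format ID, fits easily
    Column("bot_user_id", String(32)),
    Column("bot_token", String(200)),    # 'xoxb-...' tokens need the wider column
)
print(installations.c.bot_token.type.length)   # 200
```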
Backport
Fix typo perkissive to permissive | @@ -105,7 +105,8 @@ class AutoKeyTest(TestCase):
@patch_check_permissions()
def test_check_permissions_group_can_write_not_permissive(self):
"""
- Assert that a file is accepted, when group can write to it and perkissive_pki_access=False
+ Assert that a file is accepted, when group can write to it and
+ permissive_pki_access=False
"""
self.stats["testfile"] = {"mode": gen_permissions("w", "w", ""), "gid": 1}
if salt.utils.platform.is_windows():
@@ -116,7 +117,8 @@ class AutoKeyTest(TestCase):
@patch_check_permissions(permissive_pki=True)
def test_check_permissions_group_can_write_permissive(self):
"""
- Assert that a file is accepted, when group can write to it and perkissive_pki_access=True
+ Assert that a file is accepted, when group can write to it and
+ permissive_pki_access=True
"""
self.stats["testfile"] = {"mode": gen_permissions("w", "w", ""), "gid": 1}
self.assertTrue(self.auto_key.check_permissions("testfile"))
@@ -124,8 +126,8 @@ class AutoKeyTest(TestCase):
@patch_check_permissions(uid=0, permissive_pki=True)
def test_check_permissions_group_can_write_permissive_root_in_group(self):
"""
- Assert that a file is accepted, when group can write to it, perkissive_pki_access=False,
- salt is root and in the file owning group
+ Assert that a file is accepted, when group can write to it,
+ permissive_pki_access=False, salt is root and in the file owning group
"""
self.stats["testfile"] = {"mode": gen_permissions("w", "w", ""), "gid": 0}
self.assertTrue(self.auto_key.check_permissions("testfile"))
@@ -133,8 +135,9 @@ class AutoKeyTest(TestCase):
@patch_check_permissions(uid=0, permissive_pki=True)
def test_check_permissions_group_can_write_permissive_root_not_in_group(self):
"""
- Assert that no file is accepted, when group can write to it, perkissive_pki_access=False,
- salt is root and **not** in the file owning group
+ Assert that no file is accepted, when group can write to it,
+ permissive_pki_access=False, salt is root and **not** in the file owning
+ group
"""
self.stats["testfile"] = {"mode": gen_permissions("w", "w", ""), "gid": 1}
if salt.utils.platform.is_windows():
|
Adding TDP for Intel Xeon Gold 6230N
TDP, see
Relates to | @@ -2047,6 +2047,7 @@ Intel Xeon E7-8890 v3,165
Intel Xeon E7-8891 v3,165
Intel Xeon E7-8893 v3,140
Intel Xeon Gold 6154,200
+Intel Xeon Gold 6230N,125
Intel Xeon L5506,60
Intel Xeon L5508,38
Intel Xeon L5518,60
|
client: run chown if file is not owned by swarming user
This is to make the file/dir movable to the local cache directory. | @@ -955,6 +955,14 @@ class NamedCache(Cache):
abs_cache = os.path.join(self.cache_dir, rel_cache)
logging.info('- Moving to %r', rel_cache)
file_path.ensure_tree(os.path.dirname(abs_cache))
+
+ if sys.platform != 'win32':
+ uid = os.getuid()
+ if os.stat(src).st_uid != uid:
+ # Maybe owner of |src| is different from runner of this script. This
+ # is to make fs.rename work in that case.
+ # https://crbug.com/986676
+ subprocess.check_call(['sudo', '-n', 'chown', str(uid), src])
fs.rename(src, abs_cache)
self._lru.add(name, (rel_cache, size))
|
Show env vars on form only when env vars are in use
Stop env var header from showing when there are no env vars | @@ -301,6 +301,7 @@ export class SubmitNotebook extends Widget implements Dialog.IBodyWidget<ISubmit
let td_colspan4 = '<td style="padding: 1px;" colspan=4>';
let subtitle = '<div style="font-size: var(--jp-ui-font-size3)">Environmental Variables</div>'
+ if (this._envVars.length > 0) {
let html = '' + tr + td_colspan4 + subtitle + '</td>' + '</tr>';
for (let i = 0; i < this._envVars.length; i++) {
@@ -320,8 +321,11 @@ export class SubmitNotebook extends Widget implements Dialog.IBodyWidget<ISubmit
}
}
-
return html;
+
+ } else {
+ return '';
+ }
}
getValue(): ISubmitNotebookConfiguration {
|
Add macOS tests to Travis
Add initial support for CI with macOS, and deploy to GitHub and PyPI | language: python
-dist: xenial
cache: pip
+python: 3.7.2
stages:
- lint
@@ -8,11 +8,42 @@ stages:
jobs:
include:
- - python: 3.7.2
+ - os: linux
+ dist: xenial
env: PYTEST_ADDOPTS="--doctest-modules"
sudo: true
+ before_install:
+ - sudo apt-get update -q
+ - sudo apt-get install --no-install-recommends -y xvfb python3-dev python3-gi python3-gi-cairo
+ gir1.2-gtk-3.0 libgirepository1.0-dev libcairo2-dev
+ before_deploy:
+ - poetry config http-basic.pypi $PYPI_USER $PYPI_PASSWORD
+ - poetry build
+ deploy:
+ provider: script
+ script: poetry publish
+ on:
+ tags: true
+
+ - os: osx
+ env: PYTEST_ADDOPTS="--doctest-modules"
+ before_install:
+ - brew update
+ - brew install gobject-introspection gtk+3
+ before_deploy:
+ - python setup.py macos -b
+ deploy:
+ - provider: releases
+ api_key:
+ secure: YOUR_API_KEY_ENCRYPTED
+ file: "macOS/Gaphor.dmg"
+ skip_cleanup: true
+ on:
+ tags: true
- stage: lint
+ os: linux
+ dist: xenial
python: 3.7.2
before_install: skip
install:
@@ -21,11 +52,9 @@ jobs:
script:
- pre-commit run --all-files
after_success: skip
+ before_deploy: skip
+ deploy: skip
-before_install:
- - sudo apt-get update -q
- - sudo apt-get install --no-install-recommends -y xvfb python3-dev python3-gi python3-gi-cairo
- gir1.2-gtk-3.0 libgirepository1.0-dev libcairo2-dev
install:
- pip --disable-pip-version-check install poetry
- poetry install
@@ -34,6 +63,7 @@ script:
after_success:
- coveralls
+
notifications:
email: false
webhooks:
|
remove line from docstring
seems to be a mistake? | @@ -31,7 +31,6 @@ def signal_filter(
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
- or ``"bandstop"``.
sampling_rate : int
The sampling frequency of the signal (in Hz, i.e., samples/second).
lowcut : float
|
Do not log call parameters on info level
Currently, when logging Glacier actions (e.g. upload), logging at info level is unusable, as the entire file content gets logged (this can be many gigabytes). | @@ -146,7 +146,7 @@ class BatchAction(ServiceAction):
params.update(kwargs)
- logger.info('Calling %s:%s with %r',
+ logger.debug('Calling %s:%s with %r',
service_name, operation_name, params)
response = getattr(client, operation_name)(**params)
@@ -193,7 +193,7 @@ class WaiterAction(object):
params = create_request_parameters(parent, self._waiter_model)
params.update(kwargs)
- logger.info('Calling %s:%s with %r',
+ logger.debug('Calling %s:%s with %r',
parent.meta.service_name,
self._waiter_resource_name, params)
|
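The effect of the change under a typical configuration: handlers are usually set to INFO or WARNING, so an `info` call renders the full `%r` of the call parameters (file bodies included) into the log, whereas a `debug` call is filtered out before any formatting happens. A generic sketch (logger name and payload are illustrative):

```python
# At level INFO the first call formats and emits the whole payload; the second
# is dropped before %r formatting ever runs.
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("glacier.demo")

params = {"body": b"x" * 16}                     # stand-in for a multi-gigabyte archive body
log.info("Calling %s with %r", "upload_archive", params)    # emitted, payload fully rendered
log.debug("Calling %s with %r", "upload_archive", params)   # suppressed at INFO level
```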
Update whatsnew
include new component PiecewiseLinearTransformer in whatsnew for v0.4.2 | @@ -16,7 +16,7 @@ New features
New components/constraints
^^^^^^^^^^^^^^^^^^^^^^^^^^
-* something
+* Custom component: oemof.solph.custom.PiecewiseLinearTransformer. A transformer model with one input and one output and an arbitrary piecewise linear conversion function. On how to use the component, refer to the `test script <https://github.com/oemof/oemof-solph/blob/dev/tests/test_scripts/test_solph/test_piecewiselineartransformer/test_piecewiselineartransformer.py>`_ and `example <https://github.com/oemof/oemof-examples/blob/master/oemof_examples/oemof.solph/v0.4.x/piecewise/piecewise_linear_transformer.py>`_.
Documentation
^^^^^^^^^^^^^^^^^^^^
@@ -46,4 +46,5 @@ Other changes
Contributors
^^^^^^^^^^^^^^^^^^^^
-* something
+* Jann Launer
+* Stefan Schirmeister
|
update ContactTerm.get_fargs() to return gap function in evaluation mode
update .call_function(), .eval_real()
rename .function() -> .function_weak()
new .integrate(), .function(), .get_eval_shape() | @@ -109,9 +109,9 @@ class ContactTerm(Term):
return self.ci
- def call_function(self, fargs):
+ def call_function(self, out, fargs):
try:
- out, status = self.function(*fargs)
+ out, status = self.function(out, *fargs)
except (RuntimeError, ValueError):
terms.errclear()
@@ -125,16 +125,34 @@ class ContactTerm(Term):
def eval_real(self, shape, fargs, mode='eval', term_mode=None,
diff_var=None, **kwargs):
- out, status = self.call_function(fargs)
- if mode != 'weak':
- raise ValueError('unsupported evaluation mode! (%s)' % mode)
+ if mode == 'weak':
+ out, status = self.call_function(None, fargs)
+
+ else:
+ out = nm.empty(shape, dtype=nm.float64)
+ status = self.call_function(out, fargs)
return out, status
@staticmethod
- def function(out_cc):
+ def function_weak(out, out_cc):
return out_cc, 0
+ @staticmethod
+ def integrate(out, val_qp, geo, fmode):
+ if fmode == 2:
+ out[:] = val_qp
+ status = 0
+
+ else:
+ status = geo.integrate(out, val_qp, fmode)
+
+ return out, status
+
+ @staticmethod
+ def function(out, fun, *args):
+ return fun(out, *args)
+
def get_fargs(self, epss, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
geo, _ = self.get_mapping(virtual)
@@ -147,6 +165,7 @@ class ContactTerm(Term):
GPs = ci.update(xx)
+ if mode == 'weak':
Gc = nm.zeros(ci.neq, dtype=nm.float64)
activeGPs = GPs[:, 2*ci.nsd+3]
@@ -181,11 +200,37 @@ class ContactTerm(Term):
#self.detect = max(0, self.detect - 1)
if diff_var is None:
from sfepy.discrete.variables import create_adof_conn
- rows = nm.unique(create_adof_conn(nm.arange(len(Gc)), ci.sd.econn,
+ rows = nm.unique(create_adof_conn(nm.arange(len(Gc)),
+ ci.sd.econn,
ci.nsd, 0))
out_cc = (Gc[rows], rows, state)
else:
out_cc = (vals[:num], rows[:num], cols[:num], state, state)
- return out_cc,
+ return self.function_weak, out_cc
+
+ elif mode in ('el_avg', 'qp'):
+ fmode = {'el_avg' : 1, 'qp' : 2}[mode]
+
+ if term_mode == 'gap':
+ gap = GPs[:, ci.nsd + 2].reshape(-1, ci.ngp, 1, 1)
+ gap[gap > 0] = 0.0
+ return self.integrate, gap, geo, fmode
+
+ else:
+ raise ValueError('unsupported term mode in %s! (%s)'
+ % (self.name, term_mode))
+
+ else:
+ raise ValueError('unsupported evaluation mode in %s! (%s)'
+ % (self.name, mode))
+
+ def get_eval_shape(self, epss, virtual, state,
+ mode=None, term_mode=None, diff_var=None, **kwargs):
+ n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
+
+ if mode != 'qp':
+ n_qp = 1
+
+ return (n_el, n_qp, 1, 1), state.dtype
|
Always use sns.set in API docs
Closes | @@ -2295,7 +2295,7 @@ boxplot.__doc__ = dedent("""\
:context: close-figs
>>> import seaborn as sns
- >>> sns.set_style("whitegrid")
+ >>> sns.set(style="whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.boxplot(x=tips["total_bill"])
@@ -2473,7 +2473,7 @@ violinplot.__doc__ = dedent("""\
:context: close-figs
>>> import seaborn as sns
- >>> sns.set_style("whitegrid")
+ >>> sns.set(style="whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.violinplot(x=tips["total_bill"])
@@ -2686,7 +2686,7 @@ stripplot.__doc__ = dedent("""\
:context: close-figs
>>> import seaborn as sns
- >>> sns.set_style("whitegrid")
+ >>> sns.set(style="whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.stripplot(x=tips["total_bill"])
@@ -2889,7 +2889,7 @@ swarmplot.__doc__ = dedent("""\
:context: close-figs
>>> import seaborn as sns
- >>> sns.set_style("whitegrid")
+ >>> sns.set(style="whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.swarmplot(x=tips["total_bill"])
@@ -3051,7 +3051,7 @@ barplot.__doc__ = dedent("""\
:context: close-figs
>>> import seaborn as sns
- >>> sns.set_style("whitegrid")
+ >>> sns.set(style="whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.barplot(x="day", y="total_bill", data=tips)
@@ -3243,7 +3243,7 @@ pointplot.__doc__ = dedent("""\
:context: close-figs
>>> import seaborn as sns
- >>> sns.set_style("darkgrid")
+ >>> sns.set(style="darkgrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.pointplot(x="time", y="total_bill", data=tips)
|
update ecg_period calculation in ecg_hrv
can you confirm this change? ecg_period should be 1/ecg_rate (in milliseconds), right? | @@ -2,6 +2,7 @@ import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches
+import scipy
from .ecg_rate import ecg_rate as nk_ecg_rate
from ..signal.signal_formatpeaks import _signal_formatpeaks_sanitize
@@ -95,8 +96,9 @@ def ecg_hrv(ecg_rate, rpeaks=None, sampling_rate=1000, show=False):
ecg_rate, rpeaks = _ecg_hrv_formatinput(ecg_rate, rpeaks, sampling_rate)
# Get raw and interpolated R-R intervals
- rri = np.diff(rpeaks) / sampling_rate * 1000
- ecg_period = ecg_rate / 60 * 1000 # Express in milliseconds
+ rri = np.diff(rpeaks) / sampling_rate * 1000 # milliseconds
+ ecg_period = 60 * 1000/ecg_rate # Express in milliseconds
+
# Get indices
hrv = {} # Initialize empty dict
|
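A quick arithmetic check of the corrected formula: the rate is in beats per minute, so the beat-to-beat period in milliseconds is 60 * 1000 / rate, whereas the old expression (rate / 60 * 1000) grows with the rate and is not a period at all.

```python
# Sanity-checking the period formula for a few heart rates (values in BPM).
import numpy as np

ecg_rate = np.array([60.0, 75.0, 100.0])
old = ecg_rate / 60 * 1000          # 1000.0, 1250.0, 1666.7  - increases with rate, wrong
new = 60 * 1000 / ecg_rate          # 1000.0,  800.0,  600.0  - ms per beat, decreases with rate
print(old)
print(new)
```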
Update README.md
Update install instructions for conjure-up | @@ -24,10 +24,7 @@ knowledge. It is comprised of the following components and features:
Installation has been automated via [conjure-up](http://conjure-up.io/):
- sudo apt-add-repository ppa:juju/stable
- sudo apt-add-repository ppa:conjure-up/next
- sudo apt update
- sudo apt install conjure-up
+ sudo snap install conjure-up --classic
conjure-up canonical-kubernetes
Conjure will prompt you for deployment options (AWS, GCE, Azure, etc.) and credentials.
|
Run git diff in a subshell to ensure correct parsing
Also drop unneeded xargs invocation | @@ -22,8 +22,8 @@ yarn --cwd "frontend" pretty-quick --staged
# "--diff-filter=ACMR" only lists files that are [A]dded, [C]opied, [M]odified,
# or [R]enamed; we don't want to try to format files that have been deleted.
if command -v "black" > /dev/null; then
- changed_files=git diff --diff-filter=ACMR --name-only --cached | grep -E "\.pyi?$"
- if [ -n changed_files ]; then
- xargs black $changed_files
+ changed_files=$(git diff --diff-filter=ACMR --name-only --cached | grep -E "\.pyi?$")
+ if [ $changed_files ]; then
+ black $changed_files
fi
fi
|
Handle matrix warnings in test_interconnect_unused_{input,output}
Ignore warnings with match string from conftest.py's `matrixfilter`
warning filter. | @@ -9,6 +9,7 @@ created for that purpose.
"""
from __future__ import print_function
+import re
import numpy as np
import pytest
@@ -1437,7 +1438,13 @@ def test_interconnect_unused_input():
connections=False)
#https://docs.pytest.org/en/6.2.x/warnings.html#recwarn
- assert not record
+ for r in record:
+ # strip out matrix warnings
+ if re.match(r'.*matrix subclass', str(r.message)):
+ continue
+ print(r.message)
+ pytest.fail(f'Unexpected warning: {r.message}')
+
# warn if explicity ignored input in fact used
with pytest.warns(UserWarning, match=r"Input\(s\) specified as ignored is \(are\) used:") as record:
@@ -1481,7 +1488,6 @@ def test_interconnect_unused_output():
h = ct.interconnect([g,s,k],
inputs=['r'],
outputs=['y'])
- print(record.list[0])
# no warning if output explicitly ignored
@@ -1501,7 +1507,12 @@ def test_interconnect_unused_output():
connections=False)
#https://docs.pytest.org/en/6.2.x/warnings.html#recwarn
- assert not record
+ for r in record:
+ # strip out matrix warnings
+ if re.match(r'.*matrix subclass', str(r.message)):
+ continue
+ print(r.message)
+ pytest.fail(f'Unexpected warning: {r.message}')
# warn if explicity ignored output in fact used
with pytest.warns(UserWarning, match=r"Output\(s\) specified as ignored is \(are\) used:"):
|
Prevent `profile_observer_test` from being run by CPU test
Summary:
Fix CMakeLists.txt, so the test for CPU won't run profile_observer_test.cc, as currently it only supports GPU
Pull Request resolved: | if(USE_OBSERVERS)
message(STATUS "Include Observer library")
+ set(GLOB profile_observer_files profile_observer_*.cc)
set(Caffe2_CONTRIB_OBSERVERS_CPU_SRC
"${CMAKE_CURRENT_SOURCE_DIR}/time_observer.cc"
"${CMAKE_CURRENT_SOURCE_DIR}/runcnt_observer.cc"
)
+ set(Caffe2_CONTRIB_OBSERVERS_GPU_SRC
+ "${CMAKE_CURRENT_SOURCE_DIR}/profile_observer_gpu.cc"
+ )
set(Caffe2_CPU_SRCS ${Caffe2_CPU_SRCS} ${Caffe2_CONTRIB_OBSERVERS_CPU_SRC})
set(Caffe2_CPU_SRCS ${Caffe2_CPU_SRCS} PARENT_SCOPE)
+ set(Caffe2_GPU_SRCS ${Caffe2_GPU_SRCS} ${Caffe2_CONTRIB_OBSERVERS_GPU_SRC})
+ set(Caffe2_GPU_SRCS ${Caffe2_GPU_SRCS} PARENT_SCOPE)
+
# ---[ CPU test files
file(GLOB tmp *_test.cc)
set(Caffe2_CPU_TEST_SRCS ${Caffe2_CPU_TEST_SRCS} ${tmp})
set(Caffe2_CPU_TEST_SRCS ${Caffe2_CPU_TEST_SRCS} PARENT_SCOPE)
+ exclude(Caffe2_CPU_TEST_SRCS "${Caffe2_CPU_TEST_SRCS}" ${profile_observer_files})
+
+ # ---[ GPU test files
+ set(Caffe2_GPU_TEST_SRCS "${CMAKE_CURRENT_SOURCE_DIR}/profile_observer_test.cc")
endif()
|
4.6 changelog update about Zoom plugin
Move note about supporting the on-prem version of Zoom to v4.7 | @@ -30,7 +30,6 @@ Release date: 2017-01-16
#### Plugins (Beta)
- Plugins now support slash commands.
-- Zoom plugin now supports an on-premise Zoom server.
#### Notifications
@@ -138,6 +137,7 @@ Multiple setting options were added to `config.json`. Below is a list of the add
- Letters are skipped in a few dialogs when using Korean keyboard in IE11.
- Push notifications don't always clear on iOS when running Mattermost in High Availability mode.
- Deleting a team via the API breaks the user interface.
+- Bot messages from the Zoom plugin ignore the Zoom API URL field for on-prem Zoom servers.
### Contributors
|
Allow Moving Observer to Cuda
Summary: Allow for moving observer to cuda in emulate_int8_{method} functions in order to make it compatible with pytext trainers. | @@ -20,6 +20,7 @@ def quantize(w, scale, zero_point):
def emulate_int8_histogram(w, scale=None, zero_point=None):
if scale is None:
obs = torch.quantization.observer.HistogramObserver()
+ obs.to(device=w.device)
_ = obs(w.float())
scale, zero_point = obs.calculate_qparams()
scale = scale.cuda().type_as(w)
@@ -32,6 +33,7 @@ def emulate_int8_channel(w, scale=None, zero_point=None):
obs = torch.quantization.observer.PerChannelMinMaxObserver(
ch_axis=-1, qscheme=torch.per_channel_symmetric
)
+ obs.to(device=w.device)
_ = obs(w)
scale, zero_point, ch_axis = obs.get_qparams()
scale = scale.cuda().type_as(w)
@@ -42,6 +44,7 @@ def emulate_int8_channel(w, scale=None, zero_point=None):
def emulate_int8_tensor(w, scale=None, zero_point=None):
if scale is None:
obs = torch.quantization.observer.MinMaxObserver()
+ obs.to(device=w.device)
_ = obs(w)
scale, zero_point = obs.calculate_qparams()
scale = scale.cuda().type_as(w)
|
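The observers are `nn.Module`s, so they can be moved with `.to()` like any other module; without that, the observer's buffers stay on the CPU while the weight lives on CUDA. A hedged standalone sketch of the pattern (falls back to CPU when no GPU is present):

```python
# Moving the observer onto the weight's device before collecting statistics.
import torch

w = torch.randn(8, 8)
if torch.cuda.is_available():
    w = w.cuda()

obs = torch.quantization.observer.MinMaxObserver()
obs.to(device=w.device)              # the line the patch adds
obs(w)
scale, zero_point = obs.calculate_qparams()
print(scale, zero_point)
```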
Ignore errors finding the partition with the shortest backlog
falling back instead to the default partitioner | @@ -3,6 +3,7 @@ from memoized import memoized
from corehq.apps.change_feed import topics
from corehq.apps.change_feed.connection import get_kafka_consumer
from corehq.util.quickcache import quickcache
+from dimagi.utils.logging import notify_exception
from pillowtop import get_pillow_by_name, get_all_pillow_configs
@@ -19,7 +20,14 @@ def choose_best_partition_for_topic(topic):
# None means there's no best, use the default
return None
+ try:
backlog_lengths_by_partition = _get_backlog_lengths_by_partition(topic)
+ except Exception:
+ # if there's any issue whatsoever fetching offsets
+ # fall back to the default partitioning algorithm
+ notify_exception(None, "Error measuring kafka partition backlog lengths")
+ return None
+ else:
_, best_partition = min(
(backlog_length, partition)
for partition, backlog_length in backlog_lengths_by_partition.items()
|
Escape character in manf#
Now interpret the escape character in the `manf#` code and in the distributors code. Before it was only in `manf#`. Discussion with another user about the `,` in the `manf#`. | @@ -351,13 +351,6 @@ def subpart_split(components):
except KeyError:
continue
- # Remove any escape backslashes preceding PART_SEPRTR.
- for c in split_components.values():
- try:
- c['manf#'] = re.sub(ESC_FIND, r'\1', c['manf#'])
- except KeyError:
- pass
-
return split_components
@@ -411,6 +404,7 @@ def subpart_qtypart(subpart):
'ADUM3150BRSZ-RL7' -> ('1', 'ADUM3150BRSZ-RL7')
'ADUM3150BRSZ-RL7:' -> ('1', 'ADUM3150BRSZ-RL7') forgot the qty understood '1'
'''
+ subpart = re.sub(ESC_FIND, r'\1', subpart) # Remove any escape backslashes preceding PART_SEPRTR.
strings = re.split(QTY_SEPRTR, subpart)
if len(strings)==2:
# Search for numbers, matching with simple, frac and decimal ones.
|
Replace decodestring with decodebytes from the base64 module
decodestring has been removed in Python 3.9 | @@ -87,10 +87,6 @@ XSD = "xs"
NS_SOAP_ENC = "http://schemas.xmlsoap.org/soap/encoding/"
-_b64_decode_fn = getattr(base64, 'decodebytes', base64.decodestring)
-_b64_encode_fn = getattr(base64, 'encodebytes', base64.encodestring)
-
-
class AttributeValueBase(SamlBase):
def __init__(self,
text=None,
@@ -232,10 +228,9 @@ class AttributeValueBase(SamlBase):
'base64Binary': {
'type': _str,
'to_type': _str,
- 'to_text': lambda x:
- _b64_encode_fn(x.encode())
- if base64encode
- else x,
+ 'to_text': (
+ lambda x: base64.encodebytes(x.encode()) if base64encode else x
+ ),
},
'anyType': {
'type': type(value),
|
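The replacement names have been the supported spelling for a long time (`encodebytes`/`decodebytes` exist since Python 3.1), and the deprecated aliases were finally dropped in 3.9, which is what broke here. A quick round-trip check:

```python
# encodebytes/decodebytes round-trip; decodestring/encodestring no longer exist in 3.9+.
import base64

encoded = base64.encodebytes(b"attribute value")   # b'YXR0cmlidXRlIHZhbHVl\n'
decoded = base64.decodebytes(encoded)
assert decoded == b"attribute value"
print(encoded, decoded)
```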
Keep cfnlint import function-local (~1s)
Saves about 1s of startup time. | @@ -6,7 +6,6 @@ import yaml
import os
import string
-from cfnlint import decode, core
from moto.core import ACCOUNT_ID
@@ -62,6 +61,8 @@ def yaml_tag_constructor(loader, tag, node):
def validate_template_cfn_lint(template):
+ # Importing cfnlint adds a significant overhead, so we keep it local
+ from cfnlint import decode, core
# Save the template to a temporary file -- cfn-lint requires a file
filename = "file.tmp"
|
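The pattern itself is generic: moving a heavy import into the function body defers its cost until the first call, and later calls are cheap because the module is already in `sys.modules`. A small sketch (the imported module here is a lightweight stand-in, not cfnlint):

```python
# Deferred-import pattern: module import stays fast; the heavy dependency is
# only paid for when validation is actually requested.
def validate_template(template: str):
    import json  # stand-in for `from cfnlint import decode, core`
    return json.loads(template)

print(validate_template('{"Resources": {}}'))    # first call triggers the import
```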
feat(monitoring): trace and tag push events
Fixes | @@ -160,6 +160,14 @@ async def push(
data: github_types.GitHubEvent,
score: typing.Optional[str] = None,
) -> None:
+ with tracer.trace(
+ "push event",
+ span_type="worker",
+ resource=f"{owner_login}/{repo_name}/{pull_number}",
+ ) as span:
+ span.set_tags(
+ {"gh_owner": owner_login, "gh_repo": repo_name, "gh_pull": pull_number}
+ )
now = date.utcnow()
event = msgpack.packb(
{
|
Update Hyundai firmware in values.py for 2021 Sonata
* Update Hyundai values.py for 2021 Sonata
Added firmware versions from a 2021 Hyundai Sonata bought in Southern California
* Update selfdrive/car/hyundai/values.py
* Update values.py | @@ -164,16 +164,19 @@ FW_VERSIONS = {
b'\xf1\x00DN8_ SCC FHCUP 1.00 1.01 99110-L1000 ',
b'\xf1\x00DN8_ SCC FHCUP 1.00 1.00 99110-L0000 ',
b'\xf1\x00DN8_ SCC F-CU- 1.00 1.00 99110-L0000 ',
+ b'\xf1\x00DN8_ SCC F-CUP 1.00 1.00 99110-L0000 ',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00DN ESC \x01 102\x19\x04\x13 58910-L1300\xf1\xa01.02',
b'\xf1\x00DN ESC \x06 104\x19\x08\x01 58910-L0100',
b'\xf1\x8758910-L0100\xf1\x00DN ESC \x06 104\x19\x08\x01 58910-L0100\xf1\xa01.04',
+ b'\xf1\x8758910-L0100\xf1\x00DN ESC \x07 104\x19\x08\x01 58910-L0100\xf1\xa01.04',
],
(Ecu.engine, 0x7e0, None): [
b'HM6M2_0a0_BD0',
b'\xf1\x87391162M003\xf1\xa0000F',
b'\xf1\x87391162M003\xf1\xa00240',
+ b'HM6M1_0a0_F00',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x8756310-L1010\xf1\x00DN8 MDPS C 1.00 1.03 56310-L1010 4DNDC103\xf1\xa01.03',
@@ -188,6 +191,7 @@ FW_VERSIONS = {
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x00HT6TA260BLHT6TA800A1TDN8C20KS4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
+ b'\xf1\x00HT6WA250BLHT6WA910A1SDN8G25NB1\x00\x00\x00\x00\x00\x00\x96\xa1\xf1\x92',
],
},
CAR.SANTA_FE: {
|
Removed SAX classes from xml/__init__.pyi
Removed class definitions from `xml/__init__.pyi` as they were merely outdated duplicates of the definitions from the correct file (`xml/sax/__init__.pyi`)
Left file intact as it is necessary for the module | -class SAXException(Exception):
- def __init__(self, msg, exception=None): ...
- def getMessage(self): ...
- def getException(self): ...
- def __getitem__(self, ix): ...
-
-class SAXParseException(SAXException):
- def __init__(self, msg, exception, locator): ...
- def getColumnNumber(self): ...
- def getLineNumber(self): ...
- def getPublicId(self): ...
- def getSystemId(self): ...
-
-class SAXNotRecognizedException(SAXException): ...
-class SAXNotSupportedException(SAXException): ...
-class SAXReaderNotAvailable(SAXNotSupportedException): ...
|
llvm: Cache generated argument structures
This reduces compilation times by ~15%. | @@ -59,6 +59,23 @@ class _node_wrapper():
def _gen_llvm_function(self, *, ctx, tags:frozenset):
return codegen.gen_node_wrapper(ctx, self._comp, self._node, tags=tags)
+def _comp_cached(func):
+ @functools.wraps(func)
+ def wrapper(bctx, obj):
+ try:
+ obj_cache = bctx._cache.setdefault(obj, dict())
+ except TypeError: # 'super()' references can't be cached
+ obj_cache = None
+ else:
+ if func in obj_cache:
+ return obj_cache[func]
+ val = func(bctx, obj)
+ if obj_cache is not None:
+ obj_cache[func] = val
+ return val
+
+ return wrapper
+
class LLVMBuilderContext:
__global_context = None
@@ -210,6 +227,7 @@ class LLVMBuilderContext:
})
return di_loc
+ @_comp_cached
def get_input_struct_type(self, component):
self._stats["input_structs_generated"] += 1
if hasattr(component, '_get_input_struct_type'):
@@ -218,6 +236,7 @@ class LLVMBuilderContext:
default_var = component.defaults.variable
return self.convert_python_struct_to_llvm_ir(default_var)
+ @_comp_cached
def get_output_struct_type(self, component):
self._stats["output_structs_generated"] += 1
if hasattr(component, '_get_output_struct_type'):
@@ -226,6 +245,7 @@ class LLVMBuilderContext:
default_val = component.defaults.value
return self.convert_python_struct_to_llvm_ir(default_val)
+ @_comp_cached
def get_param_struct_type(self, component):
self._stats["param_structs_generated"] += 1
if hasattr(component, '_get_param_struct_type'):
@@ -234,6 +254,7 @@ class LLVMBuilderContext:
params = component._get_param_values()
return self.convert_python_struct_to_llvm_ir(params)
+ @_comp_cached
def get_state_struct_type(self, component):
self._stats["state_structs_generated"] += 1
if hasattr(component, '_get_state_struct_type'):
@@ -242,6 +263,7 @@ class LLVMBuilderContext:
stateful = component._get_state_values()
return self.convert_python_struct_to_llvm_ir(stateful)
+ @_comp_cached
def get_data_struct_type(self, component):
self._stats["data_structs_generated"] += 1
if hasattr(component, '_get_data_struct_type'):
|
Fix:
Use state machine and state at correct slot to import block | @@ -388,16 +388,26 @@ class BeaconChain(BaseBeaconChain):
)
)
- head_state_slot = self.chaindb.get_head_state_slot()
- if head_state_slot >= block.slot:
- # Importing a block older than the head state. Hence head state can not be used to
- # perform state transition.
- prev_state_slot = parent_block.slot
+ # Default to use state machine and state at `parent_block.slot`.
+ # Change to the ones at head state slot if `block.slot` is on canonical chain
+ # and newer than head state slot.
+ state_machine = self.get_state_machine(at_slot=parent_block.slot)
+ state = self.get_state_by_slot(parent_block.slot)
+ try:
+ canonical_root_at_slot = self.get_canonical_block_root(parent_block.slot)
+ except BlockNotFound:
+ # No corresponding block at this slot which means parent block is not
+ # on canonical chain.
+ pass
else:
- prev_state_slot = head_state_slot
+ is_on_canonical_chain = parent_block.signing_root == canonical_root_at_slot
+ is_newer_than_head_state_slot = (
+ self.chaindb.get_head_state_slot() < block.slot
+ )
+ if is_on_canonical_chain and is_newer_than_head_state_slot:
+ state_machine = self.get_state_machine()
+ state = self.get_head_state()
- state_machine = self.get_state_machine(prev_state_slot)
- state = self.get_state_by_slot(prev_state_slot)
state, imported_block = state_machine.import_block(
block, state, check_proposer_signature=perform_validation
)
|
Seeds : Use IECore::MeshAlgo::distributePoints()
This is Cortex 10's replacement for the PointDistributionOp. | //
//////////////////////////////////////////////////////////////////////////
-#include "IECore/PointDistributionOp.h"
-#include "IECore/CompoundParameter.h"
+#include "IECore/MeshAlgo.h"
#include "Gaffer/StringPlug.h"
@@ -185,12 +184,12 @@ IECore::ConstObjectPtr Seeds::computeBranchObject( const ScenePath &parentPath,
return outPlug()->objectPlug()->defaultValue();
}
- PointDistributionOpPtr op = new PointDistributionOp();
- op->meshParameter()->setValue( mesh->copy() );
- op->densityParameter()->setNumericValue( densityPlug()->getValue() );
- op->parameters()->parameter<StringParameter>( "densityPrimVarName" )->setTypedValue( densityPrimitiveVariablePlug()->getValue() );
-
- PrimitivePtr result = runTimeCast<Primitive>( op->operate() );
+ PointsPrimitivePtr result = MeshAlgo::distributePoints(
+ mesh.get(),
+ densityPlug()->getValue(),
+ V2f( 0 ),
+ densityPrimitiveVariablePlug()->getValue()
+ );
result->variables["type"] = PrimitiveVariable( PrimitiveVariable::Constant, new StringData( pointTypePlug()->getValue() ) );
return result;
|
[hailgenetics/hail] add dill to the image
* [hailgenetics/hail] add dill to the image
This allows the image to be used with PythonJob.
* remove conflict with pandas | @@ -17,8 +17,8 @@ RUN hail-pip-install \
ipython \
matplotlib \
numpy \
- pandas \
scikit-learn \
+ dill \
scipy \
&& rm -rf hail-*-py3-none-any.whl
RUN export SPARK_HOME=$(find_spark_home.py) && \
|
pkg_implementation_body_ada.mako: minor refactoring
TN: | @@ -2239,14 +2239,12 @@ package body ${ada_lib_name}.Implementation is
-- Is_Rebindable --
-------------------
- pragma Warnings (Off, "referenced");
- function Is_Rebindable (Node : ${T.root_node.name}) return Boolean
- is
- pragma Warnings (On, "referenced");
+ function Is_Rebindable (Node : ${T.root_node.name}) return Boolean is
begin
<% rebindable_nodes = [n for n in ctx.astnode_types
if n.annotations.rebindable] %>
% if not rebindable_nodes:
+ pragma Unreferenced (Node);
return True;
% else:
return Node.Kind in ${ctx.astnode_kind_set(rebindable_nodes)};
|
ENH: added Dendrogram.line_width property
[NEW] controls thickness of dendrogram edges | @@ -701,3 +701,12 @@ class Dendrogram(Drawable):
self._tips_as_text = value
self._traces = []
self.layout.annotations = ()
+
+ @property
+ def line_width(self):
+ """width of dendrogram lines"""
+ return self._line_width
+
+ @line_width.setter
+ def line_width(self, width):
+ self._line_width = width
|
Ignore ChannelParticipantLeft during iter_participants
Closes | @@ -155,7 +155,10 @@ class _ParticipantsIter(RequestIter):
users = {user.id: user for user in full.users}
for participant in full.full_chat.participants.participants:
- if isinstance(participant, types.ChannelParticipantBanned):
+ if isinstance(participant, types.ChannelParticipantLeft):
+ # See issue #3231 to learn why this is ignored.
+ continue
+ elif isinstance(participant, types.ChannelParticipantBanned):
user_id = participant.peer.user_id
else:
user_id = participant.user_id
|
Added some padding around functions for API
Summary:
Let me know if this looks OK and we can adjust.
Pull Request resolved: | @@ -217,7 +217,12 @@ table.modindextable td {
div.body {
min-width: 450px;
- max-width: 800px;
+ max-width: 900px;
+}
+
+dd {
+ padding-top: 10px;
+ padding-bottom: 5px;
}
div.body p, div.body dd, div.body li, div.body blockquote {
|
Always run accumulate_answers right before submitting a form
to prevent submitting data from a stale form that has since been updated. | @@ -486,7 +486,6 @@ WebFormSession.prototype.switchLanguage = function (lang) {
WebFormSession.prototype.submitForm = function (form) {
var self = this,
- answers,
accumulate_answers,
prevalidated = true;
@@ -514,17 +513,18 @@ WebFormSession.prototype.submitForm = function (form) {
_accumulate_answers(o);
return _answers;
};
- answers = accumulate_answers(form);
form.isSubmitting(true);
var submitAttempts = 0,
timer = setInterval(function () {
+ var answers;
if (form.blockSubmit() && submitAttempts < 10) {
submitAttempts++;
return;
}
clearInterval(timer);
+ answers = accumulate_answers(form);
self.serverRequest(
{
'action': Formplayer.Const.SUBMIT,
|
Switched arguments to django app + class name
Save user from looking up which database to use. | @@ -4,8 +4,10 @@ import logging
from django.core.management.base import BaseCommand
-from corehq.dbaccessors.couchapps.all_docs import get_all_docs_with_doc_types, get_doc_count_by_type
-from corehq.util.couchdb_management import couch_config
+from dimagi.utils.couch.database import iter_docs
+from dimagi.utils.modules import to_function
+
+from corehq.dbaccessors.couchapps.all_docs import get_doc_ids_by_class
logger = logging.getLogger(__name__)
@@ -25,21 +27,25 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
- 'doc_type',
+ 'django_app',
)
parser.add_argument(
- '-s',
- '--db',
- dest='db',
- help='Slug for couch data base. Leave off if querying main commcarehq db.',
+ 'class_name',
)
- def handle(self, doc_type, **options):
- db = couch_config.get_db(options.get('db', None))
+ def handle(self, django_app, class_name, **options):
+ path = f"corehq.apps.{django_app}.models.{class_name}"
+ couch_class = to_function(path)
+ while not couch_class:
+ path = input(f"Could not find {path}, please enter path: ")
+ couch_class = to_function(path)
+ class_name = path.split(".")[-1]
+
key_counts = defaultdict(lambda: 0)
max_lengths = defaultdict(lambda: 0)
+ doc_ids = get_doc_ids_by_class(couch_class)
- print("Found {} {} docs\n".format(get_doc_count_by_type(db, doc_type), doc_type))
+ print("Found {} {} docs\n".format(len(doc_ids), class_name))
def _evaluate_doc(doc, prefix=None):
for key, value in doc.items():
@@ -57,8 +63,7 @@ class Command(BaseCommand):
max_lengths[key] = max(length, max_lengths[key])
key_counts[key] += 1
- docs = get_all_docs_with_doc_types(db, [doc_type])
- for doc in docs:
+ for doc in iter_docs(couch_class.get_db(), doc_ids):
_evaluate_doc(doc)
max_count = max(key_counts.values())
|
Fixes bug in simulation of circuits with CircuitLabels.
When operators for CircuitLabel labels were created, model._init_virtual_obj(op)
was never called, leaving the operator's .gpindices set to None. This causes
parameter-number AssertionError failures downstream when the model's .from_vector(...)
calls obj.from_vector(v[obj.gpindices]) on cached operations. This commit fixes
this issue by adding the call to _init_virtual_obj. | @@ -88,6 +88,8 @@ class LayerRules(object):
finalOp = _op.ExponentiatedOp(subCircuitOp, circuitlbl.reps, evotype=model.evotype)
else:
finalOp = subCircuitOp
+
+ model._init_virtual_obj(finalOp) # so ret's gpindices get set, essential for being in cache
return finalOp
def prep_layer_operator(self, model, layerlbl, cache):
|
Update Adafruit16CServoDriver.py
Merged and added examples for Arduino, RasPi and Esp8266_01 | -# PLEASE MERGE THIS FILE - SHOULD SHOW HOW TO USE ARDUINO OR RASPI BOTH !
-<<<<<<< HEAD
+# This example shows how to use the Adafruit16CServoDriver
+# It can be used with Arduino, RasPi or Esp8266_01
# From version 1.0.2316 use attach instead of setController
-# Start the Adafruit16CSe#rvodriver that can be used for all PCA9685 devices
-=======
+#
# Start the Adafruit16CServodriver that can be used for all PCA9685 devices
->>>>>>> master
-adaFruit16c = Runtime.createAndStart("AdaFruit16C","Adafruit16CServoDriver")
+adaFruit16c = Runtime.start("AdaFruit16C","Adafruit16CServoDriver")
#
# This part of the script is for the Arduino
-# Comment it out or delete it if you use the GPIO pins of the Raspberry PI
+# Comment it out the three lines below if you don't use the Arduino
# Change COM4 to the port where your Arduino is connected
-arduino = Runtime.createAndStart("Arduino","Arduino")
-arduino.connect("COM4")
-<<<<<<< HEAD
-# adaFruit16c.setController("Arduino","1","0x40")
-adaFruit16c.attach("Arduino","1","0x40")
-#
-# This part creates two servo instances
-=======
-adaFruit16c.setController("Arduino","1","0x40")
+arduino = Runtime.start("arduino","Arduino")
+arduino.connect("COM3")
+adaFruit16c.attach("arduino","0","0x40")
#
# This part of the script is if you use the GPOI pins of the Raspberry PI
-# Comment it out or delete it if you use an Arduino
-raspi = Runtime.createAndStart("RasPi","RasPi")
-adaFruit16c.setController("RasPi","1","0x40")
+# Comment it out the two lines below if you don't use the RasPi
+raspi = Runtime.createAndStart("raspi","RasPi")
+adaFruit16c.attach("raspi","1","0x40")
+#
+# This part of the script is if you use the Esp8266_01 service
+# Comment it out the two lines below if you don't use the Esp8266_01
+# Change COM4 to the port where your Arduino is connected
+esp = Runtime.start("esp","Esp8266_01")
+adaFruit16c.attach("esp","1","0x40")
#
# This part is common for both devices and creates two servo instances
->>>>>>> master
# on port 3 and 8 on the Adafruit16CServoDriver
# Change the names of the servos and the pin numbers to your usage
-thumb = Runtime.createAndStart("Thumb", "Servo")
-elbow = Runtime.createAndStart("Elbow", "Servo")
+thumb = Runtime.start("Thumb", "Servo")
+elbow = Runtime.start("Elbow", "Servo")
# attach it to the pwm board - pin 3 & 8
thumb.attach(adaFruit16c,3)
elbow.attach(adaFruit16c,8)
|
ffu: Introduce prep workarounds for FFU
We make sure is_bootstrap_node is always set and we reset hiera
hierarchy on first run.
Resolves: rhbz#1535406
Closes-Bug: | @@ -372,6 +372,7 @@ outputs:
global_vars:
deploy_steps_max: {{deploy_steps_max}}
common_deploy_steps_tasks: {get_file: deploy-steps-tasks.yaml}
+ docker_puppet_script: {get_file: ../docker/docker-puppet.py}
deploy_steps_playbook:
str_replace:
params:
@@ -571,6 +572,34 @@ outputs:
- include_tasks: fast_forward_upgrade_prep_tasks.yaml
- include_tasks: fast_forward_upgrade_bootstrap_tasks.yaml
fast_forward_upgrade_prep_tasks: |
+{%- for role in roles %}
+ - shell: |
+ #!/bin/bash
+ if [ ! -f /root/.ffu_workaround ]; then
+ touch /root/.ffu_workaround
+ os-apply-config -m /var/lib/os-collect-config/{{role.deprecated_server_resource_name|default(role.name)}}Deployment.json
+ systemctl stop os-collect-config
+ rm -r /var/lib/os-collect-config/*
+ rm -f /usr/libexec/os-refresh-config/configure.d/40-hiera-datafiles
+ rm -f /usr/libexec/os-apply-config/templates/etc/puppet/hiera.yaml
+ rm -f /usr/libexec/os-refresh-config/configure.d/10-hiera-disable
+ fi
+ when: role_name == '{{role.name}}'
+ name: Run Fast Forward Upgrade Prep Workarounds for {{role.name}}
+{%- endfor %}
+{% raw %}
+ - name: get bootstrap nodeid
+ tags: common
+ command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid
+ register: bootstrap_node
+ - name: set is_bootstrap_node fact
+ tags: common
+ set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
+{% endraw %}
+ - name: Create /var/lib/docker-puppet
+ file: path=/var/lib/docker-puppet state=directory setype=svirt_sandbox_file_t selevel=s0 recurse=true
+ - name: Write docker-puppet.py
+ copy: src=docker_puppet_script.yaml dest=/var/lib/docker-puppet/docker-puppet.py force=yes mode=0600
- include_tasks: fast_forward_upgrade_prep_role_tasks.yaml
with_sequence: start=0 end={{fast_forward_upgrade_prep_steps_max}}
loop_control:
|
Logging stuff
Summary:
Pull Request resolved:
Add more logging and flag. | @@ -498,6 +498,9 @@ NetDef OnnxifiTransformer::SubnetToOnnxifiOpViaC2(
// Debugging stuff
if (opts_.debug) {
+ WriteProtoToTextFile(
+ net,
+ "debug_original_net_" + c10::to_string(onnxifi_op_id_) + ".pb_txt");
WriteProtoToTextFile(
onnxifi_net,
"debug_onnxifi_net_" + c10::to_string(onnxifi_op_id_) + ".pb_txt");
@@ -903,6 +906,11 @@ void OnnxifiTransformer::transform(
// Need to figure out a proper place to handle device option
net_opt.mutable_device_option()->CopyFrom(pred_net->device_option());
+
+ if (opts_.debug) {
+ WriteProtoToTextFile(*pred_net, "debug_full_pred_net.pb_txt");
+ WriteProtoToTextFile(net_opt, "debug_full_opt_net.pb_txt");
+ }
pred_net->Swap(&net_opt);
}
|
Update __init__.py
Deleted redundant line 29 "from pyquil.api.compiler import CompilerConnection"
from pyquil.api.compiler import CompilerConnection
from pyquil.api.qvm import QVMConnection, QVM
from pyquil.api.qpu import QPUConnection, get_devices, QPU
-from pyquil.api.compiler import CompilerConnection
from pyquil.device import Device
from pyquil.api.wavefunction_simulator import WavefunctionSimulator
from pyquil.api._base_connection import ForestConnection
|
Deseasonify: guard bot nickname setter
Previously we'd always set the nickname, as the BaseSeason class
provides a default. However, it feels cleaner to also guard this,
in case a specific season decides to override the attr to something
falsey. | @@ -292,6 +292,7 @@ class BrandingManager(commands.Cog):
# await self.bot.set_avatar(self.avatar.download_url)
log.info(f"Applying avatar: {self.avatar.download_url}")
+ if self.current_season.bot_name:
# await self.bot.set_nickname(self.current_season.bot_name)
log.info(f"Applying nickname: {self.current_season.bot_name}")
|
replaceCategoryLinks: prevent failing on dewiki with {{Personendaten}}
The script involving replaceCategoryLinks should not break
but instead should skip the page on German Wikipedia with
{{Personendaten}} template | @@ -1227,11 +1227,12 @@ def replaceCategoryLinks(oldtext, new, site=None, addOnly=False):
if site is None:
site = pywikibot.Site()
if site.sitename == 'wikipedia:de' and '{{Personendaten' in oldtext:
- raise pywikibot.Error(
+ pywikibot.error(
'The Pywikibot is no longer allowed to touch categories on the '
'German\nWikipedia on pages that contain the Personendaten '
'template because of the\nnon-standard placement of that template.\n'
'See https://de.wikipedia.org/wiki/Hilfe:Personendaten#Kopiervorlage')
+ return oldtext
separator = site.family.category_text_separator
iseparator = site.family.interwiki_text_separator
separatorstripped = separator.strip()
|
Enhance user documentation for .env_group
TN: | @@ -275,14 +275,13 @@ def env_group(self, env_array, with_md=None):
"""
Return a new lexical environment that logically groups together multiple
environments. `env_array` must be an array that contains the environments
- to be grouped.
+ to be grouped. If it is empty, the empty environment is returned.
- :param AbstractExpression env_array: Expression that will return
- an array of lexical environments. If this array is empty, the empty
- environment is returned.
+ If provided, `with_md` must be a metadata structure: it will be made the
+ default metadata for this lexical environment.
- :param AbstractExpression with_md: Optional metadata struct. If passed,
- then it will be made the default metadata for this lexical environment.
+ :type env_array: AbstractExpression
+ :type with_md: AbstractExpression
"""
from langkit.expressions import No
|
Simplify init container env var assert, update order
python3 dicts are ordered so some tests relying on this fail
init_container = deployment.spec.template.spec.initContainers[0]
assert init_container is not None
- assert 3 == len(init_container.env)
- self._assert_env_var(init_container.env[0], "K8S_DEPLOYMENT", app_spec.name)
- self._assert_env_var(init_container.env[1], "AWS_REGION", self.AWS_REGION)
- self._assert_env_var(init_container.env[2], "SECRET_GROUPS", self.GROUPS)
+ expected = [
+ EnvVar(name="AWS_REGION", value=self.AWS_REGION),
+ EnvVar(name="SECRET_GROUPS", value=self.GROUPS),
+ EnvVar(name="K8S_DEPLOYMENT", value=app_spec.name),
+ ]
+ assert init_container.env == expected
def test_annotations(self, deployment, app_spec, strongbox_secrets, secrets_spec):
strongbox_secrets.apply(deployment, app_spec, secrets_spec)
@@ -368,10 +370,3 @@ class TestStrongboxSecrets(object):
CANARY_NAME: CANARY_VALUE,
"iam.amazonaws.com/role": self.IAM_ROLE
}
-
- @staticmethod
- def _assert_env_var(env_var, name, value):
- __tracebackhide__ = True
-
- assert name == env_var.name
- assert value == env_var.value
|
Add scikit-hep reference
Add scikit-hep reference | @@ -24,7 +24,7 @@ zfit: scalable pythonic fitting
zfit is a highly scalable and customizable model manipulation and fitting library. It uses
`TensorFlow <https://www.tensorflow.org/>`_ as its computational backend
-and is optimised for simple and direct manipulation of probability density functions.
+and is optimised for simple and direct manipulation of probability density functions. The project affiliated with and well integrated into `scikit-hep <https://scikit-hep.org/>`, the HEP Python ecosystem.
- **Tutorials**: `Interactive IPython Tutorials <https://github.com/zfit/zfit-tutorials>`_
- **Quick start**: `Example scripts <examples>`_
|
fixing typos
I just randomly find these by the way. Good work on the framework! | @@ -30,7 +30,7 @@ class BaseStrategy:
Parameters
-----------
- score_series : pd.Seires
+ score_series : pd.Series
stock_id , score.
current : Position()
current state of position.
|
Fix property 'disable_auto_refresh'
TypeError: Cannot read property 'disable_auto_refresh' of undefined | @@ -1044,7 +1044,7 @@ frappe.views.ListView = class ListView extends frappe.views.BaseList {
}
setup_realtime_updates() {
- if (this.list_view_settings.disable_auto_refresh) {
+ if (this.list_view_settings && this.list_view_settings.disable_auto_refresh) {
return;
}
frappe.realtime.on('list_update', data => {
|
Fix, tidy, re-enable outputs= related test
The test was previously not waiting for tasks to finish,
which was a race condition. | import argparse
import os
+import pytest
import shutil
-import pytest
+from concurrent.futures import wait
-from parsl.app.app import python_app
+from parsl import File, python_app
from parsl.tests.configs.local_threads import config
@@ -21,8 +22,7 @@ def double(x, outputs=[]):
whitelist = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'configs', '*threads*')
-# @pytest.mark.whitelist(whitelist, reason='broken in IPP')
[email protected]("Broke somewhere between PR #525 and PR #652")
[email protected]
def test_launch_apps(n=2, outdir='outputs'):
if not os.path.exists(outdir):
os.makedirs(outdir)
@@ -31,17 +31,17 @@ def test_launch_apps(n=2, outdir='outputs'):
os.makedirs(outdir)
print('outdir is ', outdir)
- all_futs = {}
+ all_futs = []
for i in range(n):
- fus = double(i, outputs=['{0}/{1}.txt'.format(outdir, i)])
- print(fus.outputs)
- all_futs[fus] = fus
+ fus = double(i, outputs=[File('{0}/{1}.txt'.format(outdir, i))])
+ all_futs.append(fus)
+
+ wait(all_futs)
stdout_file_count = len(
[item for item in os.listdir(outdir) if item.endswith('.txt')])
assert stdout_file_count == n, "Only {}/{} files in '{}' ".format(
len(os.listdir('outputs/')), n, os.listdir(outdir))
- print("[TEST STATUS] test_parallel_for [SUCCESS]")
if __name__ == '__main__':
|
tests/report: Drop redundant calls to clear_registry
All registries are cleared after every test. | @@ -14,8 +14,6 @@ class TestReport():
def test_reportOutputPref_true(self):
- pnl.clear_registry(pnl.FunctionRegistry)
-
t = pnl.TransferMechanism()
t.reportOutputPref = ReportOutput.FULL
@@ -30,8 +28,6 @@ class TestReport():
def test_reportOutputPref_params(self):
- pnl.clear_registry(pnl.FunctionRegistry)
-
t = pnl.TransferMechanism()
t.reportOutputPref = 'params'
@@ -56,7 +52,6 @@ class TestReport():
def test_simple_output_and_progress(self):
"""Test simple sequence of three Mechanisms, using all report_output and report_progress options
"""
- pnl.clear_registry(pnl.FunctionRegistry)
a = pnl.TransferMechanism(name='a')
b = pnl.TransferMechanism(name='b')
@@ -186,8 +181,6 @@ class TestReport():
composition, using input dictionary, generator instance and generator function.
"""
- pnl.clear_registry(pnl.FunctionRegistry)
-
# instantiate mechanisms and inner comp
ia = pnl.TransferMechanism(name='ia')
ib = pnl.TransferMechanism(name='ib')
@@ -448,7 +441,6 @@ class TestReport():
def test_nested_comps_and_sims_with_modulated_and_monitored_params_and_use_prefs(self):
- pnl.clear_registry(pnl.FunctionRegistry)
# instantiate mechanisms and inner comp
ia = pnl.TransferMechanism(name='ia')
ib = pnl.TransferMechanism(name='ib')
|
Additional user information
Specify that users should call 'method = full_template' when using the manual wavelength
calibration output of `pypeit_identify`.
modified: pypeit/core/gui/identify.py | @@ -725,8 +725,9 @@ class Identify:
msgs.info("Your arxiv solution has been written to ./wvarxiv.fits\n")
msgs.info(f"Your arxiv solution has been cached.{msgs.newline()}"
- f"Use 'reid_arxiv = {cachename}' in your{msgs.newline()}"
- "PypeIt Reduction File to utilize this wavelength solution.")
+ f"Use 'reid_arxiv = {cachename}' and{msgs.newline()}"
+ f"'method = full_template' in your PypeIt{msgs.newline()}"
+ "Reduction File to utilize this wavelength solution.")
# Write the WVCalib file
outfname = "wvcalib.fits"
|
Update avcodecs.py
fix codec name | @@ -856,7 +856,7 @@ class H265VAAPI(H265Codec):
H.265/AVC VAAPI ideo codec.
"""
codec_name = 'h265vaapi'
- ffmpeg_codec_name = 'h265_vaapi'
+ ffmpeg_codec_name = 'hevc_vaapi'
def _codec_specific_produce_ffmpeg_list(self, safe, stream=0):
optlist = super(H265VAAPI, self)._codec_specific_produce_ffmpeg_list(safe, stream)
|
Fixed minor bug in standard deviation policy
std jacobian at least 2d | @@ -232,7 +232,7 @@ class MultivariateStateStdGaussianPolicy:
# Compute variance derivative
w = (delta**2 - diag_sigma) * std / diag_sigma**2
- j_sigma = self._std_approximator.diff(state).T
+ j_sigma = np.atleast_2d(self._std_approximator.diff(state).T)
g_sigma = np.atleast_1d(w.dot(j_sigma))
return np.concatenate((g_mu, g_sigma), axis=0)
|
Update __init__.py
# BUGFIX: remove_fields_space() function will drop Feature object field | @@ -518,7 +518,7 @@ def remove_fields_space(fields: [list, str, tuple]):
"""
if isinstance(fields, str):
return fields.replace(" ", "")
- return [i.replace(" ", "") for i in fields if isinstance(i, str)]
+ return [i.replace(" ", "") if isinstance(i, str) else str(i) for i in fields]
def normalize_cache_fields(fields: [list, tuple]):
|
Free up some disk space on the runner
for update_combined_federal data | @@ -12,6 +12,14 @@ jobs:
run:
runs-on: ubuntu-latest
steps:
+ - name: Maximize build space
+ uses: easimon/maximize-build-space@master
+ with:
+ root-reserve-mb: 512
+ swap-size-mb: 1024
+ remove-dotnet: 'true'
+ remove-android: 'true'
+ remove-haskell: 'true'
- uses: actions/checkout@v2
- uses: actions/cache@v2
with:
|
Update Jenkinsfile-undeploy
try adding primaryBranch: "master" argument to tadaUndeployEachBranch | @@ -19,7 +19,7 @@ pipeline {
booleanParam(
name: "UNDEPLOY_MERGED_BRANCHES",
defaultValue: true,
- description: "Undeploy branches that have been merged into the main branch (but not deleted) from staging.",
+ description: "Undeploy branches that have been merged into the master branch (but not deleted) from staging.",
)
string(
name: "UNDEPLOY_BRANCH_NAMES",
@@ -49,7 +49,7 @@ pipeline {
stage("undeploy-branches") {
steps {
withKubeConfig([credentialsId: "kubeconfig-nrel-test"]) {
- tadaUndeployEachBranch(rancherProject: env.RANCHER_PROJECT, undeployDeletedBranches: params.UNDEPLOY_DELETED_BRANCHES, undeployMergedBranches: params.UNDEPLOY_MERGED_BRANCHES, undeployBranchNames: params.UNDEPLOY_BRANCH_NAMES) {
+ tadaUndeployEachBranch(rancherProject: env.RANCHER_PROJECT, undeployDeletedBranches: params.UNDEPLOY_DELETED_BRANCHES, undeployMergedBranches: params.UNDEPLOY_MERGED_BRANCHES, undeployBranchNames: params.UNDEPLOY_BRANCH_NAMES, primaryBranch: "master") {
tadaWithWerfEnv(rancherProject: env.RANCHER_PROJECT, dbBaseName: env.DB_BASE_NAME) {
tadaUndeployBranch()
}
|
updated gan readme
includes pictures | @@ -39,3 +39,37 @@ to copy kaggle.json into a folder first.
```
python run.py -config ./configs/dcgan.yaml
```
+## Results
+
+| Model | Paper | Samples |
+|------------------------------------------------------------------------|--------------------------------------------------|---------|
+| GAN ([Code][dcgan_code], [Config][dcgan_config]) |[Link](https://arxiv.org/abs/1406.2661) | ![][1] |
+| SNGAN ([Code][sngan_code], [Config][sngan_config]) |[Link](https://arxiv.org/abs/1802.05957) | ![][2] |
+| LOGAN ([Code][logan_code], [Config][logan_config]) |[Link](https://arxiv.org/abs/1912.00953) | ![][3] |
+| WGAN ([Code][wgan_code], [Config][wgan_config]) |[Link](https://arxiv.org/abs/1701.07875) | ![][4] |
+| GP-WGAN ([Code][gp_wgan_code], [Config][gp_wgan_config]) |[Link](https://arxiv.org/pdf/1704.00028.pdf) | ![][5] |
+
+
+## Acknowledgement
+
+The idea of this zoo and some of the scripts were based on Anand Krishnamoorthy [Pytorch-VAE library](https://github.com/AntixK/PyTorch-VAE), we also used the script from [sayantanauddy](https://github.com/sayantanauddy/vae_lightning) to transform and download the celeba from kaggle.
+
+-----------
+
+[dcgan_code]: https://raw.githubusercontent.com/probml/pyprobml/master/scripts/gan/models/dcgan.py
+[gp_wgan_code]: https://raw.githubusercontent.com/probml/pyprobml/master/scripts/gan/models/gp_wgan.py
+[logan_code]: https://raw.githubusercontent.com/probml/pyprobml/master/scripts/gan/models/logan.py
+[sngan_code]: https://raw.githubusercontent.com/probml/pyprobml/master/scripts/gan/models/sngan.py
+[wgan_code]: https://github.com/probml/pyprobml/blob/master/scripts/gan/models/wgan.py
+
+[dcgan_config]: https://github.com/probml/pyprobml/blob/master/scripts/gan/configs/dcgan.yaml
+[gp_wgan_config]: https://github.com/probml/pyprobml/blob/master/scripts/gan/configs/gp_wgan.yaml
+[logan_config]: https://github.com/probml/pyprobml/blob/master/scripts/gan/configs/logan.yaml
+[sngan_config]: https://github.com/probml/pyprobml/blob/master/scripts/gan/configs/sngan.yaml
+[wgan_config]: https://github.com/probml/pyprobml/blob/master/scripts/gan/configs/wgan.yaml
+
+[1]: https://github.com/probml/pyprobml/blob/master/scripts/gan/assets/dcgan.png
+[2]: https://github.com/probml/pyprobml/blob/master/scripts/gan/assets/logan.png
+[3]: https://github.com/probml/pyprobml/blob/master/scripts/gan/assets/sngan.png
+[4]: https://github.com/probml/pyprobml/blob/master/scripts/gan/assets/wgan.png
+[5]: https://github.com/probml/pyprobml/blob/master/scripts/gan/assets/gp_wgan.png
|
fix: Use ImportError instead of ModuleNotFoundError
ModuleNotFoundError is available in python 3.6
This should have been included in Frappe PR | @@ -18,7 +18,7 @@ class PrintSettings(Document):
printer_list = []
try:
import cups
- except ModuleNotFoundError:
+ except ImportError:
frappe.throw("You need to install pycups to use this feature!")
return
try:
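As a small aside (editor's sketch, not part of the commit): the broader except works because ModuleNotFoundError, introduced in Python 3.6, is a subclass of ImportError, so catching ImportError covers both older and newer interpreters.

# Runnable on Python 3.6+: the more specific exception is a subclass of the general one.
assert issubclass(ModuleNotFoundError, ImportError)

try:
    import cups  # optional dependency, as in the diff above
except ImportError as exc:  # also catches ModuleNotFoundError on 3.6+
    print(f"pycups is not installed: {exc}")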
|
Add `try/except` to allow for cases where a database file is irrelevant
This is necessary in order to pass `tests/test_cli_parsing.py` | @@ -116,6 +116,7 @@ def run_main(argv: Optional[List[str]] = None) -> int:
add_log_headers()
# Check the database file exists, if one is given
+ try:
if args.dbpath:
logger.info("Checking for database file: {args.dbpath}")
if not os.path.isfile(args.dbpath):
@@ -123,6 +124,8 @@ def run_main(argv: Optional[List[str]] = None) -> int:
f"No database file at {args.dbpath}. Create one using `pyani createdb`."
)
return 0
+ except AttributeError:
+ pass
# Run the subcommand
returnval = args.func(args)
|
ENH: simplify low_0_bit function for Sobol
* ENH: simplify low_0_bit function for Sobol
This patch simplifies the low_0_bit function, which finds the index
of the rightmost zero bit. Consequently it obtains a slight
performance improvement.
* Apply suggestions from code review | @@ -138,14 +138,10 @@ cdef int low_0_bit(const int x) nogil:
Position of the right-most 0 bit.
"""
- cdef int z = x
cdef int i = 0
- while True:
+ while x & (1 << i) != 0:
i += 1
- if z % 2 == 0:
- break
- z = z // 2
- return i
+ return i + 1
@cython.boundscheck(False)
|
ebd/ebuild.bash: avoid writing env to fs during pkg_pretend phase
Alleviates some race issues with file permissions when running threaded
sanity checks. | @@ -231,7 +231,7 @@ __generate_initial_ebuild_environ() {
fi
__ensure_PATH "${PKGCORE_EXISTING_PATH}"
- if [[ -n ${T} ]]; then
+ if [[ -n ${T} && ${EBUILD_PHASE} != "pretend" ]]; then
# Use a file if possible; faster since bash does this lovely byte by
# byte reading if it's a pipe. Having the file around is useful for
# debugging also.
|
Closes:
Fix to interpret subsequent points of absolute MoveTo (M) command as absolute LineTo (L). | @@ -298,7 +298,7 @@ class SVGMobject(VMobject):
if not isinstance(element, minidom.Element):
return
if element.hasAttribute('id'):
- return element
+ return [element]
for e in element.childNodes:
all_childNodes_have_id.append(self.get_all_childNodes_have_id(e))
return self.flatten([e for e in all_childNodes_have_id if e])
@@ -371,9 +371,9 @@ class VMobjectFromSVGPathstring(VMobject):
new_points = new_points[1:]
command = "L"
- # Treat everything as relative line-to until empty
for p in new_points:
- # Treat as relative
+ if isLower:
+ # Treat everything as relative line-to until empty
p[0] += self.points[-1, 0]
p[1] += self.points[-1, 1]
self.add_line_to(p)
|
Fixes bug in build_explicit_model (qubit dim check in state space
labels was 2 and should have been 4). | @@ -531,8 +531,9 @@ def basis_build_explicit_model(stateSpaceLabels, basis,
effects = []
if ELbls == "standard":
+ qubit_dim = 4 # 2 if evotype in ('statevec', 'stabilizer') else 4
if stateSpaceLabels.num_tensor_prod_blocks() == 1 and \
- all([ldim == 2 for ldim in stateSpaceLabels.tensor_product_block_dims(0)]):
+ all([ldim == qubit_dim for ldim in stateSpaceLabels.tensor_product_block_dims(0)]):
# a single tensor product block comprised of qubits: '000', '001', etc.
nQubits = len(stateSpaceLabels.tensor_product_block_dims(0))
ELbls = [''.join(t) for t in _itertools.product(('0', '1'), repeat=nQubits)]
|
[bugfix] Add plural support to archivebot-older-than
Depends-On: | @@ -380,23 +380,22 @@ class DiscussionThread(object):
"""
Check whether thread has to be archived.
- @return: archiving reason i18n string or empty string.
- @rtype: str
+ @return: the archivation reason as a dict of localization args
+ @rtype: dict
"""
+ # Archived by timestamp
algo = archiver.get_attr('algo')
re_t = re.search(r'^old\((.*)\)$', algo)
if re_t:
if not self.timestamp:
- return ''
- # TODO: handle this:
- # return 'unsigned'
+ return None
+ # TODO: handle unsigned
maxage = str2time(re_t.group(1), self.timestamp)
if self.now - self.timestamp > maxage:
duration = str2localized_duration(archiver.site, re_t.group(1))
- return i18n.twtranslate(self.code,
- 'archivebot-older-than',
- {'duration': duration})
- return ''
+ return {'duration': duration}
+ # TODO: handle marked with template
+ return None
class DiscussionPage(pywikibot.Page):
@@ -692,7 +691,18 @@ class PageArchiver(object):
self.comment_params['archives'] \
= comma.join(a.title(as_link=True)
for a in self.archives.values())
- self.comment_params['why'] = comma.join(whys)
+ # Find out the reasons and return them localized
+ translated_whys = set()
+ for why, arg in whys.items():
+ # Archived by timestamp
+ if why == 'duration':
+ translated_whys.add(
+ i18n.twtranslate(self.site.code,
+ 'archivebot-older-than',
+ {'duration': arg,
+ 'count': self.archived_threads}))
+ # TODO: handle unsigned or archived by template
+ self.comment_params['why'] = comma.join(translated_whys)
comment = i18n.twtranslate(self.site.code,
'archivebot-page-summary',
self.comment_params)
|
Define separate target for proxy minions
Proxy minions require a lot more to reload than regular minions do. Provide a
separate _target for proxy minions so that the `__proxy__` dictionary gets
reloaded by windows proxy minions. | @@ -3242,3 +3242,62 @@ class ProxyMinion(Minion):
self.functions['saltutil.sync_grains'](saltenv='base')
self.grains_cache = self.opts['grains']
self.ready = True
+
+ @classmethod
+ def _target(cls, minion_instance, opts, data, connected):
+ if not minion_instance:
+ minion_instance = cls(opts)
+ minion_instance.connected = connected
+ if not hasattr(minion_instance, 'functions'):
+ # Need to load the modules so they get all the dunder variables
+ functions, returners, function_errors, executors = (
+ minion_instance._load_modules(grains=opts['grains'])
+ )
+ minion_instance.functions = functions
+ minion_instance.returners = returners
+ minion_instance.function_errors = function_errors
+ minion_instance.executors = executors
+
+ # Pull in the utils
+ minion_instance.utils = salt.loader.utils(minion_instance.opts)
+
+ # Then load the proxy module
+ minion_instance.proxy = salt.loader.proxy(minion_instance.opts, utils=minion_instance.utils)
+
+ # And re-load the modules so the __proxy__ variable gets injected
+ functions, returners, function_errors, executors = (
+ minion_instance._load_modules(grains=opts['grains'])
+ )
+ minion_instance.functions = functions
+ minion_instance.returners = returners
+ minion_instance.function_errors = function_errors
+ minion_instance.executors = executors
+
+ minion_instance.functions.pack['__proxy__'] = minion_instance.proxy
+ minion_instance.proxy.pack['__salt__'] = minion_instance.functions
+ minion_instance.proxy.pack['__ret__'] = minion_instance.returners
+ minion_instance.proxy.pack['__pillar__'] = minion_instance.opts['pillar']
+
+ # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__
+ minion_instance.utils = salt.loader.utils(minion_instance.opts, proxy=minion_instance.proxy)
+ minion_instance.proxy.pack['__utils__'] = minion_instance.utils
+
+ # Reload all modules so all dunder variables are injected
+ minion_instance.proxy.reload_modules()
+
+ fq_proxyname = opts['proxy']['proxytype']
+ proxy_init_fn = minion_instance.proxy[fq_proxyname+'.init']
+ proxy_init_fn(opts)
+ if not hasattr(minion_instance, 'serial'):
+ minion_instance.serial = salt.payload.Serial(opts)
+ if not hasattr(minion_instance, 'proc_dir'):
+ uid = salt.utils.get_uid(user=opts.get('user', None))
+ minion_instance.proc_dir = (
+ get_proc_dir(opts['cachedir'], uid=uid)
+ )
+
+ with tornado.stack_context.StackContext(minion_instance.ctx):
+ if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
+ Minion._thread_multi_return(minion_instance, opts, data)
+ else:
+ Minion._thread_return(minion_instance, opts, data)
|
Fix processing `ChangedMasterCopy` event
`ChangedMasterCopy` event was supposed to generate an argument `_masterCopy` instead of `masterCopy`. Due to a typo, it was not. | @@ -318,8 +318,8 @@ class SafeEventsIndexer(EventsIndexer):
internal_tx_decoded = None
elif event_name == "ChangedMasterCopy":
internal_tx_decoded.function_name = "changeMasterCopy"
- internal_tx.arguments = {
- "_masterCopy": args.get("singleton") or args.get("masterCopy")
+ internal_tx_decoded.arguments = {
+ "_masterCopy": args.get("masterCopy") or args.get("singleton")
}
else:
# 'SignMsg', 'ExecutionFailure', 'ExecutionSuccess',
|
Set GUI binary name to chia-blockchain in the Ubuntu DEB
set gui binary name to chia-blockchain | @@ -83,17 +83,21 @@ if [ "$PLATFORM" = "arm64" ]; then
sudo gem install public_suffix -v 4.0.7
sudo gem install fpm
echo USE_SYSTEM_FPM=true electron-builder build --linux deb --arm64 \
+ --config.extraMetadata.name=chia-blockchain \
--config.productName="$PRODUCT_NAME" --config.linux.desktop.Name="Chia Blockchain" \
--config.deb.packageName="chia-blockchain"
USE_SYSTEM_FPM=true electron-builder build --linux deb --arm64 \
+ --config.extraMetadata.name=chia-blockchain \
--config.productName="$PRODUCT_NAME" --config.linux.desktop.Name="Chia Blockchain" \
--config.deb.packageName="chia-blockchain"
LAST_EXIT_CODE=$?
else
echo electron-builder build --linux deb --x64 \
+ --config.extraMetadata.name=chia-blockchain \
--config.productName="$PRODUCT_NAME" --config.linux.desktop.Name="Chia Blockchain" \
--config.deb.packageName="chia-blockchain"
electron-builder build --linux deb --x64 \
+ --config.extraMetadata.name=chia-blockchain \
--config.productName="$PRODUCT_NAME" --config.linux.desktop.Name="Chia Blockchain" \
--config.deb.packageName="chia-blockchain"
LAST_EXIT_CODE=$?
|
Remove test string in pillow checkpoint id
This no longer needs the string test because it does not write to a test
ES index. | "LedgerToElasticsearchPillow": {
"advertised_name": "LedgerToElasticsearchPillow",
"change_feed_type": "KafkaChangeFeed",
- "checkpoint_id": "LedgerToElasticsearchPillow-test_ledgers_2016-03-15",
+ "checkpoint_id": "LedgerToElasticsearchPillow-ledgers_2016-03-15",
"full_class_name": "pillowtop.pillow.interface.ConstructedPillow",
"name": "LedgerToElasticsearchPillow"
},
|
Fixed problems with "fit_windows" not None in Measurement_analysis
If fit_windows is not None, the hanger analysis crashes, returning an
error due to different dimensions for x and y data.
This commit solves the problem | @@ -3850,7 +3850,7 @@ class Homodyne_Analysis(MeasurementAnalysis):
self.save_fig(fig, figname='complex', **kw)
self.save_fig(fig2, xlabel='Mag', **kw)
else:
- ax.plot(self.sweep_points, fit_res.best_fit, 'r-')
+ ax.plot(data_x, fit_res.best_fit, 'r-')
f0 = self.fit_results.values['f0']
plt.plot(f0, fit_res.eval(f=f0), 'o', ms=8)
|
Update Pack README
Done. | -# Azure DevOps Pack
Use the Azure DevOps pack to manage Git repositories in Azure DevOps services. Microsoft Azure DevOps Server provides version control, reporting, requirements management, project management, automated builds, testing and release management capabilities. It covers the entire application lifecycle, and enables DevOps capabilities.<br>
## What does this pack do?
-- Test the connectivity to Azure.
-- Start, complete, or rerun the authorization process.
- Get mapping fields from a remote incident.
-- Run a pipeline.
+- Run a pipeline. A DevOps pipeline is a set of automated processes and tools that allows both developers and operations professionals to work cohesively to build and deploy code to a production environment.
- Add a user, assign the user a license and extensions, and make the user a member of a project group in an account.
- Remove the user from all project memberships.
-- Create, update, or retrieve a new pull request.
-- Retrieve pull requests in repository.
+- Create, update, or retrieve a pull request.
+- Retrieve pull requests in a repository.
- Retrieve all projects in the organization that the authenticated user has access to.
-- Retrieve git repositories in the organization project.
+- Retrieve Git repositories in the organization project.
- Query users that were added to organization projects.
- Retrieve information for a pipeline run.
-- Retrieve pipeline runs list.
-- Retrieve project pipelines list.
-- Retrieve repository branches list.
+- Retrieve a list of pipeline runs, project pipelines, or repository branches.
+
This pack contains an integration, whose main purpose is to manage Git repositories in Azure DevOps Services.<br>
+
|
format fix
fixing formatting (line too long) | @@ -275,7 +275,10 @@ class SceneWidget(glooey.Widget):
self.vertex_list[geometry_name].delete()
# convert geometry to constructor args
- args = rendering.convert_to_vertexlist(geometry, group=mesh_group, smooth=self._smooth)
+ args = rendering.convert_to_vertexlist(
+ geometry,
+ group=mesh_group,
+ smooth=self._smooth)
# create the indexed vertex list
self.vertex_list[geometry_name] = self.batch.add_indexed(*args)
# save the MD5 of the geometry
|
Add select_prefetch_join() to bench.
[skip ci] | @@ -117,6 +117,14 @@ def select_prefetch(i):
for i in c.items:
pass
+@timed
+def select_prefetch_join(i):
+ query = prefetch(Collection.select(), Item,
+ prefetch_type=PREFETCH_TYPE.JOIN)
+ for c in query:
+ for i in c.items:
+ pass
+
if __name__ == '__main__':
db.create_tables([Register, Collection, Item])
@@ -138,4 +146,5 @@ if __name__ == '__main__':
select_related_dicts()
select_related_dbapi_raw()
select_prefetch()
+ select_prefetch_join()
db.drop_tables([Register, Collection, Item])
|
Fix lint on master
Test Plan: N/A
Reviewers: schrockn, max | from dagster import check, PipelineDefinition
from dagster.core.execution.api import create_execution_plan
-from dagster.utils.indenting_printer import IndentingStringIoPrinter
from .operators import DagsterDockerOperator, DagsterOperator, DagsterPythonOperator
from .compile import coalesce_execution_steps
|
Remove unnecessary comment
cr | @@ -57,7 +57,6 @@ class InitController(AbstractBaseController):
epilog = strings['init.epilog']
def do_command(self):
- # get arguments
interactive = self.app.pargs.interactive
region_name = self.app.pargs.region
noverify = self.app.pargs.no_verify_ssl
|
Update data_load.py
Add more mapping | import numpy as np
+from ..datasets.amazon import AmazonInstantVideo
from ..datasets.dunnhumby import Dunnhumby
from ..datasets.epinions import Epinions
from ..datasets.instacart import Instacart, Instacart_25
from ..datasets.last_fm import LastFM
-from ..datasets.movielens import Movielens_1m, Movielens_25m, Movielens_100k
+from ..datasets.movielens import Movielens_1m, Movielens_10m, Movielens_25m, Movielens_100k
from ..datasets.tafeng import Tafeng
+from ..datasets.yelp import Yelp
from ..utils.common_util import print_dict_as_table
@@ -93,6 +95,9 @@ def load_split_dataset(config):
"dunnhumby": Dunnhumby,
"instacart": Instacart,
"instacart_25": Instacart_25,
+ "yelp": Yelp,
+ "ml_10m": Movielens_10m,
+ "amazon-instant-video": AmazonInstantVideo,
}
dataset = dataset_mapping[config["dataset"]["dataset"]](
root_dir=config["system"]["root_dir"]
|
Update metadata.py
SMA prefix instead of MDH | @@ -197,9 +197,9 @@ class Metadata:
video["covr"] = [MP4Cover(cover, MP4Cover.FORMAT_JPEG)] # jpeg poster
if self.original:
- video["\xa9too"] = "MDH:" + os.path.basename(self.original)
+ video["\xa9too"] = "SMA:" + os.path.basename(self.original)
else:
- video["\xa9too"] = "MDH:" + os.path.basename(path)
+ video["\xa9too"] = "SMA:" + os.path.basename(path)
try:
self.log.info("Trying to write tags.")
|
fix Space Race reset
overwrites $100 eventually | ;license:MIT
-;(c) 2022 by qkumba
+;(c) 2022 by qkumba, Frank M.
!cpu 6502
!to "build/PRELAUNCH.INDEXED/SPACE.RACE",plain
!source "src/prelaunch/common.a"
+ENABLE_ACCEL
+ +NEW_RESET_VECTOR $300
lda #$60
sta $1621
jsr $1600 ; decompress
|
Update logstash.conf-template
missed some '++' | @@ -17,14 +17,14 @@ filter {
if [@metadata][DEBUG] != 'true' {
ruby {
init => "@ordernum = 0"
- code => "@ordernum += 1; tag_items = event['program'].split('++'); event['scale_order_num'] = @ordernum; event['scale_task'] = tag_items[0].sub(%r{^docker/}, ''); event['scale_job_exe'] = event['scale_task'].sub(%r{_[^_]*$}, ''); event['scale_node'] = event['logsource']; event['stream'] = event['severity'] == 3 ? 'stderr' : 'stdout'; event['job_type'] = tag_items[1]"
+ code => "@ordernum += 1; tag_items = event['program'].split('|'); event['scale_order_num'] = @ordernum; event['scale_task'] = tag_items[0].sub(%r{^docker/}, ''); event['scale_job_exe'] = event['scale_task'].sub(%r{_[^_]*$}, ''); event['scale_node'] = event['logsource']; event['stream'] = event['severity'] == 3 ? 'stderr' : 'stdout'; event['job_type'] = tag_items[1]"
remove_field => ["host", "priority", "timestamp8601", "logsource", "program", "pid", "severity", "facility", "timestamp", "facility_label", "severity_label", "job_type"]
}
}
else {
ruby {
init => "@ordernum = 0"
- code => "@ordernum += 1; tag_items = event['program'].split('++'); event['scale_order_num'] = @ordernum; event['scale_task'] = tag_items[0].sub(%r{^docker/}, ''); event['scale_job_exe'] = event['scale_task'].sub(%r{_[^_]*$}, ''); event['scale_node'] = event['logsource']; event['stream'] = event['severity'] == 3 ? 'stderr' : 'stdout'; event['job_type'] = tag_items[1]"
+ code => "@ordernum += 1; tag_items = event['program'].split('|'); event['scale_order_num'] = @ordernum; event['scale_task'] = tag_items[0].sub(%r{^docker/}, ''); event['scale_job_exe'] = event['scale_task'].sub(%r{_[^_]*$}, ''); event['scale_node'] = event['logsource']; event['stream'] = event['severity'] == 3 ? 'stderr' : 'stdout'; event['job_type'] = tag_items[1]"
}
}
|
Cleanup Pods in Error state based on time scheduled
It's possible for a Pod to be in an Error state and not have a
ContainersReady transition time, so let's go off of PodScheduled since
that should always exist. | @@ -60,14 +60,26 @@ def setup_logging(verbose):
logging.getLogger("kubernetes.client.rest").setLevel(logging.ERROR)
-def _completed_longer_than_threshold(pod: V1Pod, threshold: int) -> bool:
- time_finished = get_pod_condition(pod, "ContainersReady").last_transition_time
+def __condition_transition_longer_than_threshold(
+ pod: V1Pod, condition: str, threshold: int
+) -> bool:
+ time_finished = get_pod_condition(pod, condition).last_transition_time
time_now = datetime.now(tzutc())
# convert total seconds since completion to minutes
- completed_since_minutes = (time_now - time_finished).total_seconds() / 60
+ since_minutes = (time_now - time_finished).total_seconds() / 60
+
+ return since_minutes > threshold
+
+
+def _completed_longer_than_threshold(pod: V1Pod, threshold: int) -> bool:
+ return __condition_transition_longer_than_threshold(
+ pod, "ContainersReady", threshold
+ )
+
- return completed_since_minutes > threshold
+def _scheduled_longer_than_threshold(pod: V1Pod, threshold: int) -> bool:
+ return __condition_transition_longer_than_threshold(pod, "PodScheduled", threshold)
def terminate_pods(pods: Sequence[V1Pod], kube_client) -> tuple:
@@ -116,7 +128,7 @@ def main():
and pod.status.phase == "Failed"
# and that said Pod has been around for a while (generally longer than we'd leave
# Pods that exited sucessfully)
- and _completed_longer_than_threshold(pod, allowed_error_minutes)
+ and _scheduled_longer_than_threshold(pod, allowed_error_minutes)
):
errored_pods.append(pod)
|