message | diff
---|---
Always upload cuspatial packages
Authors:
- Ray Douglass (https://github.com/raydouglass)
Approvers:
- AJ Schmidt (https://github.com/ajschmidt8)
- Jordan Jacobelli (https://github.com/Ethyling)
URL: | #!/usr/bin/env bash
-DEFAULT_CUDA_VER="11.5"
-DEFAULT_PYTHON_VER="3.8"
-
-#Upload cuspatial once per PYTHON
-if [[ "$CUDA" == "${DEFAULT_CUDA_VER}" ]]; then
export UPLOAD_CUSPATIAL=1
-else
- export UPLOAD_CUSPATIAL=0
-fi
-
-#Upload libcuspatial once per CUDA
-if [[ "$PYTHON" == "${DEFAULT_PYTHON_VER}" ]]; then
export UPLOAD_LIBCUSPATIAL=1
-else
- export UPLOAD_LIBCUSPATIAL=0
-fi
if [[ -z "$PROJECT_FLASH" || "$PROJECT_FLASH" == "0" ]]; then
#If project flash is not activate, always build both
|
Update napalm_syslog engine to use auth class
We have changed napalm-logs to use a keep-alive so that when the server
restarts the client automatically re-authenticates. This PR changes the
salt engine to make use of the new keep-alive.
napalm-logs PR | @@ -312,9 +312,10 @@ def start(transport='zmq',
if not certificate:
log.critical('Please use a certificate, or disable the security.')
return
- priv_key, verify_key = napalm_logs.utils.authenticate(certificate,
+ auth = napalm_logs.utils.ClientAuth(certificate,
address=auth_address,
port=auth_port)
+
transport_recv_fun = _get_transport_recv(name=transport,
address=address,
port=port)
@@ -330,7 +331,7 @@ def start(transport='zmq',
log.debug('Received from napalm-logs:')
log.debug(raw_object)
if not disable_security:
- dict_object = napalm_logs.utils.decrypt(raw_object, verify_key, priv_key)
+ dict_object = auth.decrypt(raw_object)
else:
dict_object = napalm_logs.utils.unserialize(raw_object)
try:
|
Plugins: Reassign syntaxes after install before uninstall
Fixes
Removing the package means deleting the syntax file. While PC sets all
open files to Plain Text, we might try to assign the default Markdown syntax
before uninstalling the package.
After installation, assign all open files to
Packages/MarkdownEditing/Markdown.sublime-syntax | @@ -21,6 +21,14 @@ def save_ingored_packages(ignored_packages):
def disable_native_markdown_package():
ignored_packages = get_ingored_packages()
if 'Markdown' not in ignored_packages:
+ reassign_syntax(
+ 'Packages/Markdown/Markdown.sublime-syntax',
+ 'Packages/MarkdownEditing/Markdown.sublime-syntax'
+ )
+ reassign_syntax(
+ 'Packages/Markdown/MultiMarkdown.sublime-syntax',
+ 'Packages/MarkdownEditing/MultiMarkdown.sublime-syntax'
+ )
ignored_packages.append('Markdown')
save_ingored_packages(ignored_packages)
@@ -31,6 +39,25 @@ def enable_native_markdown_package():
ignored_packages.remove('Markdown')
save_ingored_packages(ignored_packages)
+ def reassign():
+ reassign_syntax(
+ 'Packages/MarkdownEditing/Markdown.sublime-syntax',
+ 'Packages/Markdown/Markdown.sublime-syntax'
+ )
+ reassign_syntax(
+ 'Packages/MarkdownEditing/MultiMarkdown.sublime-syntax',
+ 'Packages/Markdown/MultiMarkdown.sublime-syntax'
+ )
+ sublime.set_timeout(reassign, 100)
+
+
+def reassign_syntax(current_syntax, new_syntax):
+ for window in sublime.windows():
+ for view in window.views():
+ syntax = view.settings().get("syntax")
+ if syntax and syntax == current_syntax:
+ view.assign_syntax(new_syntax)
+
def on_after_install():
if "package_control" in sys.modules:
|
Add inactive status to practice meta title
So it shows up in search results. | {% load template_extras %}
{% load humanize %}
-{% block title %}Prescribing measures for {{ practice }}{% endblock %}
+{% block title %}Prescribing measures for {{ practice }}{{ practice.inactive_status_suffix }}{% endblock %}
{% block active_class %}practice{% endblock %}
{% block extra_css %}
|
Added Node version limitation hint
Thanks! | @@ -18,7 +18,7 @@ See [keyword documentation](https://marketsquare.github.io/robotframework-browse
Only Python 3.7 or newer is supported.
-1. Install node.js e.g. from https://nodejs.org/en/download/
+1. Install node.js e.g. from https://nodejs.org/en/download/ (only < v15 supported; if unsure, use 14.15.0 LTS)
2. Install robotframework-browser from the commandline: `pip install robotframework-browser`
3. Install the node dependencies: run `rfbrowser init` in your shell
- if `rfbrowser` is not found, try `python -m Browser.entry init
|
[Hockey] remove extra params to make the command easier to use to look up players.
Prefer displaying onRoster players first, but don't limit all players by it. | @@ -20,7 +20,7 @@ from .dev import HockeyDev
from .errors import InvalidFileError, NotAValidTeamError, UserHasVotedError, VotingHasEndedError
from .game import Game
from .gamedaychannels import GameDayChannels
-from .helper import HockeyStandings, HockeyStates, HockeyTeams, TeamDateFinder, YearFinder
+from .helper import HockeyStandings, HockeyStates, HockeyTeams, TeamDateFinder, YearFinder, YEAR_RE
from .menu import (
BaseMenu,
ConferenceStandingsPages,
@@ -1310,7 +1310,7 @@ class Hockey(HockeyDev, commands.Cog):
timeout=60,
).start(ctx=ctx)
- async def player_id_lookup(self, inactive: bool, name: str):
+ async def player_id_lookup(self, name: str):
now = datetime.utcnow()
saved = datetime.fromtimestamp(await self.config.player_db())
path = cog_data_path(self) / "players.json"
@@ -1325,10 +1325,11 @@ class Hockey(HockeyDev, commands.Cog):
players = []
for player in json.loads(f.read())["data"]:
if name.lower() in player["fullName"].lower():
- if player["onRoster"] == "N" and not inactive:
- continue
+ if player["onRoster"] == "N":
players.append(player["id"])
-
+ else:
+ players.insert(0, player["id"])
+ log.debug(players)
return players
@hockey_commands.command(aliases=["players"])
@@ -1336,21 +1337,20 @@ class Hockey(HockeyDev, commands.Cog):
async def player(
self,
ctx: commands.Context,
- inactive: Optional[bool] = False,
- season: Optional[YearFinder] = None,
*,
search: str,
):
"""
Lookup information about a specific player
- `[inactive=False]` Whether or not to search through inactive players as well
- `[season]` The season to get stats data on format can be `YYYY` or `YYYYYYYY`
`<search>` The name of the player to search for
+ you can include the season to get stats data on format can be `YYYY` or `YYYYYYYY`
"""
async with ctx.typing():
+ season = YEAR_RE.search(search)
season_str = None
if season:
+ search = YEAR_RE.sub("", search)
if season.group(3):
if (int(season.group(3)) - int(season.group(1))) > 1:
return await ctx.send(_("Dates must be only 1 year apart."))
@@ -1365,7 +1365,8 @@ class Hockey(HockeyDev, commands.Cog):
year = int(season.group(1)) + 1
season_str = f"{season.group(1)}{year}"
log.debug(season)
- players = await self.player_id_lookup(inactive, search)
+ log.debug(search)
+ players = await self.player_id_lookup(search.strip())
if players != []:
await BaseMenu(
source=PlayerPages(pages=players, season=season_str),
|
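The change above folds the season into the free-text search by pulling it out with a regex instead of a separate argument. A minimal sketch of that approach; the real YEAR_RE lives in the cog's helper module, so the pattern below is only an assumption:
```
import re

# Hypothetical stand-in for helper.YEAR_RE; the real pattern may differ.
YEAR_RE = re.compile(r"((19|20)\d{2})((19|20)\d{2})?")

search = "Connor McDavid 20192020"
season = YEAR_RE.search(search)
if season:
    search = YEAR_RE.sub("", search).strip()
    print(season.group(1), season.group(3), search)  # 2019 2020 Connor McDavid
```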
Improvements to the List Database page
New Engine name - Engine.version2 + Topology.details
New Custom Filter - Engine
- Custom lookup returning only active engines
- Custom queryset based on provided engine id | @@ -8,6 +8,7 @@ from functools import partial
from bson.json_util import loads
from django.utils.translation import ugettext_lazy as _
from django_services import admin
+from django.contrib.admin import SimpleListFilter
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
@@ -25,6 +26,7 @@ from notification.tasks import TaskRegister
from system.models import Configuration
from util.html import show_info_popup
from logical.models import Database
+from physical.models import Engine
from logical.views import database_details, database_hosts, \
database_credentials, database_resizes, database_backup, database_dns, \
database_metrics, database_destroy, database_delete_host, \
@@ -41,8 +43,41 @@ from logical.service.database import DatabaseService
LOG = logging.getLogger(__name__)
-class DatabaseAdmin(admin.DjangoServicesAdmin):
+class RelatedEngineFilter(SimpleListFilter):
+ # Human-readable title which will be displayed in the
+ # right admin sidebar just above the filter options.
+ title = _('engine')
+
+ # Parameter for the filter that will be used in the URL query.
+ parameter_name = 'engine'
+ def lookups(self, request, model_admin):
+ """
+ Returns a list of tuples. The first element in each
+ tuple is the coded value for the option that will
+ appear in the URL query. The second element is the
+ human-readable name for the option that will appear
+ in the right sidebar.
+ """
+ engines = Engine.objects.filter(is_active=True)
+
+ return [(
+ engine.id, _("{}_{}".format(engine.name, engine.version2))
+ ) for engine in engines]
+
+ def queryset(self, request, queryset):
+ """
+ Returns the filtered queryset based on the value
+ provided in the query string and retrievable via
+ `self.value()`.
+ """
+ # Compare the requested value (either engine id or None)
+ if self.value():
+ return queryset.filter(databaseinfra__engine__id=self.value())
+ return queryset
+
+
+class DatabaseAdmin(admin.DjangoServicesAdmin):
"""
the form used by this view is returned by the method get_form
"""
@@ -61,12 +96,12 @@ class DatabaseAdmin(admin.DjangoServicesAdmin):
list_display_basic = [
"name_html", "organization_admin_page", "team_admin_page",
"engine_html", "environment",
- "offering_html", "friendly_status", "created_dt_format",
- "database_path"
+ "offering_html", "friendly_status", "created_dt_format"
]
list_display_advanced = list_display_basic + ["quarantine_dt_format"]
list_filter_basic = [
- "project", "databaseinfra__environment", "databaseinfra__engine",
+ "project", "databaseinfra__environment",
+ RelatedEngineFilter,
"databaseinfra__plan", "databaseinfra__engine__engine_type", "status",
"databaseinfra__plan__has_persistence",
"databaseinfra__plan__replication_topology__name",
@@ -138,11 +173,6 @@ class DatabaseAdmin(admin.DjangoServicesAdmin):
organization_admin_page.short_description = "Organization"
- def database_path(self, database):
- return database.infra.engine_patch.full_version
-
- database_path.short_description = "Patch"
-
def description_html(self, database):
html = []
@@ -173,7 +203,7 @@ class DatabaseAdmin(admin.DjangoServicesAdmin):
engine_type.admin_order_field = 'name'
def engine_html(self, database):
- engine_info = str(database.engine)
+ engine_info = str(database.databaseinfra.engine_patch.full_version)
topology = database.databaseinfra.plan.replication_topology
if topology.details:
|
Fix up stylelint lint handling to account for differential behaviour when
style code is parseable or not.
return;
}
const source = buffer.toString();
- let formatted;
+ let formatted = source;
let messages = [];
// Array of promises that we need to let resolve before finishing up.
let promises = [];
@@ -114,16 +114,12 @@ function lint({ file, write, encoding = 'utf-8', silent = false } = {}) {
return linted;
}
function lintStyle(code, style, callback, { lineOffset = 0, vue = false } = {}) {
- let linted = prettierFormat(code, style, vue);
- if (linted.trim() !== code.trim()) {
- notSoPretty = true;
- }
- // Stylelint's `_lintSource` method requires an absolute path for the codeFilename arg
+ // Stylelint's `lint` method requires an absolute path for the codeFilename arg
const codeFilename = !path.isAbsolute(file) ? path.join(process.cwd(), file) : file;
promises.push(
stylelint
.lint({
- code: linted,
+ code,
codeFilename,
config: stylelintConfig,
// For reasons beyond my ken, stylint borks on css files
@@ -133,10 +129,8 @@ function lint({ file, write, encoding = 'utf-8', silent = false } = {}) {
configBasedir: path.resolve(__dirname, '..'),
})
.then(output => {
- if (output.output.trim() !== code.trim()) {
- styleCodeUpdates.push(() => callback(output.output));
- }
- if (output.results) {
+ let stylinted;
+ if (output.results && output.results.length) {
messages.push(
stylelintFormatter(
output.results.map(message => {
@@ -147,13 +141,28 @@ function lint({ file, write, encoding = 'utf-8', silent = false } = {}) {
})
)
);
+ // There should only be one result, because we have only
+ // passed it a single file, this seems to be the only way
+ // to check if the `output` property of the output object has been set
+ // to valid style code, as opposed to a serialized copy of the formatted
+ // errors.
+ if (output.results[0]._postcssResult) {
+ stylinted = output.output;
+ }
+ }
+ let linted = prettierFormat(stylinted || code, style, vue);
+
+ if (linted.trim() !== (stylinted || code).trim()) {
+ notSoPretty = true;
+ }
+ if (linted.trim() !== code.trim()) {
+ styleCodeUpdates.push(() => callback(linted));
}
})
.catch(err => {
messages.push(err.toString());
})
);
- return linted;
}
try {
let extension = path.extname(file);
|
Update discord backend
Discord always asks permission | @@ -10,12 +10,14 @@ class DiscordOAuth2(BaseOAuth2):
AUTHORIZATION_URL = 'https://discordapp.com/api/oauth2/authorize'
ACCESS_TOKEN_URL = 'https://discordapp.com/api/oauth2/token'
ACCESS_TOKEN_METHOD = 'POST'
+ REVOKE_TOKEN_URL = 'https://discordapp.com/api/oauth2/token/revoke'
+ REVOKE_TOKEN_METHOD = 'GET'
DEFAULT_SCOPE = ['identify']
SCOPE_SEPARATOR = '+'
REDIRECT_STATE = False
EXTRA_DATA = [
('expires_in', 'expires'),
- ('refresh_token', 'refresh_token', True)
+ ('refresh_token', 'refresh_token')
]
def get_user_details(self, response):
|
NY: May 28th
Closes
Closes
Closes | @@ -88,6 +88,41 @@ id: ny-merrick-1
## New York City
+### Police make violent arrests, officer breaks baton striking protestor | May 28th
+
+Footage taken at Union Square and East 17th street shows multiple officers grabbing and shoving a protestor to make an arrest. Another protestor confronts an officer who strikes the protestor in the legs with a baton. The baton snaps in two.
+
+tags: strike, arrest, grab, shove, protestor, baton
+
+id: ny-newyorkcity-55
+
+**Links**
+
+* https://twitter.com/ShimonPro/status/1266136557871869952
+
+
+### Police make violent arrest at Union Square | May 28th
+
+Footage shows a woman arguing with a police officer at a protest near Union Square. The officer grabs her by the backpack and pulls her. Other protestors attempt to help and police intervene. Police then shove the crowd back and arrest the protestor. Allegedly, 33 people were arrested at this scene.
+
+tags: shove, push, grab, protestor, arrest
+
+id: ny-newyorkcity-56
+
+**Links**
+
+* https://twitter.com/NY1/status/1266159669262893057
+
+
+### Police shove protestors at Union Square | May 28th
+
+Footage shows police using bikes to shove protestors on the sidewalk. One protestor appears to be forced over a concrete divider by the chassis of a bike.
+
+tags: shove, bike, protestor
+
+id: ny-newyorkcity-57
+
+
### Police shove woman to the ground, inducing a seizure | May 29th
Woman was sent to the ER due to seizure caused by policeman flinging her to the ground violently.
|
Distance: handle heterogeneous and multidimensional variables
e.g. [[0, 0], [0, 0, 0]] will cause np.max calls to fail | @@ -1140,7 +1140,14 @@ class Distance(ObjectiveFunction):
"""
+ try:
+ v1 = np.hstack(variable[0])
+ except TypeError:
v1 = variable[0]
+
+ try:
+ v2 = np.hstack(variable[1])
+ except TypeError:
v2 = variable[1]
# Maximum of Hadamard (elementwise) difference of v1 and v2
|
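The try/except above exists because a ragged list such as [[0, 0], [0, 0, 0]] cannot become a rectangular array, so the later np.max call fails; np.hstack flattens it first, and the TypeError branch keeps plain scalars working. A small self-contained illustration with made-up values:
```
import numpy as np

ragged = [[0, 0], [0, 0, 0]]      # heterogeneous, multidimensional variable
flat = np.hstack(ragged)          # array([0, 0, 0, 0, 0]); np.max(flat) is now safe

scalar = 4.2
try:
    value = np.hstack(scalar)     # iterating over a scalar raises TypeError
except TypeError:
    value = scalar                # fall back to the raw value, as the diff does

print(np.max(flat), value)        # 0 4.2
```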
config: change cacert.pem to cacert.crt
keylime_ca uses cacert.crt not cacert.pem | @@ -226,7 +226,7 @@ registrar_tls_dir = CV
# The following three options set the filenames where the CA certificate,
# client certificate, and client private key file are, relative to the 'tls_dir'.
-# If 'tls_dir = default', then default values will be used for 'ca_cert = cacert.pem',
+# If 'tls_dir = default', then default values will be used for 'ca_cert = cacert.crt',
# 'my_cert = client-cert.crt', and 'private_key = client-private.pem'.
registrar_ca_cert = default
registrar_my_cert = default
@@ -369,7 +369,7 @@ tls_dir = default
# The following three options set the filenames where the CA certificate,
# client certificate, and client private key file are, relative to the 'tls_dir'.
-# If 'tls_dir = default', then default values will be used for 'ca_cert = cacert.pem',
+# If 'tls_dir = default', then default values will be used for 'ca_cert = cacert.crt',
# 'my_cert = client-cert.crt', and 'private_key = client-private.pem'.
ca_cert = default
my_cert = default
@@ -403,7 +403,7 @@ registrar_tls_dir = CV
# The following three options set the filenames where the registrar CA certificate,
# client certificate, and client private key file are, relative to the 'tls_dir'.
-# if 'tls_dir = default', then default values will be used for 'ca_cert = cacert.pem',
+# if 'tls_dir = default', then default values will be used for 'ca_cert = cacert.crt',
# 'my_cert = client-cert.crt', and 'private_key = client-private.pem'.
registrar_ca_cert = default
registrar_my_cert = default
|
request_client: add the option to ignore hostname validation
For most certificates we do not care about the hostname | @@ -5,14 +5,20 @@ Copyright 2017 Massachusetts Institute of Technology.
import requests
+from requests.adapters import HTTPAdapter
+from requests.packages.urllib3.poolmanager import PoolManager # pylint: disable=import-error
+
class RequestsClient:
- def __init__(self, base_url, tls_enabled, **kwargs):
+ def __init__(self, base_url, tls_enabled, ignore_hostname=False, **kwargs):
if tls_enabled:
self.base_url = f'https://{base_url}'
else:
self.base_url = f'http://{base_url}'
self.session = requests.Session()
+ if ignore_hostname:
+ self.session.mount("http://", HostNameIgnoreAdapter())
+ self.session.mount("https://", HostNameIgnoreAdapter())
for arg, value in kwargs.items():
if isinstance(value, dict):
value = self.__deep_merge(
@@ -49,3 +55,17 @@ class RequestsClient:
else:
destination[key] = value
return destination
+
+
+class HostNameIgnoreAdapter(HTTPAdapter):
+ """
+ This HTTPAdapter just ignores the Hostname validation.
+
+ It is required because in most cases we don't know the hostname during certificate generation.
+ """
+ def init_poolmanager(self, connections, maxsize, block=requests.adapters.DEFAULT_POOLBLOCK, **pool_kwargs):
+ self.poolmanager = PoolManager(num_pools=connections,
+ maxsize=maxsize,
+ block=block,
+ strict=True,
+ assert_hostname=False, **pool_kwargs)
|
Add an allow_moderation_roles argument to the wait_for_deletion() util
The `allow_moderation_roles` bool can be specified to allow anyone with a role in `MODERATION_ROLES` to delete
the message. | @@ -11,7 +11,7 @@ from discord.errors import HTTPException
from discord.ext.commands import Context
import bot
-from bot.constants import Emojis, NEGATIVE_REPLIES
+from bot.constants import Emojis, NEGATIVE_REPLIES, MODERATION_ROLES
log = logging.getLogger(__name__)
@@ -22,12 +22,15 @@ async def wait_for_deletion(
deletion_emojis: Sequence[str] = (Emojis.trashcan,),
timeout: float = 60 * 5,
attach_emojis: bool = True,
+ allow_moderation_roles: bool = True
) -> None:
"""
Wait for up to `timeout` seconds for a reaction by any of the specified `user_ids` to delete the message.
An `attach_emojis` bool may be specified to determine whether to attach the given
`deletion_emojis` to the message in the given `context`.
+ An `allow_moderation_roles` bool may also be specified to allow anyone with a role in `MODERATION_ROLES` to delete
+ the message.
"""
if message.guild is None:
raise ValueError("Message must be sent on a guild")
@@ -46,6 +49,7 @@ async def wait_for_deletion(
reaction.message.id == message.id
and str(reaction.emoji) in deletion_emojis
and user.id in user_ids
+ or allow_moderation_roles and any(role.id in MODERATION_ROLES for role in user.roles)
)
with contextlib.suppress(asyncio.TimeoutError):
|
Fix search
A double slash (`//`) in the URL was making the search fail | @@ -26,7 +26,7 @@ class ListNovelCrawler(Crawler):
def search_novel(self, query):
query = quote_plus(query.lower())
- soup = self.get_soup(search_url % (self.home_url, query))
+ soup = self.get_soup(search_url % (self.home_url.removesuffix("/"), query))
results = []
for tab in soup.select('.sect-body .thumb-item-flow'):
|
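The fix works because home_url ends with a trailing slash while the search URL template adds its own, yielding `//` in the request; str.removesuffix (Python 3.9+) trims it. A rough sketch with a placeholder URL and an assumed template shape:
```
base = "https://example-novel-site.test/"     # placeholder for self.home_url
search_url = "%s/search/?keyword=%s"          # assumed shape of the module-level template

print(search_url % (base, "sword"))                    # ...site.test//search/?keyword=sword
print(search_url % (base.removesuffix("/"), "sword"))  # ...site.test/search/?keyword=sword
```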
Ignore extrafanart invocations on each addon path
E.g. the Black Glass Nova skin uses extrafanart and calls the add-on multiple times, once per path;
this mainly causes problems with playback, but also triggers multiple list loads | @@ -111,16 +111,19 @@ def lazy_login(func):
def route(pathitems):
"""Route to the appropriate handler"""
LOG.debug('Routing navigation request')
- root_handler = pathitems[0] if pathitems else G.MODE_DIRECTORY
+ if pathitems:
+ if 'extrafanart' in pathitems:
+ LOG.warn('Route: ignoring extrafanart invocation')
+ return False
+ root_handler = pathitems[0]
+ else:
+ root_handler = G.MODE_DIRECTORY
if root_handler == G.MODE_PLAY:
from resources.lib.navigation.player import play
play(videoid=pathitems[1:])
elif root_handler == G.MODE_PLAY_STRM:
from resources.lib.navigation.player import play_strm
play_strm(videoid=pathitems[1:])
- elif root_handler == 'extrafanart':
- LOG.warn('Route: ignoring extrafanart invocation')
- return False
else:
nav_handler = _get_nav_handler(root_handler, pathitems)
_execute(nav_handler, pathitems[1:], G.REQUEST_PARAMS, root_handler)
|
api_docs: Add "StreamIdInPath" common component.
To facilitate re-use of the same parameters in other paths, this commit
stores the content of the parameter "stream_id" (in path) in components. | @@ -1666,14 +1666,7 @@ paths:
description: |
Get all the topics in a specific stream.
parameters:
- - name: stream_id
- in: path
- description: |
- The unique ID of the stream.
- schema:
- type: integer
- example: 42
- required: true
+ - $ref: '#/components/parameters/StreamIdInPath'
responses:
'200':
description: Success.
@@ -2969,14 +2962,7 @@ paths:
Delete the stream with the given ID.
operationId: zerver.views.streams.deactivate_stream_backend
parameters:
- - name: stream_id
- in: path
- description: |
- The ID of the stream to be deleted.
- schema:
- type: integer
- example: 42
- required: true
+ - $ref: '#/components/parameters/StreamIdInPath'
responses:
'200':
description: Success.
@@ -3011,14 +2997,7 @@ paths:
Update the stream with the given ID.
operationId: zerver.views.streams.update_stream_backend
parameters:
- - name: stream_id
- in: path
- description: |
- The ID of the stream to be updated.
- schema:
- type: integer
- example: 42
- required: true
+ - $ref: '#/components/parameters/StreamIdInPath'
- name: description
in: query
description: |
@@ -3703,3 +3682,12 @@ components:
schema:
type: integer
example: 42
+ StreamIdInPath:
+ name: stream_id
+ in: path
+ description: |
+ The ID of the stream to access.
+ schema:
+ type: integer
+ example: 42
+ required: true
|
Update staging.yaml
removed some merged branches | @@ -52,15 +52,9 @@ branches:
- es/module-display # Ethan March 4th
#- nh/cdc/one_domain # Norman March 26
- sr-ucr-mirror # Sravan May 1
- - mk/ccz-hosting-revamp # MK May 6
- fr/case-templates # FR May 15
- #- mk/media-version-on-revert # MK May 16
- form-odata # Nick P May 21
- web-user-reports-project-access # Gabriella May 13
- - jls/linked-app-incremental-versioning # Jenny May 20
- - jls/-bulk-conditional-alerts-add-messages-alternative # Jenny May 23
- test-RestoreConfig-repr-method # Gabriella May 31
-
- # This is being actively QAed. Don't comment it out without warning the QA team.
- jls/final-select2-v4 # Jenny May 25
submodules: {}
|
DOC: updated CHANGELOG
Updated changelog to include this fix. | @@ -30,6 +30,7 @@ This project adheres to [Semantic Versioning](http://semver.org/).
- Fixed pysat_testing method definition to include mangle_file_dates keyword
- Added small time offsets (< 1s) to ensure COSMIC files and data have unique times
- Updates to Travis CI environment
+ - Removed `inplace` use in xarray `assign` function, which is no longer allowed
## [2.1.0] - 2019-11-18
|
Add an integration test for dx download within an inaccessible project
Summary:
The integration test runs dx download from a project
the user has lost access to.
Test Plan: this test passed in jenkins dxpy-branch-integration-tests-on-staging
Reviewers: sking | @@ -1363,6 +1363,41 @@ class TestDXClientUploadDownload(DXTestCase):
# Even after project 1 is destroyed, the download URL should still work
run("wget -O /dev/null " + download_url)
+ @unittest.skipUnless(testutil.TEST_ENV,
+ 'skipping test that would clobber your local environment')
+ def test_dx_download_when_current_project_inaccessible(self):
+ with testutil.TemporaryFile() as fd:
+ with temporary_project("test_dx_accessible_project", select=True) as p_accessible:
+ expected_content = '1234'
+ fd.write(expected_content)
+ fd.flush()
+ fd.close()
+ tmp_filename = os.path.basename(fd.name)
+ listing = run("dx upload --wait {filepath} --path {project}:{filename}".format(
+ filepath=fd.name, project=p_accessible.get_id(), filename=tmp_filename))
+ self.assertIn(p_accessible.get_id(), listing)
+ self.assertIn(os.path.basename(fd.name), listing)
+
+ # Create another project, select it, and remove it to loose access to it
+ p_inaccessible_name = ("test_dx_inaccessible_project" + str(random.randint(0, 1000000)) + "_" +
+ str(int(time.time() * 1000)))
+ p_inaccessible_id = run("dx new project {name} --brief --select"
+ .format(name=p_inaccessible_name)).strip()
+ with select_project(p_inaccessible_id):
+ self.assertEqual(run("dx find projects --brief --name {name}"
+ .format(name=p_inaccessible_name)).strip(), p_inaccessible_id)
+ run("dx rmproject -y {name} -q".format(name=p_inaccessible_name))
+ self.assertEqual(run("dx find projects --brief --name {name}"
+ .format(name=p_inaccessible_name)).strip(), "")
+ current_project_env_var = dxpy.config.get('DX_PROJECT_CONTEXT_ID', None)
+ self.assertEqual(p_inaccessible_id, current_project_env_var)
+ # Successfully download file from the accessible project
+ run("dx download {project}:{filename}"
+ .format(project=p_accessible.name, filename=tmp_filename)).strip()
+ result_content = run("dx head {project}:{filename}"
+ .format(project=p_accessible.name, filename=tmp_filename)).strip()
+ self.assertEqual(expected_content, result_content)
+
def test_dx_upload_mult_paths(self):
testdir = tempfile.mkdtemp()
os.mkdir(os.path.join(testdir, 'a'))
|
resolve lint error in firewalld state
'String formatting used in logging' for invalid ICMP type | @@ -424,7 +424,7 @@ def _present(name,
ret['comment'] = 'Error: {0}'.format(err)
return ret
else:
- log.error('{0} is an invalid ICMP type'.format(icmp_type))
+ log.error('%s is an invalid ICMP type', icmp_type)
if prune_block_icmp:
old_icmp_types = set(_current_icmp_blocks) - set(block_icmp)
|
Changes to Mattermost Partner Programs
Clarified some steps in the process | @@ -15,19 +15,25 @@ The purpose of the Mattermost Authorized Reseller Program is to enable customers
Companies who enter into a Mattermost Authorized Reseller agreement typically have existing relationships with customers and help them procure information technology products.
-Purchasing as a Mattermost Authorized Reseller
+Becoming a Mattermost Authorized Reseller
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- Complete a `contact form <https://about.mattermost.com/contact/>`_ with Mattermost requesting an authorized reseller discussion, including the name of the customer to which you would like to resell.
-- You'll work with a partner manager over email, or possibly phone, to understand the details of your business and the resale transaction.
+- Complete a `contact form <https://about.mattermost.com/contact/>`_ with Mattermost requesting an authorized reseller discussion and, if available, include the name of the customer to which you would like to resell.
+- You'll be in contact with a partner manager over email, or possibly phone, to understand the details of your business and the resale transaction.
- Once the resale is approved internally, you will be provided a one-time click-sign authorized reseller agreement to complete, along with purchase order information for your customer.
- - Note: Mattermost does not accept customer purchase orders or custom terms.
+Placing an Order as a Mattermost Authorized Reseller
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ When ready to place an order submit a PO to [email protected] along with the name, address and contact information of your customer.
+ - Note: Mattermost does not accept customer purchase orders or custom terms for Reseller orders.
+
- Mattermost, Inc. will then:
- - Issue you an invoice (payment due within 30 days)
+ - Review the terms and conditions of the order
- Send the customer contact a request to complete a `customer registration form <https://about.mattermost.com/customer-registration/>`_, which includes an agreement to accept `Enterprise Edition Subscription Terms for Purchase by Resale <https://about.mattermost.com/customer-terms-and-conditions/>`_.
- After the customer registration form is complete, it will be reviewed within one business day and a license key issued via email
+ - Issue you an invoice (payment due within 30 days)
Mattermost Value-Added Reseller Program
------------------------------------------------
@@ -51,14 +57,17 @@ Becoming a Mattermost Value-Added Reseller ("VAR")
Order Processing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- For each customer opportunity, `complete a deal registration form <https://about.mattermost.com/reseller-deal-registration/>`_ to be eligible for a reseller discount after the opportunity is confirmed as not in discussions about Mattermost via another company or person.
+- For each customer opportunity, `complete a deal registration form <https://about.mattermost.com/reseller-deal-registration/>`_ to be eligible for a reseller discount.
+- Once the deal is registred Mattermost will review the registration and contact you regarding approval, rejection or to obtain more information.
- Request a quotation from your Mattermost Strategic Alliance Manager for customer purchase, including email address of customer contact who will receive the license key.
- Sign quotation to issue a purchase order.
- Mattermost, Inc. will then:
- - Issue you an invoice (payment due within 30 days)
+ - Review the terms and conditions of the order
- Send the customer contact a request to complete a `customer registration form <https://about.mattermost.com/customer-registration/>`_, which includes an agreement to accept `Enterprise Edition Subscription Terms for Purchase by Resale <https://about.mattermost.com/customer-terms-and-conditions/>`_.
- After the customer registration form is complete, it will be reviewed within one business day and a license key issued via email
+ - Issue you an invoice (payment due within 30 days)
+
Certified Reseller Developer License
Mattermost Deployment Solutions Partner Program
|
Add more multiprocessing function stubs
Fixes | # Stubs for multiprocessing
-from typing import Any, Callable, Iterable, Mapping, Optional, Dict, List
+from typing import Any, Callable, Iterable, Mapping, Optional, Dict, List, Union
+from logging import Logger
from multiprocessing.context import BaseContext
from multiprocessing.managers import SyncManager
from multiprocessing.pool import AsyncResult
from multiprocessing.process import current_process as current_process
+import sys
class Lock():
def acquire(self, block: bool = ..., timeout: int = ...) -> None: ...
@@ -101,6 +103,18 @@ class Value():
def __init__(self, typecode_or_type: str, *args: Any, lock: bool = ...) -> None: ...
# ----- multiprocessing function stubs -----
+def active_children() -> List[Process]: ...
+def allow_connection_pickling() -> None: ...
def cpu_count() -> int: ...
def freeze_support() -> None: ...
+def get_logger() -> Logger: ...
+def log_to_stderr(level: Optional[Union[str, int]] = ...) -> Logger: ...
def Manager() -> SyncManager: ...
+def set_forkserver_preload(module_names: List[str]) -> None: ...
+if sys.platform == 'win32' or sys.version_info >= (3, 4):
+ def set_executable(executable: str) -> None: ...
+if sys.version_info >= (3, 4):
+ def get_all_start_methods() -> List[str]: ...
+ def get_context(method: Optional[str] = ...) -> BaseContext: ...
+ def get_start_method(allow_none: Optional[bool]) -> Optional[str]: ...
+ def set_start_method(method: str, force: Optional[bool] = ...) -> None: ...
|
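The newly stubbed names are the standard multiprocessing helpers; a short sketch of the calls those signatures describe (Python 3.4+):
```
import multiprocessing as mp

def work() -> None:
    print("running in", mp.current_process().name)

if __name__ == "__main__":
    mp.log_to_stderr("INFO")          # returns the module-level Logger
    ctx = mp.get_context("spawn")     # BaseContext with an explicit start method
    proc = ctx.Process(target=work)
    proc.start()
    print(mp.active_children())       # live Process objects spawned by this one
    proc.join()
```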
Update reduction_examples.rst
Add reduceccd | @@ -6,9 +6,12 @@ Here are some examples and different repositories using `ccdproc`.
* `ipython notebook`_
* `WHT basic reductions`_
* `pyhrs`_
+* `reduceccd`_
.. _ipython notebook: http://nbviewer.ipython.org/gist/mwcraig/06060d789cc298bbb08e
.. _WHT basic reductions: https://github.com/crawfordsm/wht_reduction_scripts/blob/master/wht_basic_reductions.py
.. _pyhrs: https://github.com/saltastro/pyhrs
+.. _reduceccd: https://github.com/rgbIAA/reduceccd
+
|
fix: add files under tests/ missing in sdist
Add files under tests/ that were missing from the sdist (tests/res/20-00-cnf.sh, for example)
by fixing the glob patterns in MANIFEST.in.
It may close I think. | @@ -8,4 +8,5 @@ include docs/api/*.*
include pkg/*
include setup.py
recursive-include src *.py
-recursive-include tests *.py *.yml *.txt *.json *.yml
+# for f in tests/**/*.* ; do echo ${f/*\./*.}; done | sort | uniq
+recursive-include tests *.ini *.json *.properties *.py *.sh *.toml *.xml *.yml
|
Prevent reloads when filters don't change
Fixes | @@ -122,10 +122,12 @@ export default class FilterDropdown extends React.Component {
};
handleClose = () => {
- const {onClose, setGlobalState} = this.props;
+ const {onClose, setGlobalState, initialValues} = this.props;
const {fieldValues} = this.state;
+ if (!_.isEqual(initialValues, fieldValues)) {
this.setRenderedValue(fieldValues);
setGlobalState(fieldValues);
+ }
onClose();
};
|
fix bugs in load balancing, hot thermal production
Production limits in constraint (4f) should be specific to Boiler, not all heating technologies
Electric Chiller consumption now included in electrical load balancing in time periods with no grid access | @@ -344,7 +344,7 @@ function add_storage_op_constraints(m, p)
)
# Constraint (4f)-1: (Hot) Thermal production sent to storage or grid must be less than technology's rated production
if !isempty(p.BoilerTechs)
- @constraint(m, HeatingTechProductionFlowCon[b in p.HotTES, t in p.HeatingTechs, ts in p.TimeStep],
+ @constraint(m, HeatingTechProductionFlowCon[b in p.HotTES, t in p.BoilerTechs, ts in p.TimeStep],
m[:dvProductionToStorage][b,t,ts] <=
p.ProductionFactor[t,ts] * m[:dvThermalProduction][t,ts]
)
@@ -545,7 +545,7 @@ function add_load_balance_constraints(m, p)
sum( m[:dvDischargeFromStorage][b,ts] for b in p.ElecStorage ) ==
sum( sum(m[:dvProductionToStorage][b,t,ts] for b in p.ElecStorage) +
sum(m[:dvProductionToGrid][t,u,ts] for u in p.CurtailmentTiers) for t in p.ElectricTechs) +
- ## sum(m[:dvThermalProduction][t,ts] for t in p.CoolingTechs )/ p.ElectricChillerEfficiency +
+ sum(m[:dvThermalProduction][t,ts] for t in p.ElectricChillers )/ p.ElectricChillerCOP +
p.ElecLoad[ts]
)
end
|
CI: conftest.json files opened as utf-8
(Otherwise Windows is not able to read utf-8 encoded strings for new node text) | @@ -171,7 +171,7 @@ def is_current_version_compatible(test_id,
png1 = os.path.join(tmp_dir, "1.png")
png2 = os.path.join(tmp_dir, "2.png")
- config = json.load(open(json_config))
+ config = json.load(open(json_config, encoding="utf-8"))
check_render = config["check"]["render"]
render_options = {}
|
ViewportGadget : Don't reset centre of interest in `setCamera()`
This fixes the following bugs :
Centre of interest lost after adjusting clipping planes or field of view
in the SceneView.
Centre of interest lost after switching to a look-through camera and back
in the SceneView. | @@ -76,6 +76,7 @@ class ViewportGadget::CameraController : public boost::noncopyable
public :
CameraController( IECoreScene::CameraPtr camera )
+ : m_centreOfInterest( 1.0f )
{
setCamera( camera );
}
@@ -96,8 +97,6 @@ class ViewportGadget::CameraController : public boost::noncopyable
{
m_fov = nullptr;
}
-
- m_centreOfInterest = 1;
}
IECoreScene::Camera *getCamera()
|
resources: default creator: avoid None
current_user can be None as well (in the execution-scheduler).
In that case, fall back to the `parent_instance` part. | @@ -123,7 +123,7 @@ class SQLResourceBase(SQLModelBase):
with db.session.no_autoflush:
if not self.creator:
user = current_user._get_current_object()
- if user.is_authenticated:
+ if user is not None and user.is_authenticated:
self.creator = user
else:
self.creator = parent_instance.creator
|
Modify test_dates_not_supported_by_date_time()...
... to check for just a substring instead | @@ -717,9 +717,7 @@ class TestFreshnessDateDataParser(BaseTestCase):
self.given_parser()
self.given_date_string(date_string)
self.when_date_is_parsed()
- if isinstance(self.error, ValueError):
- self.error = ValueError(re.sub('year [-+]*\d+ is out of range','year is out of range',str(self.error)))
- self.then_error_was_raised(ValueError, ['year is out of range',
+ self.then_error_was_raised(ValueError, ['is out of range',
"('year must be in 1..9999'"])
@parameterized.expand([
|
Fix a broken join
Related to | @@ -54,7 +54,7 @@ FROM
INNER JOIN
{project}.{hscic}.normalised_prescribing_standard rx
ON
- rx.month = dt.date
+ rx.month = TIMESTAMP(dt.date)
AND rx.bnf_code = dt.bnf_code
WHERE
-- These can be prescribed fractionally, but BSA round quantity down,
|
Update sso-saml-ldapsync.rst
Added "to Mattermost" for uniformity with the sentence above. | @@ -27,5 +27,5 @@ Once the synchronization with AD/LDAP is enabled, user attributes are synchroniz
.. note::
If a user is deactivated from AD/LDAP, they will be deactivated in Mattermost on the next sync. They will be shown as "Inactive" in the System Console users list, all of their sessions will expire and they won't be able to log back in to Mattermost.
- If a user is deactivated from SAML, their session won't expire until they're deactivated from AD/LDAP. However, they won't be able to log back in.
+ If a user is deactivated from SAML, their session won't expire until they're deactivated from AD/LDAP. However, they won't be able to log back in to Mattermost.
|
Update palindrome_products_test.py
Changes two tests to assertFactorsEqual in order to allow implementations that don't use lists to pass. | @@ -62,12 +62,12 @@ class PalindromeProductsTest(unittest.TestCase):
def test_empty_for_smallest_palindrome_if_none_in_range(self):
value, factors = smallest_palindrome(min_factor=1002, max_factor=1003)
self.assertIsNone(value)
- self.assertEqual(factors, [])
+ self.assertFactorsEqual(factors, [])
def test_empty_for_largest_palindrome_if_none_in_range(self):
value, factors = largest_palindrome(min_factor=15, max_factor=15)
self.assertIsNone(value)
- self.assertEqual(factors, [])
+ self.assertFactorsEqual(factors, [])
def test_error_for_smallest_if_min_is_more_than_max(self):
with self.assertRaisesWithMessage(ValueError):
|
Docstring typo
Should be "...max..." instead of "...sum..." | @@ -2846,7 +2846,7 @@ If you wish to change this behavior, please set discard_failed_expectations, dis
):
"""Expect the column max to be between an min and max value
- expect_column_sum_to_be_between is a :func:`column_aggregate_expectation <great_expectations.dataset.base.Dataset.column_aggregate_expectation>`.
+ expect_column_max_to_be_between is a :func:`column_aggregate_expectation <great_expectations.dataset.base.Dataset.column_aggregate_expectation>`.
Args:
column (str): \
|
Update checkdb.py
need an if env == 'development': block for staging and production deploys | @@ -3,14 +3,14 @@ import os
from keys import *
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
-
+env = os.getenv('APP_ENV')
dbname = os.getenv("DB_NAME")
+
+if env == 'development':
dbhost = dev_database_host
dbuser = dev_user
dbpass = dev_user_password
-env = os.getenv('APP_ENV')
-
-if env == 'staging':
+elif env == 'staging':
dbhost = staging_database_host
dbuser = staging_user
dbpass = staging_user_password
@@ -18,6 +18,11 @@ elif env == 'production':
dbhost = prod_database_host
dbuser = production_user
dbpass = production_user_password
+else:
+ dbname = dev_database_name
+ dbhost = dev_database_host
+ dbuser = dev_user
+ dbpass = dev_user_password
conn = psycopg2.connect(
dbname="postgres",
|
fix(deploy): Allow all Partitions for S3 Policy on managed stack
Solves where creating the managed stack in any partition but aws
will fail. | @@ -17,6 +17,7 @@ from samcli import __version__
from samcli.cli.global_config import GlobalConfig
from samcli.commands.exceptions import UserException, CredentialsError, RegionError
+
SAM_CLI_STACK_NAME = "aws-sam-cli-managed-default"
LOG = logging.getLogger(__name__)
@@ -142,9 +143,10 @@ def _get_stack_template():
Fn::Join:
- ""
-
- - "arn:aws:s3:::"
- -
- !Ref SamCliSourceBucket
+ - "arn:"
+ - !Ref AWS::Partition
+ - ":s3:::"
+ - !Ref SamCliSourceBucket
- "/*"
Principal:
Service: serverlessrepo.amazonaws.com
|
Fixed typo in README.md
fixed typo RECOMMAND -> RECOMMEND in line 286 | @@ -283,7 +283,7 @@ cd data/
unzip libri_fmllr_cmvn.zip # features used for TERA
```
-### On-the-fly Feature Extraction (RECOMMANDED)
+### On-the-fly Feature Extraction (RECOMMENDED)
- This feature allow users to run training and testing with out preprocessing data, feature extraction is done during runtime (This will not increase your training time!).
- To **enable bucketing** (optional, but substantially increase training efficiency), you need to run this script to get all the length of the training data.
```bash
|
request_force_close: add 1s delay before closing the transport,
so that the remote task does not get cancelled. | @@ -2268,15 +2268,20 @@ class LNWallet(LNWorker):
peer_addr = LNPeerAddr(host, port, node_id)
transport = LNTransport(privkey, peer_addr, proxy=self.network.proxy)
peer = Peer(self, node_id, transport, is_channel_backup=True)
+ async def trigger_force_close_and_wait():
+ # wait before closing the transport, so that
+ # remote has time to process the message.
+ await peer.trigger_force_close(channel_id)
+ await asyncio.sleep(1)
+ peer.transport.close()
try:
async with OldTaskGroup(wait=any) as group:
await group.spawn(peer._message_loop())
- await group.spawn(peer.trigger_force_close(channel_id))
+ await group.spawn(trigger_force_close_and_wait())
return
except Exception as e:
self.logger.info(f'failed to connect {host} {e}')
continue
- # TODO close/cleanup the transport
else:
raise Exception('failed to connect')
|
2.6.1
Automatically generated by python-semantic-release | @@ -9,7 +9,7 @@ https://community.home-assistant.io/t/echo-devices-alexa-as-media-player-testers
"""
from datetime import timedelta
-__version__ = "2.6.0"
+__version__ = "2.6.1"
PROJECT_URL = "https://github.com/custom-components/alexa_media_player/"
ISSUE_URL = "{}issues".format(PROJECT_URL)
|
_set_heat_capacity_P_polyfit: missing factor 2 in dvdt
method QHA._set_heat_capacity_P_polyfit:
if equilibrium volume vs T is fitted by a polynomial of degree 2, then dvdt should be parameters[0] * 2 * t + parameters[1] | @@ -848,7 +848,7 @@ class QHA(object):
msg = ("Failed to fit equilibrium volumes vs T to "
"polynomial of degree 2.")
raise RuntimeError(msg)
- dvdt = parameters[0] * t + parameters[1]
+ dvdt = parameters[0] * 2 * t + parameters[1]
cp.append(cv_p + t * dvdt * dsdv_t)
dsdv.append(dsdv_t)
|
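The fix follows from the derivative of the fitted polynomial: np.polyfit(..., 2) returns coefficients [a, b, c] for V(T) = a*T**2 + b*T + c, so dV/dT = 2*a*T + b, i.e. parameters[0] * 2 * t + parameters[1]. A quick check with made-up volume data:
```
import numpy as np

t_points = np.array([0.0, 100.0, 200.0, 300.0])
volumes = np.array([10.0, 10.2, 10.7, 11.5])    # illustrative equilibrium volumes vs T
parameters = np.polyfit(t_points, volumes, 2)   # [a, b, c]

t = 250.0
dvdt = parameters[0] * 2 * t + parameters[1]    # corrected expression
assert np.isclose(dvdt, np.polyval(np.polyder(parameters), t))
```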
Fix minor typo in pong tutorial code comments
While reading through the pong tutorial code, I noticed that `of` should
have been `off` in code comments. This commit fixes the issue. | @@ -40,7 +40,7 @@ class PongGame(Widget):
def update(self, dt):
self.ball.move()
- # bounce of paddles
+ # bounce off paddles
self.player1.bounce_ball(self.ball)
self.player2.bounce_ball(self.ball)
@@ -48,7 +48,7 @@ class PongGame(Widget):
if (self.ball.y < self.y) or (self.ball.top > self.top):
self.ball.velocity_y *= -1
- # went of to a side to score point?
+ # went off to a side to score point?
if self.ball.x < self.x:
self.player2.score += 1
self.serve_ball(vel=(4, 0))
|
Clarified config setting unit of measure
Updated Idle Timeout config setting to clarify that the value specified is seconds. Addresses | @@ -336,7 +336,7 @@ Read Timeout
|all-plans| |self-hosted|
-Maximum time allowed from when the connection is accepted to when the request body is fully read.
+Maximum time allowed in seconds from when the connection is accepted to when the request body is fully read.
+----------------------------------------------------------------------------------------+
| This feature's ``config.json`` setting is ``"ReadTimeout": 300`` with numerical input. |
@@ -347,7 +347,7 @@ Write Timeout
|all-plans| |self-hosted|
-If using HTTP (insecure), this is the maximum time allowed from the end of reading the request headers until the response is written. If using HTTPS, it is the total time from when the connection is accepted until the response is written.
+If using HTTP (insecure), this is the maximum time in seconds allowed from the end of reading the request headers until the response is written. If using HTTPS, it is the total time from when the connection is accepted until the response is written.
+-----------------------------------------------------------------------------------------+
| This feature's ``config.json`` setting is ``"WriteTimeout": 300`` with numerical input. |
@@ -358,7 +358,7 @@ Idle Timeout
|all-plans| |self-hosted|
-Set an explicit idle timeout in the HTTP server. This is the maximum time allowed before an idle connection is disconnected.
+Set an explicit idle timeout in seconds in the HTTP server. This is the maximum time allowed before an idle connection is disconnected.
+-----------------------------------------------------------------------------------------+
| This feature's ``config.json`` setting is ``"IdleTimeout": 60`` with numerical input. |
|
We only need to force a hardware buffer
Use the optimal sound format if the hardware supports it | // quality and more!
// Launch Options:
-// -novid -nojoy -noff -nohltv -nouserclip -softparticlesdefaultoff -reuse -usetcp -NoQueuedPacketThread -primarysound -snoforceformat
+// -novid -nojoy -noff -nohltv -nouserclip -softparticlesdefaultoff -reuse -usetcp -NoQueuedPacketThread -primarysound
//
// -novid : disables Valve startup logo
// -nojoy : stops Joystick system from starting up
// -usetcp : allow usage of TCP where it would be more optimal than UDP, better network performance
// -NoQueuedPacketThread : do not start the queued packet thread unused in this config
// -primarysound : Always use a direct hardware sound buffer
-// -snoforceformat : Do not set the hardware sound format
// Extra Launch Options:
// -noforcemaccel -noforcemparms -noforcemspd : uses Windows mouse settings, does not work with raw input
// -dxlevel 100 : use the highest possible DirectX level for maximum performance
// -high : runs TF2 with High priority, which runs tasks ASAP, but not before OS level tasks, more FPS and
// responsiveness
+// -snoforceformat : Do not set the hardware sound format
// -r_emulate_gl : Emulates OpenGL
// Niche Launch Options:
|
HAProxy: fix bind mount to expose stats socket
configures the HAProxy
service to expose the stats socket with a bind mount; however, the
main service container doesn't use that bind mount. Fix that. | @@ -211,7 +211,7 @@ outputs:
# the necessary bit and prevent systemd to try to reload the service in the container
- /usr/libexec/iptables:/usr/libexec/iptables:ro
- /usr/libexec/initscripts/legacy-actions:/usr/libexec/initscripts/legacy-actions:ro
- - /var/lib/haproxy:/var/lib/haproxy
+ - /var/lib/haproxy:/var/lib/haproxy:rw
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
haproxy:
@@ -226,6 +226,7 @@ outputs:
-
- /var/lib/kolla/config_files/haproxy.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/puppet-generated/haproxy/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/haproxy:/var/lib/haproxy:rw
- if:
- public_tls_enabled
- - list_join:
|
Fix ML Engine Dashboard link
It previously pointed to a Google-internal link. | @@ -233,7 +233,7 @@ submit training` command is correct. ML Engine does not distinguish between
training and evaluation jobs.
Users can monitor and stop training and evaluation jobs on the [ML Engine
-Dasboard](https://pantheon.corp.google.com/mlengine/jobs).
+Dasboard](https://console.cloud.google.com/mlengine/jobs).
## Monitoring Progress with Tensorboard
|
tests: Verify logs of incoming webhook profile api key validation.
This commit verifies warning logs while testing validate_api_key when the
profile is an incoming webhook but is_webhook is not set to True.
Verification is done using assertLogs so that logs do not cause spam
by printing in the test output. | @@ -1341,9 +1341,12 @@ class TestValidateApiKey(ZulipTestCase):
self._change_is_active_field(self.default_bot, True)
def test_validate_api_key_if_profile_is_incoming_webhook_and_is_webhook_is_unset(self) -> None:
- with self.assertRaises(JsonableError):
+ with self.assertRaises(JsonableError), self.assertLogs(level="WARNING") as root_warn_log:
api_key = get_api_key(self.webhook_bot)
validate_api_key(HostRequestMock(), self.webhook_bot.email, api_key)
+ self.assertEqual(root_warn_log.output, [
+ 'WARNING:root:User [email protected] (zulip) attempted to access API on wrong subdomain ()'
+ ])
def test_validate_api_key_if_profile_is_incoming_webhook_and_is_webhook_is_set(self) -> None:
api_key = get_api_key(self.webhook_bot)
|
Update README.md
Add link to new language_modes.talon for enabling programming languages. | @@ -168,8 +168,8 @@ Specific programming languages may be activated by voice commands, or via title
Activating languages via commands will enable the commands globally, e.g. they'll work in any application. This will also disable the title tracking method (code.language in .talon files) until the "clear language modes" voice command is used.
-The commands are defined here:
-https://github.com/knausj85/knausj_talon/blob/69d0207c873e860002b137f985dd7cb001183a47/modes/modes.talon#L29
+The commands for enabling languages are defined here:
+https://github.com/knausj85/knausj_talon/blob/master/modes/language_modes.talon
By default, title tracking activates coding languages in supported applications such as VSCode, Visual Studio (requires plugin), and Notepad++.
|
Make loop optional in asyncio.Queue
Default value is `None`, so `loop` should be optional. | @@ -2,7 +2,7 @@ import sys
from asyncio.events import AbstractEventLoop
from .coroutines import coroutine
from .futures import Future
-from typing import Any, Generator, Generic, List, TypeVar
+from typing import Any, Generator, Generic, List, TypeVar, Optional
__all__: List[str]
@@ -13,7 +13,7 @@ class QueueFull(Exception): ...
_T = TypeVar('_T')
class Queue(Generic[_T]):
- def __init__(self, maxsize: int = ..., *, loop: AbstractEventLoop = ...) -> None: ...
+ def __init__(self, maxsize: int = ..., *, loop: Optional[AbstractEventLoop] = ...) -> None: ...
def _init(self, maxsize: int) -> None: ...
def _get(self) -> _T: ...
def _put(self, item: _T) -> None: ...
|
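The stub change mirrors normal usage: the queue is almost always created without passing loop, so the annotation has to accept None. A minimal sketch:
```
import asyncio

async def main() -> None:
    queue: "asyncio.Queue[int]" = asyncio.Queue(maxsize=2)  # no explicit loop argument
    await queue.put(1)
    print(await queue.get())

asyncio.run(main())
```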
do not use dedup key for freebsd.
HG--
branch : feature/microservices | @@ -18,7 +18,7 @@ load_rc_config $name
pidfile="/var/run/consul/consul-template.pid"
command="{{ consul_template_bin_path }}/consul-template"
-command_args="-config {{consul_template_config_dir}}/ -pid-file=${pidfile} -dedup -kill-signal=SIGTERM &"
+command_args="-config {{consul_template_config_dir}}/ -pid-file=${pidfile} -kill-signal=SIGTERM &"
start_precmd="${name}_prestart"
extra_commands="reload"
|
test chunk iname with integral loop bounds
This fails on main because, unlike the other variant of this test, the expression
is piecewise quasi-affine but not quasi-affine, thereby not being caught in
loopy.symbolic.with_aff_conversion_guard and subsequently failing. | @@ -50,7 +50,8 @@ __all__ = [
from loopy.version import LOOPY_USE_LANGUAGE_VERSION_2018_2 # noqa
-def test_chunk_iname(ctx_factory):
[email protected]("fix_parameters", (True, False))
+def test_chunk_iname(ctx_factory, fix_parameters):
ctx = ctx_factory()
knl = lp.make_kernel(
@@ -65,7 +66,13 @@ def test_chunk_iname(ctx_factory):
ref_knl = knl
knl = lp.chunk_iname(knl, "i", 3, inner_tag="l.0")
knl = lp.prioritize_loops(knl, "i_outer, i_inner")
- lp.auto_test_vs_ref(ref_knl, ctx, knl, parameters=dict(n=130))
+
+ if fix_parameters:
+ ref_knl = lp.fix_parameters(ref_knl, n=130)
+ knl = lp.fix_parameters(knl, n=130)
+ lp.auto_test_vs_ref(ref_knl, ctx, knl)
+ else:
+ lp.auto_test_vs_ref(ref_knl, ctx, knl, parameters={"n": 130})
def test_collect_common_factors(ctx_factory):
|
channel save <channel>
Added a command to save out a channel or multiple channels. | @@ -7,6 +7,7 @@ class Console(Module, Pipe):
def __init__(self):
Module.__init__(self)
Pipe.__init__(self)
+ self.channel_file = None
self.channel = None
self.pipe = None
self.buffer = ''
@@ -112,6 +113,11 @@ class Console(Module, Pipe):
yield COMMAND_SET_ABSOLUTE
return move
+ def channel_file_write(self, v):
+ if self.channel_file is not None:
+ self.channel_file.write('%s\n' % v)
+ self.channel_file.flush()
+
def interface(self, command):
yield command
args = str(command).split(' ')
@@ -438,6 +444,14 @@ class Console(Module, Pipe):
yield "No Longer Watching Channel: %s" % chan
except KeyError:
yield "Channel %s is not opened." % chan
+ elif value == 'save':
+ from datetime import datetime
+ if self.channel_file is None:
+ filename = "MeerK40t-channel-{date:%Y-%m-%d_%H_%M_%S}.txt".format(date=datetime.now())
+ yield "Opening file: %s" % filename
+ self.channel_file = open(filename, "a")
+ yield "Recording Channel: %s" % chan
+ active_device.add_watcher(chan, self.channel_file_write)
return
elif command == 'device':
if len(args) == 0:
|
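The filename in the diff uses a nested format spec to timestamp the capture file; a tiny standalone illustration of that one line:
```
from datetime import datetime

filename = "MeerK40t-channel-{date:%Y-%m-%d_%H_%M_%S}.txt".format(date=datetime.now())
print(filename)  # e.g. MeerK40t-channel-2024-01-31_12_05_09.txt
```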
[OVN] Bump up transaction timeout for functional tests
On heavily loaded environments, like Neutron gates, we can
observe sporadic failures of functional tests that are
timeouts.
Let's increase the timeout value to 15 seconds for functional
tests because it looks like 5 seconds is not enough.
Closes-Bug: | @@ -260,11 +260,11 @@ class TestOVNFunctionalBase(test_plugin.Ml2PluginV2TestCase,
set_cfg('ovn_sb_certificate', self.ovsdb_server_mgr.certificate, 'ovn')
set_cfg('ovn_sb_ca_cert', self.ovsdb_server_mgr.ca_cert, 'ovn')
- # 5 seconds should be more than enough for the transaction to complete
- # for the test cases.
- # This also fixes the bug #1607639.
+ # NOTE(mjozefcz): We can find occasional functional test
+ # failures because of low timeout value - set it to 15
+ # seconds, should be enought. More info: 1868110
cfg.CONF.set_override(
- 'ovsdb_connection_timeout', 5,
+ 'ovsdb_connection_timeout', 15,
'ovn')
class TriggerCls(mock.MagicMock):
|
Define multiSelect widget template
Knockout uses selectedOptions, not value, for multiple select | </span>
</script>
+<script type="text/html" id="CommcareSettings.widgets.multiSelect">
+ <span>
+ <span data-bind="if: valueIsLegal()">
+ <select multiple="multiple"
+ class="col-sm-3 form-control"
+ data-bind="options: options,
+ selectedOptions: selectedOptions,
+ optionsText: 'label',
+ attr: {
+ disabled: !enabled(),
+ id: inputId
+ }"></select>
+ </span>
+ <span data-bind="if: !valueIsLegal()" >
+ <select multiple="multiple"
+ class="col-sm-3 form-control error"
+ data-bind="options: options,
+ selectedOptions: writeSelectedOptions,
+ optionsText: 'label',
+ attr: {
+ disabled: !enabled(),
+ id: inputId
+ },
+ optionsCaption: value()"></select>
+ </span>
+ </span>
+</script>
+
<script type="text/html" id="CommcareSettings.widgets.bool">
<div class="checkbox-app-settings">
<input type="checkbox"
|
add adopter
* Update ADOPTERS.md
add adopter
* keep the list in alphabetical order | @@ -12,4 +12,5 @@ Please keep the list in alphabetical order.
| [canonical](https://ubuntu.com/) |[@RFMVasconcelos](https://github.com/rfmvasconcelos) | Hyperparameter tuning for customer projects in Defense and Fintech |
| [cisco](https://cisco.com/) |[@ramdootp](https://github.com/ramdootp) | Hyperparameter tuning for conversational AI interface using Rasa |
| [cubonacci](https://www.cubonacci.com) |[@janvdvegt](https://github.com/janvdvegt) | Hyperparameter tuning within the Cubonacci machine learning platform |
+| [fuzhi](http://www.fuzhi.ai/) | [@planck0591](https://github.com/planck0591) | Experiment and Trial in autoML Platform |
| [karrot](https://uk.karrotmarket.com/) |[@muik](https://github.com/muik) | Hyperparameter tuning in Karrot ML Platform |
|
Fix DeprecationWarning from SciPy iterative solver
This addresses the following deprecation warning in SciPy 1.4.1:
```
scipy/sparse/linalg/isolve/iterative.py:2: DeprecationWarning: scipy.sparse.linalg.gmres called without specifying `atol`. The default value will be changed in a future release. For compatibility, specify a value for `atol` explicitly, e.g., ``gmres(..., atol=0)``, or to retain the old behavior ``gmres(..., atol='legacy')``
``` | +from functools import partial
import numpy as _np
from scipy.linalg import lstsq as _lstsq
from scipy.linalg import cho_factor as _cho_factor
@@ -56,11 +57,12 @@ class SR:
if self._use_iterative:
if lsq_solver is None:
- self._sparse_solver = gmres if self.is_holomorphic else minres
- elif lsq_solver == "gmres":
- self._sparse_solver = gmres
+ lsq_solver = "gmres" if self.is_holomorphic else "minres"
+
+ if lsq_solver == "gmres":
+ self._sparse_solver = partial(gmres, atol="legacy")
elif lsq_solver == "cg":
- self._sparse_solver = cg
+ self._sparse_solver = partial(cg, atol="legacy")
elif lsq_solver == "minres":
if self._is_holomorphic:
self._sparse_solver = minres
|
Tweak daemon docs
Summary: just a few cosmetic changes / missing links.
Test Plan: View daemon page
Reviewers: johann, prha, alangenfeld | @@ -9,11 +9,10 @@ import PyObject from 'components/PyObject';
# Dagster Daemon
Several Dagster features, like [schedules](/overview/schedules-sensors/schedules), [sensors](/overview/schedules-sensors/sensors),
-and run queueing, require a long-running `dagster-daemon` process to be included
+and [run queueing](/overview/pipeline-runs/limiting-run-concurrency), require a long-running `dagster-daemon` process to be included
with your deployment.
-To run the `dagster-daemon` process locally, simply run the following command on the same
-machine that you use to run Dagit:
+To start the `dagster-daemon` process locally, launch the following command and keep the process running:
```shell
dagster-daemon run
@@ -25,19 +24,21 @@ other environments, like Docker or Kubernetes.
## Available daemons
The `dagster-daemon` process reads from your [Dagster instance](/overview/instances/dagster-instance) file to
-determine which daemons it should include. Each of those daemons then executes on a regular interval.
+determine which daemons it should include. Each of those daemons then runs on a regular interval.
+
The following daemons are currently available:
-- The _scheduler daemon_ is responsible for create runs rom any [schedules](/overview/schedules-sensors/schedules) that
- are turned on. This daemon will run as long as you don't have a scheduler other than <PyObject module="dagster.core.scheduler" object="DagsterDaemonScheduler" />
- set as the scheduler on your instance.
+- The _scheduler daemon_ is responsible for creating runs from any [schedules](/overview/schedules-sensors/schedules) that
+ are turned on. This daemon will run as long as you have not overridden the default <PyObject module="dagster.core.scheduler" object="DagsterDaemonScheduler" />
+ as the scheduler on your instance.
-- The _run queue daemon_ is responsible for launching queued runs, using any limits and prioritization
- rules you've set on your instance. You can enable this daemon by setting the run coordinator on your
+- The _run queue daemon_ is responsible for launching queued runs, taking into account any limits and prioritization
+ rules you've set on your instance. You can enable this daemon by setting the
+ [run coordinator](/overview/pipeline-runs/run-coordinator) on your
instance to <PyObject module="dagster.core.run_coordinator" object="QueuedRunCoordinator" />.
-- The _sensor daemon_ is responsible for executing the evaluation function of any running
- [sensors](/overview/schedules-sensors/sensors) on your instance. This daemon is always enabled.
+- The _sensor daemon_ is responsible for creating runs from any
+ [sensors](/overview/schedules-sensors/sensors) that are turned on. This daemon is always enabled.
## Daemons in Dagit
@@ -45,5 +46,5 @@ To check the status of your `dagster-daemon` process within Dagit, click on "Sta
This will take you a page where you can see information about each daemon that's currently configured on your instance.
Each daemon periodically writes a heartbeat to your instance storage, so if a daemon doesn't show a
-recent heartbeat on this page, it likely indicates that you should check the logs on your `dagster-daemon`
+recent heartbeat on this page, it likely indicates that you should check the logs from your `dagster-daemon`
process for errors.
|
Fix typo in docs.
[skip ci] | @@ -685,7 +685,7 @@ currently:
* :py:class:`JSONField` field type, for storing JSON data.
* :py:class:`BinaryJSONField` field type for the ``jsonb`` JSON data type.
* :py:class:`TSVectorField` field type, for storing full-text search data.
-* :py:class:`DateTimeTZ` field type, a timezone-aware datetime field.
+* :py:class:`DateTimeTZField` field type, a timezone-aware datetime field.
In the future I would like to add support for more of postgresql's features.
If there is a particular feature you would like to see added, please
|
Documentation: Fix Sphinx build failure;
Re-add previous check for sphinx | @@ -223,17 +223,20 @@ class Config:
else:
self.parser = ConfigParser.ConfigParser(defaults=os.environ)
+ # test to not fail when build the API doc
+ builds_doc = 'sphinx' in sys.modules
+
if 'RUCIO_CONFIG' in os.environ:
self.configfile = os.environ['RUCIO_CONFIG']
else:
configs = [os.path.join(confdir, 'rucio.cfg') for confdir in get_config_dirs()]
self.configfile = next(iter(filter(os.path.exists, configs)), None)
- if self.configfile is None:
+ if not builds_doc and self.configfile is None:
raise RuntimeError('Could not load Rucio configuration file. '
'Rucio looked in the following paths for a configuration file, in order:'
'\n\t' + '\n\t'.join(configs))
- if not self.parser.read(self.configfile) == [self.configfile]:
+ if not builds_doc and not self.parser.read(self.configfile) == [self.configfile]:
raise RuntimeError('Could not load Rucio configuration file. '
'Rucio tried loading the following configuration file:'
'\n\t' + self.configfile)
|
Delete these several lines
The `_register_rules` method on `Scheduler` was creating a Python set and adding a key associated with each rule to that set, but then never using it for anything. This commit removes that code. | @@ -168,18 +168,13 @@ class Scheduler:
"""Create a native Tasks object, and record the given RuleIndex on it."""
tasks = self._native.new_tasks()
- registered = set()
for output_type, rules in rule_index.rules.items():
for rule in rules:
- key = (output_type, rule)
- registered.add(key)
-
if type(rule) is TaskRule:
self._register_task(tasks, output_type, rule, rule_index.union_rules)
else:
raise ValueError("Unexpected Rule type: {}".format(rule))
-
return tasks
def _register_task(
|
Update sentinel1-slc.yaml
updated tutorial url | @@ -29,7 +29,7 @@ Resources:
DataAtWork:
Tutorials:
- Title: Interferometric Synthetic Aperture Radar Tutorial
- URL: https://github.com/live-eo/sentinel1-slc/
+ URL: https://github.com/live-eo/sentinel1-slc/blob/main/docs/tutorial_InSAR.md
AuthorName: LiveEO
AuthorURL: https://live-eo.com/
Tools & Applications:
|
FieldAccess: fix access to entity fields
FieldAccess.implicit_deref means that we automatically dereference the
receiver of the field access, not the retreived field. So there is no
need (and it is even incorrect) to strip the entity type for the
retreived field in our computations.
TN: | @@ -652,7 +652,6 @@ class FieldAccess(AbstractExpression):
if self.implicit_deref:
prefix = '{}.Node'.format(prefix)
- node_data_struct = node_data_struct.element_type
# If this is a node field/property, we must pass the precise type
# it expects for "Self".
|
lnworker: extend swap label only if we are still watching the address
Without this, old swap transactions for which we have deleted the
channel are incorrectly labeled. | @@ -936,6 +936,7 @@ class LNWallet(LNWorker):
amount_msat = 0
label = 'Reverse swap' if swap.is_reverse else 'Forward swap'
delta = current_height - swap.locktime
+ if self.wallet.adb.is_mine(swap.funding_txid):
tx_height = self.wallet.adb.get_tx_height(swap.funding_txid)
if swap.is_reverse and tx_height.height <= 0:
label += ' (%s)' % _('waiting for funding tx confirmation')
|
DOC: Fix import of default_rng
Fixes 19812 | @@ -235,7 +235,7 @@ library. Below, two arrays are created with shapes (2,3) and (2,3,2),
respectively. The seed is set to 42 so you can reproduce these
pseudorandom numbers::
- >>> import numpy.random.default_rng
+ >>> from numpy.random import default_rng
>>> default_rng(42).random((2,3))
array([[0.77395605, 0.43887844, 0.85859792],
[0.69736803, 0.09417735, 0.97562235]])
|
[All] Add window_exists API function
Also make child window UIDs shorter and friendlier. | @@ -190,7 +190,7 @@ def create_window(title, url=None, width=800, height=600,
:param background_color: Background color as a hex string that is displayed before the content of webview is loaded. Default is white.
:return:
"""
- uid = 'webview' + uuid4().hex
+ uid = 'child_' + uuid4().hex[:8]
valid_color = r'^#(?:[0-9a-fA-F]{3}){1,2}$'
if not re.match(valid_color, background_color):
@@ -271,6 +271,19 @@ def evaluate_js(script, uid='master'):
raise Exception("Cannot call function: No webview exists with uid: {}".format(uid))
+def window_exists(uid='master'):
+ """
+ Check whether a webview with the given UID is up and running
+ :param uid: uid of the target instance
+ :return: True if the window exists, False otherwise
+ """
+ try:
+ get_current_url(uid)
+ return True
+ except:
+ return False
+
+
def _escape_string(string):
return string.replace('"', r'\"').replace('\n', r'\n')
|
Implementation of heat cdist(X,y,metric)
Case X.split is None and Y.split = 0 still needs design
currently only 2D tensors supported
Bugfix for linalg.dot | @@ -44,8 +44,13 @@ def dot(a, b, out=None):
return a * b
elif a.numdims == 1 and b.numdims == 1:
# 1. If both a and b are 1-D arrays, it is inner product of vectors.
- if a.split is not None or b.split is not None:
+ if a.split is None and b.split is None:
+ sl = slice(None)
+ elif a.split is not None and b.split is not None:
+ sl = a.comm.chunk(a.shape, a.split)[2]
+ else: # a.split is not None or b.split is not None:
sl = a.comm.chunk(a.shape, a.split if a.split is not None else b.split)[2]
+
ret = torch.dot(a[sl]._DNDarray__array, b[sl]._DNDarray__array)
if a.is_distributed() or b.is_distributed():
a.comm.Allreduce(MPI.IN_PLACE, ret, MPI.SUM)
|
doc: Update "Delegate to Hashed Bins" in tutorial
Explain and show output of delegate_hashed_bins() function call in
tutorial snippet.
Also update the subsequent comment for better continuity. | @@ -647,11 +647,24 @@ to some role.
>>> targets = repository.get_filepaths_in_directory(
... 'repository/targets/myproject', recursive_walk=True)
+# Delegate trust to 32 hashed bin roles. Each role is responsible for the set
+# of target files, determined by the path hash prefix. TUF evenly distributes
+# hexadecimal ranges over the chosen number of bins (see output).
+# To initialize the bins we use one key, which TUF warns us about (see output).
+# However, we can assign separate keys to each bin, with the method used in
+# previous sections, accessing a bin by its hash prefix range name, e.g.:
+# "repository.targets('00-07').add_verification_key('public_00-07_key')".
>>> repository.targets('unclaimed').delegate_hashed_bins(
... targets, [public_unclaimed_key], 32)
-
-# delegated_hashed_bins() only assigns the public key(s) of the hashed bins, so
-# the private keys may be manually loaded as follows:
+Creating hashed bin delegations.
+1 total targets.
+32 hashed bins.
+256 total hash prefixes.
+Each bin ranges over 8 hash prefixes.
+Adding a verification key that has already been used. [repeated 32x]
+
+# The hashed bin roles can also be accessed by iterating the "delegations"
+# property of the delegating role, which we do here to load the signing key.
>>> for delegation in repository.targets('unclaimed').delegations:
... delegation.load_signing_key(private_unclaimed_key)
|
fix bloch sphere distortion
Matplotlib stopped stretching the plot to fit it in a square box as of 3.3.0. We do it manually. See
self.axes.set_xlim3d(-0.7, 0.7)
self.axes.set_ylim3d(-0.7, 0.7)
self.axes.set_zlim3d(-0.7, 0.7)
+ # Manually set aspect ratio to fit a square bounding box.
+ # Matplotlib did this stretching for < 3.3.0, but not above.
+ self.axes.set_box_aspect((1, 1, 1))
self.axes.grid(False)
self.plot_back()
|
Adding timestamps to the beginning of every test file in run_test
Summary: Pull Request resolved: | from __future__ import print_function
import argparse
+from datetime import datetime
import os
import shlex
import shutil
@@ -371,7 +372,8 @@ def main():
test_name = 'test_{}'.format(test)
test_module = parse_test_module(test)
- print_to_stderr('Running {} ...'.format(test_name))
+ # Printing the date here can help diagnose which tests are slow
+ print_to_stderr('Running {} ... [{}]'.format(test_name, datetime.now()))
handler = CUSTOM_HANDLERS.get(test_module, run_test)
return_code = handler(python, test_name, test_directory, options)
assert isinstance(return_code, int) and not isinstance(
|
Suppress pooch-related INFO messages
Files downloaded using geocat-datafiles were generating output cells in
generated documentation pages; this commit suppresses those messages.
html_theme_options = {
'navigation_depth': 2,
}
+
+# the following lines suppress INFO messages when files are downloaded using geocat.datafiles
+import geocat.datafiles
+import logging
+import pooch
+logger = pooch.get_logger()
+logger.setLevel(logging.WARNING)
+geocat.datafiles.get("registry.txt")
|
Add ContainerDefaultPidsLimit to set default pid limits in containers.conf
Starting with podman 2.X the default pids-limit has been halved from
4096 to 2048 (see the dep-on change for
more details).
Let's add a parameter to override this value so an operator can raise
this limit globally.
Depends-On: | @@ -53,6 +53,11 @@ parameters:
username: pa55word
'192.0.2.1:8787':
registry_username: password
+ ContainerDefaultPidsLimit:
+ type: number
+ default: 4096
+ description: Setting to configure the default pids_limit in /etc/container/container.conf.
+ This is supported starting with podman 2.0.x
SystemdDropInDependencies:
default: true
description: tell the tripleo_container_manage to inject
@@ -85,6 +90,7 @@ outputs:
# default that is overwritten by the heat -> dict conversion
container_registry_logins: {}
container_registry_logins_json: {get_param: ContainerImageRegistryCredentials}
+ container_default_pids_limit: {get_param: ContainerDefaultPidsLimit}
- name: Convert logins json to dict
set_fact:
@@ -108,6 +114,7 @@ outputs:
tasks_from: tripleo_podman_install.yml
vars:
tripleo_container_registry_insecure_registries: "{{ container_registry_insecure_registries }}"
+ tripleo_container_default_pids_limit: "{{ container_default_pids_limit }}"
- name: Run podman login
include_role:
|
Bump ffmpeg version
opencv requires a more recent ffmpeg.
Even though ffmpeg tends to add symbols when incrementing the
patch version, versions from 3.2.3 up to 3.2.5 don't add/remove
symbols when compared to each other. | @@ -41,7 +41,7 @@ pinned = {
'boost-cpp': 'boost-cpp 1.64.*', # NA
'bzip2': 'bzip2 1.0.*', # 1.0.6
'cairo': 'cairo 1.14.*', # 1.12.18
- 'ffmpeg': 'ffmpeg >=2.8,<2.8.11', # NA
+ 'ffmpeg': 'ffmpeg >=3.2.3,<3.2.6', # NA
'fontconfig': 'fontconfig 2.12.*', # 2.12.1
'freetype': 'freetype 2.7', # 2.5.5
'geos': 'geos 3.5.1', # 3.5.0
|
Adding more hooks
* bandit will perform a static code analysis, giving a report of
vulnerabilities.
* nosetests will simply run the existing test suite. | @@ -6,3 +6,15 @@ repos:
- id: end-of-file-fixer
- id: check-merge-conflict
- id: flake8
+
+- repo: https://github.com/PyCQA/bandit
+ rev: '1.6.2'
+ hooks:
+ - id: bandit
+
+- repo: local
+ hooks:
+ - id: nosetests
+ name: nosetests
+ entry: nosetests --with-coverage
+ language: system
|
add test for to_matrix method of standard gates
* start to_matrix method for ControlledGate
* add test to check to_matrix of standard gates
* remove stale code
* remove to_matrix from ControlledGate
consider adding back later.
* linting
* add exceptions to catch
* linting
* remove unused import | # pylint: disable=missing-docstring
import unittest
+from inspect import signature
-from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
+from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister, execute
from qiskit.qasm import pi
from qiskit.exceptions import QiskitError
from qiskit.circuit.exceptions import CircuitError
from qiskit.test import QiskitTestCase
+from qiskit.circuit import Gate, ControlledGate
+from qiskit import BasicAer
+from qiskit.quantum_info.operators.predicates import matrix_equal, is_unitary_matrix
class TestStandard1Q(QiskitTestCase):
@@ -1211,5 +1215,40 @@ class TestStandard3Q(QiskitTestCase):
self.assertEqual(instruction_set.instructions[2].params, [])
+class TestStandardMethods(QiskitTestCase):
+ """Standard Extension Test."""
+
+ def test_to_matrix(self):
+ """test gates implementing to_matrix generate matrix which matches
+ definition."""
+ params = [0.1 * i for i in range(10)]
+ gate_class_list = Gate.__subclasses__() + ControlledGate.__subclasses__()
+ simulator = BasicAer.get_backend('unitary_simulator')
+ for gate_class in gate_class_list:
+ sig = signature(gate_class.__init__)
+ free_params = len(sig.parameters) - 1 # subtract "self"
+ try:
+ gate = gate_class(*params[0:free_params])
+ except (CircuitError, QiskitError, AttributeError):
+ self.log.info(
+ 'Cannot init gate with params only. Skipping %s',
+ gate_class)
+ continue
+ if gate.name in ['U', 'CX']:
+ continue
+ circ = QuantumCircuit(gate.num_qubits)
+ circ.append(gate, range(gate.num_qubits))
+ try:
+ gate_matrix = gate.to_matrix()
+ except CircuitError:
+ # gate doesn't implement to_matrix method: skip
+ self.log.info('to_matrix method FAILED for "%s" gate',
+ gate.name)
+ continue
+ definition_unitary = execute([circ], simulator).result().get_unitary()
+ self.assertTrue(matrix_equal(definition_unitary, gate_matrix))
+ self.assertTrue(is_unitary_matrix(gate_matrix))
+
+
if __name__ == '__main__':
unittest.main(verbosity=2)
|
Fix to have collectd only from EPEL
The Undercloud was receiving collectd from opstools which
requires installing the extra collectd packages (disk, python)
EPEL does not require those packages. | yum:
name: "{{ item }}"
state: present
+ disablerepo: "*"
+ enablerepo: "epel"
become: true
with_items:
- collectd
- collectd-apache
- collectd-ceph
- - collectd-disk
- collectd-mysql
- collectd-ping
- - collectd-python
- collectd-turbostat
+ when: collectd_from_epel
# (sai) Since we moved to containers we don't have java installed on the host
# anymore but it is needed for collectd-java
|
move fire method back into RepeatRecord
now that it has been simplified | @@ -298,12 +298,6 @@ class Repeater(QuickCachedDocumentMixin, Document, UnicodeMixIn):
return HTTPDigestAuth(self.username, self.password)
return None
- def fire_for_record(self, repeat_record, force_send):
- if repeat_record.try_now() or force_send:
- repeat_record.overall_tries += 1
- self.post_for_record(repeat_record)
- repeat_record.save()
-
def post_for_record(self, repeat_record):
headers = self.get_headers(repeat_record)
auth = self.get_auth()
@@ -567,7 +561,10 @@ class RepeatRecord(Document):
self.save()
def fire(self, force_send=False):
- self.repeater.fire_for_record(self, force_send=force_send)
+ if self.try_now() or force_send:
+ self.overall_tries += 1
+ self.repeater.post_for_record(self)
+ self.save()
def handle_success(self, response):
"""Do something with the response if the repeater succeeds
|
[Chore] Fixup tezos-baker-013-PtJakart.rb
Problem: Startup script for 'tezos-baker-013-PtJakart.rb' is written to
a non-existent path. This causes the build to fail.
Solution: Fix startup script path. | @@ -87,7 +87,7 @@ class TezosBaker013Ptjakart < Formula
launch_baker "$BAKER_ACCOUNT"
fi
EOS
- File.write("tezos-baker-013-PtJakart", startup_contents)
+ File.write("tezos-baker-013-PtJakart-start", startup_contents)
bin.install "tezos-baker-013-PtJakart-start"
make_deps
install_template "src/proto_013_PtJakart/bin_baker/main_baker_013_PtJakart.exe",
|
Add extra development data so the All England page loads
Previously the absence of the PPU ImportLog entry caused the page to
throw an error. | from django.core.management import call_command
from django.core.management.base import BaseCommand
-from frontend.tests.test_api_spending import TestAPISpendingViewsPPUTable
+from frontend.models import ImportLog, PPUSaving
+from frontend.tests.test_api_spending import ApiTestBase, TestAPISpendingViewsPPUTable
class Command(BaseCommand):
@@ -13,3 +14,6 @@ class Command(BaseCommand):
# API tests
fixtures = TestAPISpendingViewsPPUTable.fixtures
call_command('loaddata', *fixtures)
+ ApiTestBase.setUpTestData()
+ max_ppu_date = PPUSaving.objects.order_by('-date')[0].date
+ ImportLog.objects.create(current_at=max_ppu_date, category='ppu')
|
Do not generate API documentation for test classes
Our test classes are located in the same directory structure as the main
classes. They need to be excluded from the generation of documentation. | @@ -38,6 +38,7 @@ extensions = [
# Document Python Code
autoapi_type = 'python'
autoapi_dirs = [ '../../heat' ]
+autoapi_ignore= [ '*/tests/*' ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
|
DOC: List venues for users' questions, discussion
[ci skip] | @@ -62,6 +62,15 @@ to perform basic tests. To try all available tests, ``./run_tests.py full``.
For alternatives and a summary of usage, ``./run_tests.py -h``
+Contact and support
+-------------------
+
+* Ask for help on the `tulip-control-users mailing list <https://sourceforge.net/p/tulip-control/mailman/tulip-control-users>`_
+* For release announcements, join the `tulip-control-announce mailing list <https://sourceforge.net/p/tulip-control/mailman/tulip-control-announce>`_
+* Bug reports and feature requests should be made at https://github.com/tulip-control/tulip-control/issues
+ Please check for prior discussion and reports before opening a new issue.
+
+
License
-------
|
Fix Small Image Preview Sizing.
Before, the preview would be 100px in height regardless of
whether the image was that tall. Now it is any value up to 100px. | @@ -1997,11 +1997,16 @@ div.floating_recipient {
.message_inline_image {
margin-bottom: 5px;
margin-left: 5px;
- height: 100px;
+ max-height: 100px;
display: block !important;
border: none !important;
}
+/* this forces the line to have inline-block styling which gives it a height. */
+.message_inline_image a {
+ display: inline-block;
+}
+
.message_inline_ref {
margin-bottom: 5px;
margin-left: 5px;
@@ -2014,7 +2019,7 @@ div.floating_recipient {
.message_inline_image img,
.message_inline_ref img {
height: auto;
- max-height: 100%;
+ max-height: 100px;
float: left;
margin-right: 10px;
}
|
Fix cluster_ CLI examples
Add missing "cluster_" to cluster_health, cluster_stats method doc | @@ -232,7 +232,7 @@ def cluster_health(index=None, level='cluster', local=False, hosts=None, profile
CLI example::
- salt myminion elasticsearch.health
+ salt myminion elasticsearch.cluster_health
'''
es = _get_instance(hosts, profile)
@@ -253,7 +253,7 @@ def cluster_stats(nodes=None, hosts=None, profile=None):
CLI example::
- salt myminion elasticsearch.stats
+ salt myminion elasticsearch.cluster_stats
'''
es = _get_instance(hosts, profile)
|
ceph-iscsi: set the pool name in the config file
When using a custom pool for the iSCSI gateway, we need to set the pool
name in the configuration; otherwise the default rbd pool name will be
used. | [config]
cluster_name = {{ cluster }}
+pool = {{ iscsi_pool_name }}
+
# API settings.
# The API supports a number of options that allow you to tailor it to your
# local environment. If you want to run the API under https, you will need to
|
Fixed Health plugin
Fixed PEP 8 issues and removed an unused variable.
return None
-
-
-
def bmi_categories(bmi):
if(bmi < 18.5):
category = "Underweight"
@@ -59,7 +56,6 @@ def health_calories(jarvis, s):
#Example: health calories woman 27 164 60 3
"""
- error = 0
strings = s.split()
if(len(strings) == 5):
gender = strings[0]
@@ -77,16 +73,8 @@ def health_calories(jarvis, s):
elif(gender == 'woman'):
gender_no = -161
- if( gender_no != 0
- and age > 14
- and height > 0.0
- and weight > 0.0
- and level > 0
- and level < 5):
- brm = float(10*weight
- + 6.25*height
- - 5*age
- + gender_no) * exersise_level(level)
+ if(gender_no != 0 and age > 14 and height > 0.0 and weight > 0.0 and level > 0 and level < 5):
+ brm = float(10 * weight + 6.25 * height - 5 * age + gender_no) * exersise_level(level)
brm_loss = brm - 500.0
brm_put_on = brm + 500.0
jarvis.say("Daily caloric intake : " + str(brm))
@@ -97,7 +85,6 @@ def health_calories(jarvis, s):
return None
-
def exersise_level(level):
multiplier = 1
if(level == 1):
|
Don't clip page margins on account of body overflow
Though the `overflow` on the root element must be propagated to the
viewport, we mustn't cut off the page margins in `draw_stacking_context()`
1. never clip when a PageBox is rendered
2. do the proposed clip when drawing the <BlockBox html>
fixes | @@ -184,6 +184,14 @@ def draw_stacking_context(context, stacking_context, enable_hinting):
# See http://www.w3.org/TR/CSS2/zindex.html
with stacked(context):
box = stacking_context.box
+
+ # apply the viewport_overflow to the html box, see #35
+ if box.element_tag == 'html' and (
+ stacking_context.page.style['overflow'] != 'visible'):
+ rounded_box_path(context,
+ stacking_context.page.rounded_padding_box())
+ context.clip()
+
if box.is_absolutely_positioned() and box.style['clip']:
top, right, bottom, left = box.style['clip']
if top == 'auto':
@@ -223,7 +231,9 @@ def draw_stacking_context(context, stacking_context, enable_hinting):
context, stacking_context.page, box, enable_hinting)
with stacked(context):
- if box.style['overflow'] != 'visible':
+ # dont clip the PageBox, see #35
+ if box.style['overflow'] != 'visible' and not isinstance(
+ box, boxes.PageBox):
# Only clip the content and the children:
# - the background is already clipped
# - the border must *not* be clipped
|
python.talon: allow optional "state" before "raise"/"except"
Currently we have `state raise` and `raise {user.python_exception}`
commands. This annoyingly requires you to remember whether you are going
to say an exception name in order to know whether to say `state` or not.
Simpler to always allow it. | @@ -36,8 +36,9 @@ self taught: "self."
pie test: "pytest"
state past: "pass"
-raise {user.python_exception}: user.insert_between("raise {python_exception}(", ")")
-except {user.python_exception}: "except {python_exception}:"
+[state] raise {user.python_exception}:
+ user.insert_between("raise {python_exception}(", ")")
+[state] except {user.python_exception}: "except {python_exception}:"
dock string: user.code_comment_documentation()
dock {user.python_docstring_fields}:
|
use awc_location_local instead of temp table
For some reason the agg query was joining on the temp table
that had been used to update the agg table; that temp table
only contained awcs that were not already in the agg
table
def missing_location_query(self):
return """
- DROP TABLE IF EXISTS "{temp_tablename}";
- CREATE TEMPORARY TABLE "{temp_tablename}" AS SELECT
+ INSERT INTO "{table_name}" (
+ awc_id, awc_name, awc_site_code, supervisor_id, supervisor_name,
+ block_id, block_name, district_id, district_name, state_id, state_name
+ ) (
+ SELECT
loc.doc_id as awc_id,
loc.awc_name as awc_name,
'awc' || loc.awc_site_code as awc_site_code,
@@ -63,17 +66,11 @@ class InactiveAwwsAggregationDistributedHelper(BaseICDSAggregationDistributedHel
FROM "{awc_location_table_name}" loc
WHERE loc.doc_id not in (
SELECT aww.awc_id FROM "{table_name}" aww
- ) and loc.doc_id != 'All';
- INSERT INTO "{table_name}" (
- awc_id, awc_name, awc_site_code, supervisor_id, supervisor_name,
- block_id, block_name, district_id, district_name, state_id, state_name
- ) (
- SELECT * FROM "{temp_tablename}"
- );
+ ) and loc.doc_id != 'All'
+ )
""".format(
table_name=self.aggregate_parent_table,
- awc_location_table_name='awc_location',
- temp_tablename=self.temp_tablename
+ awc_location_table_name='awc_location_local'
)
def aggregate_query(self):
@@ -89,15 +86,13 @@ class InactiveAwwsAggregationDistributedHelper(BaseICDSAggregationDistributedHel
ucr.first_submission as first_submission,
ucr.last_submission as last_submission
FROM "tmp_usage" ucr
- JOIN "{temp_tablename}" loc
- ON ucr.awc_id = loc.awc_id
+ JOIN "{awc_location_table_name}" loc
+ ON ucr.awc_id = loc.doc_id
) ut
WHERE agg_table.awc_id = ut.awc_id;
- DROP TABLE "{temp_tablename}";
DROP TABLE "tmp_usage";
""".format(
table_name=self.aggregate_parent_table,
ucr_table_query=ucr_query,
- awc_location_table_name='awc_location',
- temp_tablename=self.temp_tablename
+ awc_location_table_name='awc_location_local',
), params
|
add endpoints to fetch a user based on their ID only
this functions the same as `validate_invitation_token`, but without
the signed token, using just the ID instead. This is so later endpoints
within the invite flow can also fetch the invited user | @@ -38,3 +38,15 @@ def validate_invitation_token(invitation_type, token):
return jsonify(data=invited_user.serialize()), 200
else:
raise InvalidRequest("Unrecognised invitation type: {}".format(invitation_type))
+
+
+@global_invite_blueprint.route('/service/<uuid:invited_user_id>', methods=['GET'])
+def get_invited_user(invited_user_id):
+ invited_user = get_invited_user_by_id(invited_user_id)
+ return jsonify(data=invited_user_schema.dump(invited_user).data), 200
+
+
+@global_invite_blueprint.route('/organisation/<uuid:invited_org_user_id>', methods=['GET'])
+def get_invited_org_user(invited_org_user_id):
+ invited_user = dao_get_invited_organisation_user(invited_org_user_id)
+ return jsonify(data=invited_user.serialize()), 200
|
Fix OAuth flow for production setup.
Fixes | @@ -19,9 +19,9 @@ import shutil
import subprocess
import sys
+import google_auth_httplib2
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient import discovery
-import google_auth_httplib2
import httplib2
from local.butler import appengine
@@ -70,7 +70,7 @@ class DomainVerifier(object):
flow = InstalledAppFlow.from_client_secrets_file(
oauth_client_secrets_path,
scopes=['https://www.googleapis.com/auth/siteverification'])
- credentials = flow.run_console()
+ credentials = flow.run_local_server()
http = google_auth_httplib2.AuthorizedHttp(
credentials, http=httplib2.Http())
@@ -140,7 +140,7 @@ def enable_services(gcloud):
def replace_file_contents(file_path, replacements):
"""Replace contents of a file."""
- with open(file_path) as f:
+ with open(file_path, encoding='utf-8') as f:
old_contents = f.read()
contents = old_contents
for find, replace in replacements:
@@ -149,14 +149,13 @@ def replace_file_contents(file_path, replacements):
if contents == old_contents:
return
- with open(file_path, 'w') as f:
+ with open(file_path, 'w', encoding='utf-8') as f:
f.write(contents)
def project_bucket(project_id, bucket_name):
"""Return a project-specific bucket name."""
- return '{name}.{project_id}.appspot.com'.format(
- name=bucket_name, project_id=project_id)
+ return f'{bucket_name}.{project_id}.appspot.com'
def create_new_config(gcloud, project_id, new_config_dir,
|
Update widefield.py
Increased number of rays | @@ -37,7 +37,7 @@ def imagingPath(a=10, b=10, title=""):
# Input from the expected field of view
-nRays=100000
+nRays=1000000
objectHalfHeight = 5
inputRays = RandomUniformRays(yMax = objectHalfHeight,
yMin = -objectHalfHeight,
|
admin: Avoid passing unnecessary policy values to admin_tab.hbs.
We do not require values of realm_create_stream_policy,
realm_invite_to_stream_policy, realm_private_message_policy
and realm_wildcard_mention_policy in the organization settings
templates, as we handle the dropdown values of these settings
in javascript code (settings_org.js) only and these values
are not used anywhere in templates. | @@ -72,12 +72,8 @@ export function build_page() {
server_inline_url_embed_preview: page_params.server_inline_url_embed_preview,
realm_default_twenty_four_hour_time_values: settings_config.twenty_four_hour_time_values,
realm_authentication_methods: page_params.realm_authentication_methods,
- realm_create_stream_policy: page_params.realm_create_stream_policy,
- realm_invite_to_stream_policy: page_params.realm_invite_to_stream_policy,
realm_user_group_edit_policy: page_params.realm_user_group_edit_policy,
USER_GROUP_EDIT_POLICY_MEMBERS: 1,
- realm_private_message_policy: page_params.realm_private_message_policy,
- realm_wildcard_mention_policy: page_params.realm_wildcard_mention_policy,
realm_name_changes_disabled: page_params.realm_name_changes_disabled,
realm_email_changes_disabled: page_params.realm_email_changes_disabled,
realm_avatar_changes_disabled: page_params.realm_avatar_changes_disabled,
|
DOC: added summary to changelog
Added a summary of this pull request to the changelog. | @@ -7,6 +7,8 @@ This project adheres to [Semantic Versioning](https://semver.org/).
--------------------
* New Features
* Added the property `empty_partial` to the Constellation class
+ * Added the option to apply custom functions at the Constellation or
+ Instrument level within the Constellation class
* Added option to load Constellation for registered Instruments using lists
of platforms, names, tags, and/or inst_ids, which are new attributes
* Added hidden Constellation methods to determine unique attribute elements
|
Corrects the AUTH_PROFILE_MODULE
This takes the form of app_name.model_name, not a python path. | @@ -48,7 +48,7 @@ DEFAULT_FROM_EMAIL = '[email protected]'
ANONYMOUS_USER_NAME = 'AnonymousUser'
EVERYONE_GROUP_NAME = 'everyone'
-AUTH_PROFILE_MODULE = 'grandchallenge.profiles.UserProfile'
+AUTH_PROFILE_MODULE = 'profiles.UserProfile'
USERENA_USE_HTTPS = False
USERENA_DEFAULT_PRIVACY = 'open'
LOGIN_URL = '/accounts/signin/'
|
chore: ignore storybook entries in test coverage
Since they are more or less secondary test code for visualising how
components look in the UI | "collectCoverageFrom": [
"src/**/*.{ts,tsx}",
"!**/node_modules/**",
- "!src/pb/**"
+ "!src/pb/**",
+ "!src/stories/**",
+ "!src/**/*.stories.tsx"
],
"resetMocks": true
},
|
commands/run: Update run output with final run config
The RunInfo object in the run output is initially created before the
config has been fully parsed, therefore attributes for the project and
run name are never updated; once the config has been finalized, make sure
to update the relevant information.
'by running "wa list workloads".'
raise ConfigError(msg.format(args.agenda))
+ # Update run info with newly parsed config values
+ output.info.project = config.run_config.project
+ output.info.project_stage = config.run_config.project_stage
+ output.info.run_name = config.run_config.run_name
+
executor = Executor()
executor.execute(config, output)
|
Error if dataset size = 1 batch.
Fix for the bug mentioned in | @@ -370,6 +370,7 @@ class Trainer(TrainerIO):
# determine when to check validation
self.val_check_batch = int(self.nb_tng_batches * self.val_check_interval)
+ self.val_check_batch = max(1, self.val_check_batch)
def __add_tqdm_metrics(self, metrics):
for k, v in metrics.items():
|
Update extensions.md
add babel extension | @@ -14,3 +14,5 @@ A list of Sanic extensions created by the community.
- [UserAgent](https://github.com/lixxu/sanic-useragent): Add `user_agent` to request
- [Limiter](https://github.com/bohea/sanic-limiter): Rate limiting for sanic.
- [Sanic EnvConfig](https://github.com/jamesstidard/sanic-envconfig): Pull environment variables into your sanic config.
+- [Babel](https://github.com/lixxu/sanic-babel): Adds i18n/l10n support to Sanic applications with the help of the
+`Babel` library
|
Update functions.rst
For New-Style functions, one should use the ".apply()" method instead of "__call__()".
return gx, gW, gb
def linear(x, W, b):
- return LinearFunction()(x, W, b)
+ return LinearFunction().apply((x, W, b))
This function takes three arguments: input, weight, and bias.
It can be used as a part of model definition, though is inconvenient since the user have to manage the weight and bias parameters directly.
|
Refactoring of DebugListener; no changes to functionality
Also added a little bit more documentation. | @@ -10,10 +10,12 @@ from cumulusci.tasks.robotframework.debugger import Breakpoint, Suite, Testcase,
class DebugListener(object):
"""A robot framework listener for debugging test cases
- This acts as the controller for the debugger. It is responsible for
- managing breakpoints.
+ This acts as the controller for the debugger. It is responsible
+ for managing breakpoints, and pausing execution of a test when a
+ breakpoint is hit.
- Note to self: in Breakpoint.match, "context" refers to testcase::keyword combination
+ The listener is also responsible for instantiating the debugger UI
+ (class DebuggerCli).
"""
@@ -36,28 +38,10 @@ class DebugListener(object):
def start_test(self, name, attrs):
self.stack.append(Testcase(name, attrs))
- def _break_if_breakpoint(self):
- for breakpoint in [
- bp
- for bp in self.breakpoints
- if isinstance(self.stack[-1], bp.breakpoint_type)
- ]:
- statement = "{}::{}".format(self.stack[-2].longname, self.stack[-1].name)
- if breakpoint.match(statement):
- if breakpoint.temporary:
- self.breakpoints.remove(breakpoint)
-
- self.rdb.cmdloop(
- "\n> {}\n-> {}".format(self.stack[-2].longname, str(self.stack[-1]))
- )
- return
-
def start_keyword(self, name, attrs):
- # at this point, context might be ['suite', 'subsuite', 'testcase']
-
context = Keyword(name, attrs)
self.stack.append(context)
- self._break_if_breakpoint()
+ self.break_if_breakpoint()
def end_keyword(self, name, attrs):
self.stack.pop()
@@ -71,11 +55,10 @@ class DebugListener(object):
def do_step(self):
"""Single-step through the code
- This will set a breakpoint on the next keyword in
- the current context before continuing
+ This will set a temporary breakpoint on the next keyword in
+ the current context before continuing. Once the breakpoint
+ is hit, it will be removed from the list of breakpoints.
"""
- # create new breakpoint on the next keyword in the parent of
- # the current context
breakpoint = Breakpoint(
Keyword, "{}::*".format(self.stack[-2].longname), temporary=True
)
@@ -91,3 +74,26 @@ class DebugListener(object):
breakpoint = Breakpoint(breakpoint_type, pattern, temporary)
if breakpoint not in self.breakpoints:
self.breakpoints.append(breakpoint)
+
+ def break_if_breakpoint(self):
+ """Pause test execution and issue a prompt if we are at a breakpoint"""
+
+ # filter breakpoints to only those that match the current context
+ # (eg: Suite, Testcase, Keyword), and iterate over them looking
+ # for a match.
+ for breakpoint in [
+ bp
+ for bp in self.breakpoints
+ if isinstance(self.stack[-1], bp.breakpoint_type)
+ ]:
+ statement = "{}::{}".format(self.stack[-2].longname, self.stack[-1].name)
+ if breakpoint.match(statement):
+ if breakpoint.temporary:
+ self.breakpoints.remove(breakpoint)
+
+ # Note: this call won't return until a debugger command
+ # has been issued which returns True (eg: 'continue' or 'step')
+ self.rdb.cmdloop(
+ "\n> {}\n-> {}".format(self.stack[-2].longname, str(self.stack[-1]))
+ )
+ return
|
fixup! Let dpkg.info expose package status
integration test | @@ -240,9 +240,9 @@ class PkgModuleTest(ModuleCase, SaltReturnAssertsMixin):
func = 'pkg.info_installed'
if grains['os_family'] == 'Debian':
- ret = self.run_function(func, ['bash-completion', 'dpkg'])
+ ret = self.run_function(func, ['bash', 'dpkg'])
keys = ret.keys()
- self.assertIn('bash-completion', keys)
+ self.assertIn('bash', keys)
self.assertIn('dpkg', keys)
elif grains['os_family'] == 'RedHat':
ret = self.run_function(func, ['rpm', 'bash'])
|