message | diff
---|---|
[doc]Update doc for profiling using the correct VARs
Based on code here:
Also, verified that the ENV vars as-is make "ray start" crash. | @@ -34,8 +34,8 @@ If you want to launch Ray in profiling mode, define the following variables:
.. code-block:: bash
- export RAYLET_PERFTOOLS_PATH=/usr/lib/x86_64-linux-gnu/libprofiler.so
- export RAYLET_PERFTOOLS_LOGFILE=/tmp/pprof.out
+ export PERFTOOLS_PATH=/usr/lib/x86_64-linux-gnu/libprofiler.so
+ export PERFTOOLS_LOGFILE=/tmp/pprof.out
The file ``/tmp/pprof.out`` will be empty until you let the binary run the
|
minor spelling correction
minor spelling correction in documentation | @@ -118,7 +118,7 @@ Install from GitHub
-------------------
Alternatively, install it directly from the source repository on
-GitHub. This is the "bleading edge" version, but it may be useful for
+GitHub. This is the "bleeding edge" version, but it may be useful for
accessing bug fixes and/or new features that have not been released.
Standard install
|
[core] input: add logging for input events
to debug input event issues (which seem to become more frequent), add
some input event logging.
see | @@ -5,9 +5,12 @@ import json
import uuid
import time
import select
+import logging
import threading
import bumblebee.util
+log = logging.getLogger(__name__)
+
LEFT_MOUSE = 1
MIDDLE_MOUSE = 2
RIGHT_MOUSE = 3
@@ -24,6 +27,7 @@ def read_input(inp):
"""Read i3bar input and execute callbacks"""
epoll = select.epoll()
epoll.register(sys.stdin.fileno(), select.EPOLLIN)
+ log.debug("starting click event processing")
while inp.running:
if is_terminated():
return
@@ -36,14 +40,18 @@ def read_input(inp):
line = "["
while line.startswith("["):
line = sys.stdin.readline().strip(",").strip()
+ log.debug("new event: {}".format(line))
inp.has_event = True
try:
event = json.loads(line)
if "instance" in event:
inp.callback(event)
inp.redraw()
- except ValueError:
- pass
+ else:
+ log.debug("field 'instance' missing in input, not processing the event")
+ except ValueError as e:
+ log.debug("failed to parse event: {}".format(e))
+ log.debug("exiting click event processing")
epoll.unregister(sys.stdin.fileno())
epoll.close()
inp.has_event = True
|
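A note on the entry above: the new `log.debug(...)` calls only appear if the process configures logging at DEBUG level somewhere. A minimal, hedged sketch of that setup — the logger name and format are illustrative, not the module's actual configuration:

```python
import logging

# Route DEBUG records (including the new click-event messages) to stderr.
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(name)s: %(message)s")

# Any module that does `log = logging.getLogger(__name__)` now emits its debug output.
logging.getLogger("bumblebee.input").debug("click event logging is visible")
```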
Order test results by decreasing date for build order
This means that "newer" results will be found first
Thus a newer "true" will beat an older "false" | @@ -1056,6 +1056,7 @@ function loadBuildOutputTable(build_info, options={}) {
'{% url "api-stock-test-result-list" %}',
{
build: build_info.pk,
+ ordering: '-date',
},
{
success: function(results) {
|
Update ebcli version in ebcli/__init__.py so that the package is ready for the next official release
cr | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-__version__ = '3.10.3'
+__version__ = '3.10.4'
|
Wait for applications to terminate on model reset
Model.reset() does not wait for all applications to terminate
even though it does wait for all machines to terminate. This
commit fixes the issue by blocking until the application count
is zero. | @@ -845,6 +845,9 @@ class Model:
log.debug('Resetting model')
for app in self.applications.values():
await app.destroy()
+ await self.block_until(
+ lambda: len(self.applications) == 0
+ )
for machine in self.machines.values():
await machine.destroy(force=force)
await self.block_until(
|
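The fix in the entry above relies on `Model.block_until` polling a predicate until it holds. A minimal sketch of that waiting pattern on a plain asyncio loop — this is not the library's actual implementation, just the idea:

```python
import asyncio

async def block_until(predicate, interval=0.1, timeout=60):
    """Poll `predicate` until it returns True, yielding to the event loop in between."""
    async def _poll():
        while not predicate():
            await asyncio.sleep(interval)
    await asyncio.wait_for(_poll(), timeout)

# Usage mirroring the diff (illustrative):
#   await block_until(lambda: len(self.applications) == 0)
```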
Update jaomix.py
Cleanup comments | @@ -51,54 +51,6 @@ class JaomixCrawler(Crawler):
}
)
- # self.novel_id = soup.select_one("div.like-but")["id"]
- #
- # pages_soup = self.make_soup(
- # self.submit_form(
- # ajax_url,
- # data={
- # "action": "toc",
- # "selectall": self.novel_id,
- # },
- # )
- # )
- #
- # pages = reversed(pages_soup.select("select.sel-toc > option")[1:])
- #
- # for page in pages:
- # pageId = page["value"]
- #
- # page_soup = self.make_soup(
- # self.submit_form(
- # ajax_url,
- # data={
- # "action": "toc",
- # "page": pageId,
- # "termid": self.novel_id,
- # },
- # )
- # )
- #
- # self.parse_chapters(page_soup)
- #
- # self.parse_chapters(soup)
-
- # def parse_chapters(self, page_soup):
- # for a in reversed(page_soup.select(".hiddenstab .title > a")):
- # chap_id = 1 + len(self.chapters)
- # vol_id = 1 + len(self.chapters) // 100
- # if chap_id % 100 == 1:
- # self.volumes.append({"id": vol_id})
- #
- # self.chapters.append(
- # {
- # "id": chap_id,
- # "volume": vol_id,
- # "title": a.text.strip(),
- # "url": self.home_url.rstrip("/") + a["href"],
- # }
- # )
-
def download_chapter_body(self, chapter):
soup = self.get_soup(chapter["url"])
contents = soup.select_one(".entry-content .entry")
|
Fixed default.yaml
default.yaml was copied from ChIP-seq workflow. Fixed description and
parameters to match ATAC-seq | ################################################################################
-# This file is the default configuration of the ChIP-seq workflow!
+# This file is the default configuration of the ATAC-seq workflow!
#
# In order to adjust some parameters, please either use the wrapper script
-# (eg. /path/to/snakemake_workflows/workflows/ChIP-seq/ChIP-seq)
+# (eg. /path/to/snakemake_workflows/workflows/ATAC-seq/ATAC-seq)
# or save a copy of this file, modify necessary parameters and then provide
# this file to the wrapper or snakmake via '--configfile' option
# (see below how to call the snakefile directly)
#
-# Own parameters will be loaded during snakefile executiuon as well and hence
+# Own parameters will be loaded during snakefile execution as well and hence
# can be used in new/extended snakemake rules!
################################################################################
## General/Snakemake parameters, only used/set by wrapper or in Snakemake cmdl, but not in Snakefile
@@ -21,13 +21,9 @@ workingdir:
## preconfigured target genomes (mm9,mm10,dm3,...) , see /path/to/snakemake_workflows/shared/organisms/
## Value can be also path to your own genome config file!
genome:
-## paired end data?
-paired: true
## Bin size of output files in bigWig format
bw_binsize: 25
-## Median/mean fragment length, only relevant for single-end data (default: 200)
atac_fragment_cutoff: 150
-fragment_length: 200
verbose: false
################################################################################
# Call snakemake directly, i.e. without using the wrapper script:
@@ -36,8 +32,8 @@ verbose: false
# via '--configfile' parameter!
# example call:
#
-# snakemake --snakefile /path/to/snakemake_workflows/workflows/ChIP-seq/Snakefile
-# --configfile /path/to/snakemake_workflows/workflows/ChIP-seq/defaults.yaml
+# snakemake --snakefile /path/to/snakemake_workflows/workflows/ATAC-seq/Snakefile
+# --configfile /path/to/snakemake_workflows/workflows/ATAC-seq/defaults.yaml
# --directory /path/to/outputdir
# --cores 32
################################################################################
|
Docs: Update cloud.rst [skip ci]
This PR updates `cloud.rst` with a minor punctuation improvements. | Cloud Deployments
=================
-To get started running Dask on common Cloud providers
-like Amazon, Google, or Microsoft
-we currently recommend deploying
+To get started running Dask on common Cloud providers like Amazon,
+Google, or Microsoft, we currently recommend deploying
:doc:`Dask with Kubernetes and Helm <kubernetes-helm>`.
All three major cloud vendors now provide managed Kubernetes services.
@@ -14,7 +13,7 @@ Data Access
-----------
You may want to install additional libraries in your Jupyter and worker images
-to access the object stores of each cloud
+to access the object stores of each cloud:
- `s3fs <https://s3fs.readthedocs.io/>`_ for Amazon's S3
- `gcsfs <https://gcsfs.readthedocs.io/>`_ for Google's GCS
|
Drop "target" terminology
It was being used inconsistently, which made it look like there was a
"module" and a "target module", when both referred to the same thing. | @@ -67,7 +67,7 @@ class WorkflowHelper(PostProcessor):
for frame in frames:
entry.stack.add_frame(frame)
- def get_frame_children(self, command, target_module, module_only=False, include_target_root=False):
+ def get_frame_children(self, command, module, module_only=False, include_root_module=False):
"""
For a form return the list of stack frame children that are required
to navigate to that form.
@@ -98,24 +98,21 @@ class WorkflowHelper(PostProcessor):
match = re.search(r'^(m\d+)-(.*)', command)
if not match:
raise Exception("Unrecognized command: {}".format(command))
- target_module_id = match.group(1)
- target_form_id = match.group(2)
-
- frame_children = []
+ module_id = match.group(1)
+ form_id = match.group(2)
+ module_command = id_strings.menu_id(module)
+ module_datums = self.get_module_datums(module_id)
+ form_datums = module_datums[form_id]
- module_command = id_strings.menu_id(target_module)
- module_datums = self.get_module_datums(target_module_id)
- form_datums = module_datums[target_form_id]
frame_children = []
-
if module_command == id_strings.ROOT:
datums_list = self._root_module_datums
else:
datums_list = list(module_datums.values()) # [ [datums for f0], [datums for f1], ...]
- root_module = target_module.root_module
- if root_module and include_target_root:
+ root_module = module.root_module
+ if root_module and include_root_module:
datums_list = datums_list + list(self.get_module_datums(id_strings.menu_id(root_module)).values())
- root_module_command = id_strings.menu_id(target_module.root_module)
+ root_module_command = id_strings.menu_id(root_module)
if root_module_command != id_strings.ROOT:
frame_children.append(CommandId(root_module_command))
frame_children.append(CommandId(module_command))
@@ -303,7 +300,7 @@ class EndOfFormNavigationWorkflow(object):
return StackFrameMeta(xpath, frame_children)
elif form_workflow == WORKFLOW_PREVIOUS:
frame_children = self.helper.get_frame_children(id_strings.form_command(form),
- module, include_target_root=True)
+ module, include_root_module=True)
# since we want to go the 'previous' screen we need to drop the last
# datum
|
Update interactive-learning.rst
I believe the proposed action should be venue rather than concert_reviews as shown below. Otherwise the text makes no sense. | @@ -120,7 +120,7 @@ possibilities (depending on the training run, it might also be correct):
concerts: None, venues: None
------
- ? The bot wants to run 'action_show_concert_reviews', correct? No
+ ? The bot wants to run 'action_show_venue_reviews', correct? No
Now we type ``n``, because it chose the wrong action, and we get a new
|
GafferArnoldUI::ArnoldShaderUI : Create default parameter labels
based on Arnold snake_case naming convention | @@ -178,9 +178,15 @@ def __translateNodeMetadata( nodeEntry ) :
__metadata[paramPath]["layout:section"] = page
# Label from OSL "label"
- label = __aiMetadataGetStr( nodeEntry, paramName, "label", label )
+ label = __aiMetadataGetStr( nodeEntry, paramName, "label" )
if label is not None :
__metadata[paramPath]["label"] = label
+ elif "_" in paramName:
+ # Label from Arnold naming convention
+ # Arnold uses snake_case rather than camelCase for naming, so translate this into
+ # nice looking names
+ __metadata[paramPath]["label"] = " ".join( [ i.capitalize() for i in paramName.split( "_" ) ] )
+
# NodeGraph visibility from Gaffer-specific metadata
|
Version changes should invalidate pytest runs.
### Problem
When changing the pytest version or when adding/removing pytest plugins, pants does not invalidate targets.
### Solution
Make sure pytest fingerprinting takes requirements into account.
### Result
Test cache is correctly invalidated if we change any of the underlying requirements for pytest. | @@ -18,7 +18,8 @@ class PyTest(Subsystem):
'--args', type=list, member_type=str,
help="Arguments to pass directly to Pytest, e.g. `--pytest-args=\"-k test_foo --quiet\"`",
)
- register('--version', default='pytest>=4.6.6,<4.7', help="Requirement string for Pytest.")
+ register('--version', default='pytest>=4.6.6,<4.7',
+ help="Requirement string for Pytest.", fingerprint=True)
register(
'--pytest-plugins',
type=list,
@@ -29,6 +30,7 @@ class PyTest(Subsystem):
"more-itertools<6.0.0 ; python_version<'3'",
],
help="Requirement strings for any plugins or additional requirements you'd like to use.",
+ fingerprint=True
)
register('--requirements', advanced=True, default='pytest>=4.6.6,<4.7',
help='Requirements string for the pytest library.',
|
Make meta=True by default for linint2
Docstring already said True is default behavior, updated code to match. | @@ -3,7 +3,7 @@ import numpy as np
import xarray as xr
from dask.array.core import map_blocks
-def linint2(fi, xo, yo, icycx, xmsg=None, meta=False, xi=None, yi=None):
+def linint2(fi, xo, yo, icycx, xmsg=None, meta=True, xi=None, yi=None):
"""Return a 2-dimensionally linearly interpolated grid.
Args:
|
mismatch bug for external datasets
adds check for None when fixing data mismatches. | @@ -254,16 +254,16 @@ def extract_images(filedata, i, png_destination, flattened_to_level, failed, is1
# Function when pydicom fails to read a value attempt to read as other types.
def fix_mismatch_callback(raw_elem, **kwargs):
try:
+ if raw_elem.VR:
values.convert_value(raw_elem.VR, raw_elem)
- except TypeError:
+ except BaseException as err:
for vr in kwargs['with_VRs']:
try:
values.convert_value(vr, raw_elem)
- except TypeError:
+ except ValueError:
pass
else:
raw_elem = raw_elem._replace(VR=vr)
- break
return raw_elem
|
Set a classname prefix for atomic styletron css classes
fix css conflict between plotly and baseweb slider | @@ -24,7 +24,7 @@ import { LightTheme, BaseProvider } from "baseui"
import { Provider as StyletronProvider } from "styletron-react"
import { SCSS_VARS } from "autogen/scssVariables"
-const engine = new Styletron()
+const engine = new Styletron({ prefix: "st-" })
const popupZIndex = Number(SCSS_VARS["$z-index-popup-menu"])
ReactDOM.render(
|
`datetime.timedelta`: Remove explicit inheritance from `SupportsAbs`
Fixes | @@ -2,7 +2,7 @@ import sys
from _typeshed import Self
from abc import abstractmethod
from time import struct_time
-from typing import ClassVar, NamedTuple, NoReturn, SupportsAbs, TypeVar, overload
+from typing import ClassVar, NamedTuple, NoReturn, TypeVar, overload
from typing_extensions import Literal, TypeAlias, final
if sys.version_info >= (3, 11):
@@ -159,7 +159,7 @@ class time:
_Date: TypeAlias = date
_Time: TypeAlias = time
-class timedelta(SupportsAbs[timedelta]):
+class timedelta:
min: ClassVar[timedelta]
max: ClassVar[timedelta]
resolution: ClassVar[timedelta]
|
Add test conditions for get_updater() and get_one_valid_targetinfo
Some of these conditions detected bugs | @@ -1839,8 +1839,38 @@ class TestMultiRepoUpdater(unittest_toolbox.Modified_TestCase):
# Restore the Root file.
shutil.move(backup_root_filepath, root_filepath)
+ # Verify that a targetinfo is not returned for a non-existent target.
+ multi_repo_updater.map_file['mapping'][1]['terminating'] = False
self.assertRaises(tuf.exceptions.UnknownTargetError,
multi_repo_updater.get_one_valid_targetinfo, 'non-existent.txt')
+ multi_repo_updater.map_file['mapping'][1]['terminating'] = True
+
+ # Test for a mapping that sets terminating = True, and that occurs before
+ # the final mapping.
+ multi_repo_updater.map_file['mapping'][0]['terminating'] = True
+ self.assertRaises(tuf.exceptions.UnknownTargetError,
+ multi_repo_updater.get_one_valid_targetinfo, 'bad3.txt')
+
+
+
+ def test_get_updater(self):
+ map_file = os.path.join(self.client_directory, 'map.json')
+ multi_repo_updater = updater.MultiRepoUpdater(map_file)
+
+ # Test for a non-existent repository name.
+ self.assertEqual(None, multi_repo_updater.get_updater('bad_repo_name',
+ multi_repo_updater.map_file['repositories']))
+
+ # Test get_updater indirectly via the "private" _update_from_repository().
+ self.assertRaises(tuf.exceptions.Error,
+ multi_repo_updater._update_from_repository, 'bad_repo_name',
+ multi_repo_updater.map_file['repositories'],
+ 'file3.txt')
+
+ # Test for a repository that doesn't exist.
+ multi_repo_updater.map_file['repositories']['bad_repo_name'] = ['https://bogus:30002']
+ self.assertEqual(None, multi_repo_updater.get_updater('bad_repo_name',
+ multi_repo_updater.map_file['repositories']))
|
Update mediaprocessor.py
move purge before default track selection
prioritize default source track if present (copy is higher priority) | @@ -487,6 +487,7 @@ class MediaProcessor:
same_codec_options = [x for x in same_channel_options if Converter.codec_name_to_ffprobe_codec_name("audio", x['codec']) == codec or (x['codec'] == 'copy' and self.getSourceStream(x['map'], info).codec == codec)]
if len(same_codec_options) > 1:
same_codec_options.sort(key=lambda x: x['bitrate'], reverse=True)
+ same_codec_options.sort(key=lambda x: self.getSourceStream(x['map'], info).disposition['default'], reverse=True)
same_codec_options.sort(key=lambda x: x['codec'] == "copy", reverse=True)
purge.extend(same_codec_options[1:])
self.log.debug("Purge the following streams: %s." % purge)
@@ -905,6 +906,13 @@ class MediaProcessor:
blocked_audio_languages.append(a.metadata['language'])
self.log.debug("Blocking further %s audio streams to prevent multiple streams of the same language [audio-first-stream-of-language]." % a.metadata['language'])
+ # Purge Duplicate Streams
+ if self.purgeDuplicateStreams(acombinations, audio_settings, info):
+ try:
+ self.sortStreams(audio_settings, awl)
+ except:
+ self.log.exception("Error sorting output stream options [sort-streams].")
+
# Set Default Audio Stream
try:
self.setDefaultAudioStream(audio_settings)
@@ -1063,12 +1071,6 @@ class MediaProcessor:
}
attachments.append(attachment)
- if self.purgeDuplicateStreams(acombinations, audio_settings, info):
- try:
- self.sortStreams(audio_settings, awl)
- except:
- self.log.exception("Error sorting output stream options [sort-streams].")
-
# Collect all options
options = {
'source': sources,
|
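The three consecutive `sort` calls in the entry above rely on Python's sort being stable: sorting by the least important key first and the most important key last yields the combined ordering. A small illustration with made-up stream dicts (field names simplified):

```python
options = [
    {'codec': 'aac',  'bitrate': 256, 'default': 0},
    {'codec': 'copy', 'bitrate': 192, 'default': 0},
    {'codec': 'aac',  'bitrate': 192, 'default': 1},
]
options.sort(key=lambda x: x['bitrate'], reverse=True)          # least significant key first
options.sort(key=lambda x: x['default'], reverse=True)          # then default disposition
options.sort(key=lambda x: x['codec'] == 'copy', reverse=True)  # most significant key last
print([o['codec'] for o in options])  # ['copy', 'aac', 'aac']
```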
Update README.rst
Specify how to install `stanfordnlp` models so that tests pass. | @@ -41,6 +41,7 @@ Use ``pyenv`` to manage Python versions and ``poetry`` for package builds.
- ``$ pyenv local cltkv1``. Open a new window and this should be activated, with a ``(cltkv1) `` prepended to your Bash prompt.
* Install ``poetry`` to support packaging: https://poetry.eustace.io/docs/
* Install dependencies in ``poetry.lock``: ``$ poetry install``
+* Install Stanford NLP models: ``$ poetry run python scripts/download_misc_dependencies.py''
* Install Graphiz (necessary for building docs): https://graphviz.gitlab.io/download/
|
Changes ColorBoxPlot to default to "boxLabels=False".
This seems reasonable given that it takes so long to render lots of
box labels, and this information is (by default) available via the
hover info. | @@ -765,7 +765,7 @@ class BoxKeyPlot(WorkspacePlot):
class ColorBoxPlot(WorkspacePlot):
def __init__(self, ws, plottype, gss, dataset, gateset,
- sumUp=False, boxLabels=True, hoverInfo=True, invert=False,
+ sumUp=False, boxLabels=False, hoverInfo=True, invert=False,
prec='compact', linlg_pcntle=.05, minProbClipForWeighting=1e-4,
directGSTgatesets=None, scale=1.0):
"""
|
print_counters
`self.logger.info("Total {}: {}/{}".format(key, val, self.max_per_day[key]))` | @@ -256,7 +256,7 @@ class Bot(object):
def print_counters(self):
for key, val in self.total.items():
if val > 0:
- self.logger.info("Total {}: {}".format(key, val))
+ self.logger.info("Total {}: {}/{}".format(key, val, self.max_per_day[key]))
self.logger.info("Total requests: {}".format(self.api.total_requests))
def delay(self, key):
|
Add warning when no credentials found. Credential detection can take a very long time to fail.
Change copy as suggested by | @@ -139,7 +139,10 @@ func getSessionOrDefaultCreds(profile string) map[string]string {
opts.Profile = profile
// Obtain AWS credentials and pass them through to the container runtime via env variables
if sess, err := session.NewSessionWithOptions(opts); err == nil {
- if creds, err := sess.Config.Credentials.Get(); err == nil {
+ creds, err := sess.Config.Credentials.Get()
+ if err != nil {
+ log.Printf("WARNING: No AWS credentials found. Missing credentials may lead to slow startup times as detailed in https://github.com/awslabs/aws-sam-local/issues/134")
+ } else {
if *sess.Config.Region != "" {
result["region"] = *sess.Config.Region
}
|
Update broad-crawls.rst
Added section on how to treat memory consumption problems of broad crawls. | @@ -174,3 +174,32 @@ It is turned OFF by default because it has some performance overhead,
and enabling it for focused crawls doesn't make much sense.
.. _ajax crawlable: https://developers.google.com/webmasters/ajax-crawling/docs/getting-started
+
+Reducing memory consumption for broad crawls
+============================================
+
+For broad crawls, the amount of memory used for storing `Requests`_, references, and further information may soon become pretty large.
+The following steps help to reduce the amount of memory used for broad crawls.
+
+1) **Change the queue type:** The default queue for crawls is "Last-In-First-Out ( `LIFO`_ )" using the concept of "Depth-First Search ( `DFS`_ )". In case the page scraping is faster than the processing of the spiders, early `Requests`_ might not be processed and therefore block memory until the final depth is reached. Setting the queue from LIFO to "First-In-First-Out ( `FIFO`_ )" and setting dispatching from `DFS`_ to "Breadth-First Search ( `BFS`_ )", as shown in the `FAQ`_ will solve this problem::
+
+ DEPTH_PRIORITY = 1
+ SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleFifoDiskQueue'
+ SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.FifoMemoryQueue'
+
+2) **Reduce the number of concurrent requests:** As stated before, the global concurrency level can be set using::
+
+ CONCURRENT_REQUESTS = 100
+
+However, if scraping is faster than processing, the queue will eventually exceed the memory size.
+Unfortunately, there is yet no autobalancing feature available, so you need to find concurrency values that fit your processing speed.
+
+3) **Use the profiling and trackref capabilities of scrapy:** scrapy provides an own and interactive profiling and reference tracking tool. See `debugging memory leaks`_ for more information.
+
+.. _debugging memory leaks: http://doc.scrapy.org/en/latest/topics/leaks.html
+.. _Requests: http://doc.scrapy.org/en/latest/topics/request-response.html#request-objects
+.. _LIFO: http://en.wikipedia.org/wiki/Stack_(abstract_data_type)
+.. _FIFO: http://en.wikipedia.org/wiki/FIFO_(computing_and_electronics)
+.. _BFS: http://en.wikipedia.org/wiki/Breadth-first_search
+.. _DFS: http://en.wikipedia.org/wiki/Depth-first_search
+.. _FAQ: http://doc.scrapy.org/en/latest/faq.html#does-scrapy-crawl-in-breadth-first-or-depth-first-order
|
fix: adjust to new solution object
Fluxes are now pandas Series by default, so allow them as input to room and moma | @@ -81,7 +81,7 @@ def fba(model, objective=None, reactions=None, *args, **kwargs):
assert_optimal(model)
solution = get_solution(model)
if reactions is not None:
- result = FluxDistributionResult({r: solution.get_primal_by_id(r) for r in reactions}, solution.f)
+ result = FluxDistributionResult({r: solution[r] for r in reactions}, solution.f)
else:
result = FluxDistributionResult.from_solution(solution)
return result
@@ -235,7 +235,7 @@ def lmoma(model, reference=None, cache=None, reactions=None, *args, **kwargs):
cache.begin_transaction()
- if not isinstance(reference, (dict, FluxDistributionResult)):
+ if not isinstance(reference, (dict, pandas.Series, FluxDistributionResult)):
raise TypeError("reference must be a flux distribution (dict or FluxDistributionResult")
try:
@@ -337,7 +337,7 @@ def room(model, reference=None, cache=None, delta=0.03, epsilon=0.001, reactions
cache.begin_transaction()
- if not isinstance(reference, (dict, FluxDistributionResult)):
+ if not isinstance(reference, (dict, pandas.Series, FluxDistributionResult)):
raise TypeError("reference must be a flux distribution (dict or FluxDistributionResult")
try:
@@ -438,7 +438,7 @@ class FluxDistributionResult(Result):
@property
def data_frame(self):
- return pandas.DataFrame(list(self._fluxes.values()), index=list(self._fluxes.keys()), columns=['flux'])
+ return pandas.DataFrame(list(self._fluxes.values), index=list(self._fluxes.keys()), columns=['flux'])
@property
def fluxes(self):
|
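Both hunks in the entry above lean on pandas Series behaviour: a Series is indexed like a dict, and `.values` is an attribute rather than a method. A quick illustration with made-up reaction ids:

```python
import pandas as pd

fluxes = pd.Series({'PGI': 4.86, 'PFK': 7.48})
print(fluxes['PGI'])        # dict-style lookup, as in `solution[r]`
print(list(fluxes.values))  # attribute access - no parentheses
print(list(fluxes.keys()))  # ['PGI', 'PFK']
```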
Schedule build every day of the week.
Starts 3 hours before the SDG daily build: | -String cron_string = BRANCH_NAME == "master" ? "H 12 * * 1,3,5" : ""
+String cron_string = BRANCH_NAME == "master" ? "H 12 * * 1-5" : "" // Mon-Fri at noon UTC, 8am EST, 5am PDT
pipeline {
agent { label 'ephemeral-linux' }
|
[IMPR] use a sentinel variable to determine the end of an iterable
This enables None to be part of the iterables chain | @@ -1029,9 +1029,11 @@ def roundrobin_generators(*iterables):
:return: the combined generator of iterables
:rtype: generator
"""
+ sentinel = object()
return (item
- for item in itertools.chain.from_iterable(zip_longest(*iterables))
- if item is not None)
+ for item in itertools.chain.from_iterable(
+ zip_longest(*iterables, fillvalue=sentinel))
+ if item is not sentinel)
def filter_unique(iterable, container=None, key=None, add=None):
|
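A self-contained sketch of the sentinel pattern from the entry above (independent of the project's helper; the inputs are illustrative). Filtering on `None` would drop legitimate `None` items, while a private `object()` sentinel cannot collide with anything a caller yields:

```python
import itertools
from itertools import zip_longest

def roundrobin(*iterables):
    sentinel = object()  # unique marker that cannot appear in the inputs
    return (item
            for item in itertools.chain.from_iterable(
                zip_longest(*iterables, fillvalue=sentinel))
            if item is not sentinel)

print(list(roundrobin([1, None, 3], ['a', 'b'])))  # [1, 'a', None, 'b', 3]
```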
[hail] use TLS to speak to maven repos
Protects from (unlikely) man-in-the-middle attacks on our infrastructure. | @@ -22,8 +22,8 @@ repositories {
mavenCentral()
jcenter()
maven { url "https://repository.cloudera.com/artifactory/cloudera-repos/" }
- maven { url "http://repo.hortonworks.com/content/repositories/releases/" }
- maven { url "http://repo.spring.io/plugins-release/" }
+ maven { url "https://repo.hortonworks.com/content/repositories/releases/" }
+ maven { url "https://repo.spring.io/plugins-release/" }
}
String sparkVersion = System.getProperty("spark.version", "2.4.0")
|
feat(ldap): ldap search string validation
The LDAP search string is user input. Validate to ensure it is enclosed in '()', has the '{0}' placeholder, and has the same number of opening and closing brackets as used in complex LDAP search strings.
issue | @@ -13,13 +13,12 @@ class LDAPSettings(Document):
return
if not self.flags.ignore_mandatory:
- if not self.ldap_search_string.startswith('('):
- self.ldap_search_string = '(' + self.ldap_search_string
- if not self.ldap_search_string.endswith(')'):
- self.ldap_search_string = self.ldap_search_string + ')'
-
- if self.ldap_search_string and "{0}" in self.ldap_search_string:
+ if self.ldap_search_string.count('(') == self.ldap_search_string.count(')') and \
+ self.ldap_search_string.startswith('(') and \
+ self.ldap_search_string.endswith(')') and \
+ self.ldap_search_string and \
+ "{0}" in self.ldap_search_string:
self.connect_to_ldap(base_dn=self.base_dn, password=self.get_password(raise_exception=False))
else:
|
puppeteer: Replace `hidden: false` with `visible: true`.
I had a misconception with hidden and visible options
and thought `hidden: false` was the same as `visible: true`,
and the other way around.
But `hidden: false` or `visible: false` does nothing
more than check whether the selector exists.
Also, to mention, `visible: false`'s were fixed in | @@ -98,12 +98,12 @@ async function test_reply_with_r_shortcut(page) {
}
async function test_open_close_compose_box(page) {
- await page.waitForSelector("#stream-message", {hidden: false});
+ await page.waitForSelector("#stream-message", {visible: true});
await close_compose_box(page);
await page.waitForSelector("#stream-message", {hidden: true});
await page.keyboard.press("KeyX");
- await page.waitForSelector("#private-message", {hidden: false});
+ await page.waitForSelector("#private-message", {visible: true});
await close_compose_box(page);
await page.waitForSelector("#private-message", {hidden: true});
}
|
Use globally defined `device` instead of passing as arg
The PR reduces the number of args in functions by using globally defined `device` variable. | @@ -47,9 +47,9 @@ def cross_validation_with_val_set(dataset,
t_start = time.perf_counter()
for epoch in range(1, epochs + 1):
- train_loss = train(model, optimizer, train_loader, device)
- val_losses.append(eval_loss(model, val_loader, device))
- accs.append(eval_acc(model, test_loader, device))
+ train_loss = train(model, optimizer, train_loader)
+ val_losses.append(eval_loss(model, val_loader))
+ accs.append(eval_acc(model, test_loader))
eval_info = {
'fold': fold,
'epoch': epoch,
@@ -110,7 +110,7 @@ def num_graphs(data):
return data.x.size(0)
-def train(model, optimizer, loader, device):
+def train(model, optimizer, loader):
model.train()
total_loss = 0
@@ -125,7 +125,7 @@ def train(model, optimizer, loader, device):
return total_loss / len(loader.dataset)
-def eval_acc(model, loader, device):
+def eval_acc(model, loader):
model.eval()
correct = 0
@@ -137,7 +137,7 @@ def eval_acc(model, loader, device):
return correct / len(loader.dataset)
-def eval_loss(model, loader, device):
+def eval_loss(model, loader):
model.eval()
loss = 0
|
m1n1.constructutils: Keep a global addr->struct map
This allows easily identifying pointers to the beginning of structs. | @@ -8,6 +8,7 @@ from construct.lib import HexDisplayedInteger
from .utils import *
g_struct_trace = set()
+g_struct_addrmap = {}
g_depth = 0
def recusive_reload(obj, token=None):
@@ -80,6 +81,10 @@ def str_value(value, repr=False):
if isinstance(value, DecDisplayedInteger):
return str(value)
if isinstance(value, int):
+ if value in g_struct_addrmap:
+ desc = g_struct_addrmap[value]
+ return f"{value:#x} ({desc})"
+ else:
return f"{value:#x}"
if isinstance(value, ListContainer):
if len(value) <= 16:
@@ -319,7 +324,9 @@ class ConstructClassBase(Reloadable, metaclass=ReloadableConstructMeta):
self._apply(obj)
+ if self._addr > 0x10000:
g_struct_trace.add((self._addr, f"{cls.name} (end: {self._addr + size:#x})"))
+ g_struct_addrmap[self._addr] = f"{cls.name}"
return self
@classmethod
|
[swarming] remove command from local_smoke_test
Also fix non-deterministic behavior. | @@ -69,7 +69,6 @@ DEFAULT_COMMAND = ["python", "-u", "%s.py" % HELLO_WORLD]
# The default isolated command is to map and run HELLO_WORLD.
DEFAULT_ISOLATE_HELLO = """{
"variables": {
- "command": ["python", "-u", "%(name)s.py"],
"files": ["%(name)s.py"],
},
}""" % {
@@ -999,7 +998,7 @@ class Test(unittest.TestCase):
u'initial_size': unicode(sum(items_in)),
u'items_cold': [],
# Items are hot.
- u'items_hot': items_in,
+ u'items_hot': sorted(items_in),
u'num_items_hot': unicode(len(items_in)),
u'total_bytes_items_hot': unicode(sum(items_in)),
},
|
Expand namespaces automatically when there is only one namespace
Make namespace list expand by default if only one namespace is available | @@ -21,11 +21,16 @@ import { ToolbarConfig } from 'patternfly-ng/toolbar/toolbar-config';
import { ToolbarView } from 'patternfly-ng/toolbar/toolbar-view';
import { IMe } from '../../auth/auth.service';
import { AuthService } from '../../auth/auth.service';
-import { Namespace } from '../../resources/namespaces/namespace';
import { NamespaceService } from '../../resources/namespaces/namespace.service';
import { PagedResponse } from '../../resources/paged-response';
import { AddRepositoryModalComponent } from '../add-repository-modal/add-repository-modal.component';
+import { Namespace as VanillaNamespace } from '../../resources/namespaces/namespace';
+
+class Namespace extends VanillaNamespace {
+ expanded: boolean;
+}
+
@Component({
encapsulation: ViewEncapsulation.None,
selector: 'namespace-list',
@@ -154,6 +159,9 @@ export class NamespaceListComponent implements OnInit {
this.me = data['me'];
this.items = this.prepForList(results.results as Namespace[]);
this.filterConfig.resultsCount = results.count;
+ if (results.count === 1) {
+ this.items[0].expanded = true;
+ }
this.paginationConfig.totalItems = results.count;
this.pageLoading = false;
});
|
Fix test_multiple_hooks test by adding retry
* Without the retry, the test might fail if it ends before the
workflow_succeeded hook ran. | # * limitations under the License.
import os
-import yaml
import uuid
import tempfile
+import yaml
+from retrying import retry
+
from integration_tests.tests import utils
from integration_tests.framework import docl
from integration_tests import AgentlessTestCase
-
from integration_tests.tests.utils import upload_mock_plugin
@@ -292,6 +293,7 @@ hooks:
utils.wait_for_deployment_creation_to_complete(deployment_id)
return deployment_id
+ @retry(wait_fixed=1000, stop_max_attempt_number=3)
def _assert_messages_in_log(self, messages, log_path=LOG_PATH):
tmp_log_path = os.path.join(self.workdir, 'test_log')
docl.copy_file_from_manager(log_path, tmp_log_path)
|
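The decorator added in the entry above comes from the `retrying` package: the wrapped method is re-run up to three times with a one-second pause whenever it raises, which covers the case where the hook fires just after the workflow finishes. A tiny, self-contained illustration of the same decorator arguments:

```python
from retrying import retry

attempts = {'n': 0}

@retry(wait_fixed=1000, stop_max_attempt_number=3)
def eventually_passes():
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise AssertionError('expected log line not present yet')
    return 'found it'

print(eventually_passes(), 'after', attempts['n'], 'attempts')
```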
Fix TypeError
Response.text is a string, Response.content is bytes | @@ -52,7 +52,7 @@ def get_feed_xml(requests, feed_name, page):
resp = requests.get(feed_url)
if (
resp.status_code == 500
- and 'AtomFeedRuntimeException: feed does not exist' in resp.content
+ and 'AtomFeedRuntimeException: feed does not exist' in resp.text
):
exception = OpenmrsFeedDoesNotExist(
f'Domain "{requests.domain_name}": Page does not exist in atom '
|
Partially revert "llvm, functions/LinearMatrix: Add value shape workaround for MappingProjection"
This reverts commit
The workaround is no longer necessary after:
("Component: if applicable, get
matrix from parameter state when instantiating function") | @@ -4662,15 +4662,6 @@ class LinearMatrix(TransferFunction): # ---------------------------------------
def get_param_ids(self):
return [MATRIX]
- def get_output_struct_type(self):
- default_val = self.instance_defaults.value
- # FIXME: PathwayProjection does not initialize default value correctly
- from psyneulink.components.projections.pathway.mappingprojection import MappingProjection
- if self.owner and isinstance(self.owner, MappingProjection):
- default_val = self.owner.instance_defaults.value
- with pnlvm.LLVMBuilderContext() as ctx:
- return pnlvm._convert_python_struct_to_llvm_ir(ctx, default_val)
-
def _gen_llvm_function_body(self, ctx, builder, params, _, arg_in, arg_out):
# Restrict to 1d arrays
assert self.instance_defaults.variable.ndim == 1
|
Update run_classifier_multi_label_infer.py
run_classifier_multi_label_infer.py add output_logits and output_prob | @@ -32,6 +32,9 @@ def main():
tokenizer_opts(parser)
+ parser.add_argument("--output_logits", action="store_true", help="Write logits to output file.")
+ parser.add_argument("--output_prob", action="store_true", help="Write probabilities to output file.")
+
args = parser.parse_args()
# Load the hyperparameters from the config file.
@@ -65,7 +68,12 @@ def main():
model.eval()
with open(args.prediction_path, mode="w", encoding="utf-8") as f:
- f.write("label\n")
+ f.write("label")
+ if args.output_logits:
+ f.write("\t" + "logits")
+ if args.output_prob:
+ f.write("\t" + "prob")
+ f.write("\n")
for i, (src_batch, seg_batch) in enumerate(batch_loader(batch_size, src, seg)):
src_batch = src_batch.to(device)
seg_batch = seg_batch.to(device)
@@ -74,13 +82,19 @@ def main():
probs_batch = nn.Sigmoid()(logits)
probs_batch = probs_batch.cpu().numpy().tolist()
+ logits = logits.cpu().numpy().tolist()
- for prob in probs_batch:
+ for i,prob in enumerate(probs_batch):
label = list()
for j in range(len(prob)):
if prob[j] > 0.5:
label.append(str(j))
- f.write(",".join(label) + "\n")
+ f.write(",".join(label))
+ if args.output_logits:
+ f.write("\t" + " ".join([str(v) for v in logits[i]]))
+ if args.output_prob:
+ f.write("\t" + " ".join([str(v) for v in prob]))
+ f.write("\n")
if __name__ == "__main__":
|
Fix `unit.modules.test_znc` for Windows
Mock the signal object as it's missing SIGUSR1 and SIGHUP on Windows | @@ -54,7 +54,8 @@ class ZncTestCase(TestCase, LoaderModuleMockMixin):
Tests write the active configuration state to config file
'''
mock = MagicMock(return_value='SALT')
- with patch.dict(znc.__salt__, {'ps.pkill': mock}):
+ with patch.dict(znc.__salt__, {'ps.pkill': mock}), \
+ patch.object(znc, 'signal', MagicMock()):
self.assertEqual(znc.dumpconf(), 'SALT')
# 'rehashconf' function tests: 1
@@ -64,7 +65,8 @@ class ZncTestCase(TestCase, LoaderModuleMockMixin):
Tests rehash the active configuration state from config file
'''
mock = MagicMock(return_value='SALT')
- with patch.dict(znc.__salt__, {'ps.pkill': mock}):
+ with patch.dict(znc.__salt__, {'ps.pkill': mock}), \
+ patch.object(znc, 'signal', MagicMock()):
self.assertEqual(znc.rehashconf(), 'SALT')
# 'version' function tests: 1
|
Pass references when not consuming argument
When the argument to a function is not consumed in that function, it is
better to pass a reference. | * ------------------------------------------------------------------------------
*/
-#![allow(unknown_lints)]
-
use protobuf;
use protobuf::{Message as ProtobufMessage, ProtobufError};
use rand;
@@ -85,9 +83,9 @@ impl ZmqDriver {
let driver_thread = thread::spawn(move || {
driver_loop(
update_sender,
- self.stop_receiver,
+ &self.stop_receiver,
validator_sender,
- validator_receiver,
+ &validator_receiver,
)
});
@@ -121,12 +119,11 @@ impl Stop {
}
}
-#[allow(needless_pass_by_value)]
fn driver_loop(
mut update_sender: Sender<Update>,
- stop_receiver: Receiver<()>,
+ stop_receiver: &Receiver<()>,
mut validator_sender: ZmqMessageSender,
- validator_receiver: Receiver<Result<Message, ReceiveError>>,
+ validator_receiver: &Receiver<Result<Message, ReceiveError>>,
) -> Result<(), Error> {
loop {
match validator_receiver.recv_timeout(Duration::from_millis(100)) {
|
tv4play: fixed a crash if someone tried to download a geoblocked file
fixed: | @@ -73,6 +73,9 @@ class Tv4play(Service, OpenGraphThumbMixin):
url = "https://playback-api.b17g.net/media/{}?service=tv4&device=browser&protocol=hls%2Cdash&drm=widevine".format(vid)
res = self.http.request("get", url, cookies=self.cookies)
+ if res.status_code > 200:
+ yield ServiceError("Can't play this because the video is geoblocked.")
+ return
if res.json()["playbackItem"]["type"] == "hls":
streams = hlsparse(self.config, self.http.request("get", res.json()["playbackItem"]["manifestUrl"]),
res.json()["playbackItem"]["manifestUrl"], output=self.output, httpobject=self.http)
@@ -136,6 +139,9 @@ class Tv4(Service, OpenGraphThumbMixin):
url = "https://playback-api.b17g.net/media/{}?service=tv4&device=browser&protocol=hls%2Cdash&drm=widevine".format(self.output["id"])
res = self.http.request("get", url, cookies=self.cookies)
+ if res.status_code > 200:
+ yield ServiceError("Can't play this because the video is geoblocked.")
+ return
if res.json()["playbackItem"]["type"] == "hls":
streams = hlsparse(self.config, self.http.request("get", res.json()["playbackItem"]["manifestUrl"]),
res.json()["playbackItem"]["manifestUrl"], output=self.output)
|
Update Nintendo - Super Nintendo Entertainment System.dat
Added homepage to Banzai mario world | @@ -11,6 +11,7 @@ game (
game (
name "Banzai Mario World (USA)"
description "'Super Mario World' hack by GbreezeSunset"
+ homepage "https://www.smwcentral.net/?p=section&a=details&id=11477"
rom ( name "Banzai Mario World.sfc" size 2097152 crc 922d3660 md5 1daa2333725eec3e0f0ec077c2395755 sha1 ddc5df0b72a3a7b8e4904649087991ad3ab694d8 )
)
game (
|
Update codeinput.kv
Demo showcasing code input: changed sample code to Python 3 | @@ -5,5 +5,5 @@ ShowcaseScreen:
CodeInput:
padding: '4dp'
- text: 'class Hello(object):\n\tpass\n\nprint "Hello world"'
+ text: 'class Hello(object):\n\tpass\n\nprint("Hello world")'
focus: True if root.parent else False
|
Don't notify batch observers unless batch is new
Updates the publisher to only notify batch observers if the batch is new
(is not already in the queue). | @@ -468,9 +468,6 @@ impl SyncBlockPublisher {
pub fn on_batch_received(&self, batch: Batch) {
let mut state = self.state.write().expect("Lock should not be poisoned");
- for observer in &state.batch_observers {
- observer.notify_batch_pending(&batch);
- }
// Batch can be added if the signer is authorized and the batch isn't already committed
let can_add_batch = {
@@ -497,6 +494,11 @@ impl SyncBlockPublisher {
if can_add_batch {
// If the batch is already in the pending queue, don't do anything further
if state.pending_batches.append(batch.clone()) {
+ // Notify observers
+ for observer in &state.batch_observers {
+ observer.notify_batch_pending(&batch);
+ }
+ // If currently building a block, add the batch to it
if let Some(ref mut candidate_block) = state.candidate_block {
if candidate_block.can_add_batch() {
candidate_block.add_batch(batch);
|
Update German numbers of net installed capacity
Updated the numbers of net installed electricity capacity in Germany.
My source is "Fraunhofer ISE"; the data is from July 2018.
Source link: | ]
],
"capacity": {
- "biomass": 9501,
- "coal": 46336,
- "gas": 29731,
+ "biomass": 7400,
+ "coal": 46250,
+ "gas": 29550,
"geothermal": 39,
"hydro": 5490,
"hydro storage": 9440,
"nuclear": 9516,
"oil": 4437,
- "solar": 40716,
+ "solar": 44320,
"unknown": 3137,
- "wind": 49592
+ "wind": 58190
},
"contributors": [
"https://github.com/corradio"
|
Update dynamic_step_driver.py
back_prop=False is deprecated. So updated it using tf.stop_gradient. | @@ -193,12 +193,11 @@ class DynamicStepDriver(driver.Driver):
self.env.time_step_spec())
counter = tf.zeros(batch_dims, tf.int32)
- [_, time_step, policy_state] = tf.while_loop(
+ [_, time_step, policy_state] = tf.nest.map_structure(tf.stop_gradient,tf.while_loop(
cond=self._loop_condition_fn(),
body=self._loop_body_fn(),
loop_vars=[counter, time_step, policy_state],
- back_prop=False,
parallel_iterations=1,
maximum_iterations=maximum_iterations,
- name='driver_loop')
+ name='driver_loop'))
return time_step, policy_state
|
message: de-duplicate reaction type conversion
Removes some duplicate code in Message.{add,remove}_reaction.
The code in question converts the emoji object from Reaction, Emoji, str, or PartialEmoji
to a string form suitable for sending over the wire. | @@ -675,18 +675,7 @@ class Message:
The emoji parameter is invalid.
"""
- if isinstance(emoji, Reaction):
- emoji = emoji.emoji
-
- if isinstance(emoji, Emoji):
- emoji = '%s:%s' % (emoji.name, emoji.id)
- elif isinstance(emoji, PartialEmoji):
- emoji = emoji._as_reaction()
- elif isinstance(emoji, str):
- pass # this is okay
- else:
- raise InvalidArgument('emoji argument must be str, Emoji, or Reaction not {.__class__.__name__}.'.format(emoji))
-
+ emoji = self._emoji_reaction(emoji)
await self._state.http.add_reaction(self.id, self.channel.id, emoji)
async def remove_reaction(self, emoji, member):
@@ -721,23 +710,26 @@ class Message:
The emoji parameter is invalid.
"""
- if isinstance(emoji, Reaction):
- emoji = emoji.emoji
-
- if isinstance(emoji, Emoji):
- emoji = '%s:%s' % (emoji.name, emoji.id)
- elif isinstance(emoji, PartialEmoji):
- emoji = emoji._as_reaction()
- elif isinstance(emoji, str):
- pass # this is okay
- else:
- raise InvalidArgument('emoji argument must be str, Emoji, or Reaction not {.__class__.__name__}.'.format(emoji))
+ emoji = self._emoji_reaction(emoji)
if member.id == self._state.self_id:
await self._state.http.remove_own_reaction(self.id, self.channel.id, emoji)
else:
await self._state.http.remove_reaction(self.id, self.channel.id, emoji, member.id)
+ @staticmethod
+ def _emoji_reaction(emoji):
+ if isinstance(emoji, Reaction):
+ return emoji.emoji
+ if isinstance(emoji, Emoji):
+ return '%s:%s' % (emoji.name, emoji.id)
+ if isinstance(emoji, PartialEmoji):
+ return emoji._as_reaction()
+ if isinstance(emoji, str):
+ return emoji # this is okay
+
+ raise InvalidArgument('emoji argument must be str, Emoji, or Reaction not {.__class__.__name__}.'.format(emoji))
+
async def clear_reactions(self):
"""|coro|
|
web: InternalContentHandler logs exceptions
Maybe it should actually log as an exception? | @@ -748,6 +748,7 @@ class InternalContentHandler (SplitRequestHandler):
self.do_response(False)
def do_response (self, is_get):
+ path = "<Unknown>"
try:
path = self.path.lstrip("/").replace("/","__").replace(".","_")
r = getattr(self, "GET_" + path, None)
@@ -793,8 +794,11 @@ class InternalContentHandler (SplitRequestHandler):
else:
ct = "text/plain"
if isinstance(r, str): r = r.encode()
- except Exception:
+ except Exception as exc:
self.send_error(500, "Internal server error")
+ msg = "%s failed trying to get '%s'" % (type(self).__name__, path)
+ if str(exc): msg += ": " + str(exc)
+ log.debug(msg)
return
self.send_response(200)
|
Update migration guide for RunConfig
Test Plan: docs only
Reviewers: alangenfeld, prha | @@ -83,7 +83,7 @@ Error:
Fix:
-Find any references to context.resources.<resource_name>, and ensure that the enclosing
+Find any references to `context.resources.<resource_name>`, and ensure that the enclosing
solid definition, type definition, or config function has the resource key specified
in its `required_resource_key` argument.
@@ -96,3 +96,16 @@ its required resources.
As a result, we should see improved performance for pipeline subset execution,
multiprocess execution, and retry execution.
+
+## RunConfig Removed
+
+Error:
+
+`AttributeError: 'ComputeExecutionContext' object has no attribute 'run_config'`
+
+Fix:
+
+Replace all references to `context.run_config` with `context.pipeline_run`. The `run_config` field
+on the pipeline execution context has been removed and replaced with `pipeline_run`, a `PipelineRun`
+instance. Along with the fields previously on `RunConfig`, this also includes the pipeline run
+status.
|
Update netrxd.py
Fixed bug in the update (failing when no "states" were present) | @@ -530,10 +530,14 @@ def _addRates(self, params):
if 'regions' not in param:
# param['regions'] = [None]
# Following Craig's suggestion (in concordance with the default in NEURON)
- param['regions'] = [self.rxd['species'][species]['regions'] for species in self.rxd['species'].keys() if param['species']==species] + \
- [self.rxd['states'][states]['regions'] for states in self.rxd['states'].keys() if param['species']==states ]
+ try:
+ param['regions'] = [self.rxd['species'][species]['regions'] for species in self.rxd['species'].keys() if param['species']==species]
+ if 'states' in self.rxd:
+ param['regions'] = param['regions'] + [self.rxd['states'][states]['regions'] for states in self.rxd['states'].keys() if param['species']==states ]
if len(param['regions'])==1 and isinstance(param['regions'][0],list):
param['regions'] = [val for elem in param['regions'] for val in elem]
+ except:
+ param['regions'] = [None]
elif not isinstance(param['regions'], list):
param['regions'] = [param['regions']]
try:
|
Need openbsd-netcat in Arch Linux
Similar to | @@ -52,7 +52,7 @@ case "$distro" in
echo "Updating mirrors"
pacman -Sy &> $LOG_DIR/pacman_update.log
echo "|-- running pacman install...."
- yes | pacman -S git libtool m4 automake curl pkg-config python python-pip libffi make autoconf gcc10 sudo inetutils bc
+ yes | pacman -S git libtool m4 automake curl pkg-config python python-pip libffi make autoconf gcc10 sudo inetutils bc openbsd-netcat
;;
freebsd*)
echo "Updating mirros"
|
ImageNode : Remove context asserts
The new ContextSanitiser is a more convenient way of achieving the same thing. | @@ -118,31 +118,18 @@ void ImageNode::hash( const Gaffer::ValuePlug *output, const Gaffer::Context *co
}
else if( output == imagePlug->formatPlug() )
{
- // The asserts in these 4 conditionals can be uncommented to catch anywhere that these
- // global plugs are being hashed with unnecessary tile specific context. This can be
- // a performance hazard because it means we can't reuse hashes from the hash cache
- // nearly as effectively
-
- //assert( context->get<std::string>( ImagePlug::channelNameContextName, "UNDEF" ) == "UNDEF" );
- //assert( context->get<Imath::V2i>( ImagePlug::tileOriginContextName, V2i( 42 ) ) == V2i( 42 ) );
hashFormat( imagePlug, context, h );
}
else if( output == imagePlug->dataWindowPlug() )
{
- //assert( context->get<std::string>( ImagePlug::channelNameContextName, "UNDEF" ) == "UNDEF" );
- //assert( context->get<Imath::V2i>( ImagePlug::tileOriginContextName, V2i( 42 ) ) == V2i( 42 ) );
hashDataWindow( imagePlug, context, h );
}
else if( output == imagePlug->metadataPlug() )
{
- //assert( context->get<std::string>( ImagePlug::channelNameContextName, "UNDEF" ) == "UNDEF" );
- //assert( context->get<Imath::V2i>( ImagePlug::tileOriginContextName, V2i( 42 ) ) == V2i( 42 ) );
hashMetadata( imagePlug, context, h );
}
else if( output == imagePlug->channelNamesPlug() )
{
- //assert( context->get<std::string>( ImagePlug::channelNameContextName, "UNDEF" ) == "UNDEF" );
- //assert( context->get<Imath::V2i>( ImagePlug::tileOriginContextName, V2i( 42 ) ) == V2i( 42 ) );
hashChannelNames( imagePlug, context, h );
}
}
|
Update magentocore.txt
Minus dups. | @@ -3290,3 +3290,26 @@ artichgroup.com
jquerylib-min.com
jquerylib-min.net
onlinecdn-js.com
+
+# Reference: https://www.riskiq.com/resources/research/magecart-ant-and-cockroach-skimmer/
+
+2binary-education.pw
+ads2.adverline.com/retargetproduit/partntertag/103754_tag.js
+alexa-rank.pw
+batbing.com
+bgznnfzn.pw
+checkip.biz
+consoler.in
+gnwnprnf.pw
+niywqcnp.pw
+pornostyle.pw
+portal-a.pw
+portal-b.pw
+portal-c.pw
+portal-d.pw
+portal-e.pw
+portal-f.pw
+search-components.pw
+sexrura.pw
+tattoopad.pw
+xnprnfzn.pw
|
[modules/shell] remove obsolete event handlers
modules are now automatically updated when clicked.
fixes | @@ -50,10 +50,6 @@ class Module(core.module.Module):
if self.parameter("scrolling.makewide") is None:
self.set("scrolling.makewide", False)
- # LMB and RMB will update output regardless of timer
- core.input.register(self, button=core.input.LEFT_MOUSE, cmd=self.update)
- core.input.register(self, button=core.input.RIGHT_MOUSE, cmd=self.update)
-
def set_output(self, value):
self.__output = value
|
Fix Syntax Error in Calling b2a_hex()
s/hbyteorder/byteorder/ | @@ -30,7 +30,7 @@ LOGGER = logging.getLogger(__name__)
class NoopPayload(object):
def __init__(self):
self.nonce = binascii.b2a_hex(random.getrandbits(
- 8 * 8).to_bytes(8, hbyteorder='little'))
+ 8 * 8).to_bytes(8, byteorder='little'))
self._sha512 = None
def sha512(self):
|
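For the entry above, the corrected call spelled out: `int.to_bytes` takes a `byteorder` keyword, so the original `hbyteorder` typo raised a `TypeError` at runtime:

```python
import binascii
import random

nonce = binascii.b2a_hex(random.getrandbits(8 * 8).to_bytes(8, byteorder='little'))
print(nonce)  # 16 hex characters encoding the 8 random bytes, e.g. b'3f9a...'
```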
Fix datastreamer
Summary: `WorkerDone` is put to the queue at the end of data. It's a namedtuple, which is an instance of Sequence. The change preserves the type of the object, which is necessary to make data streamer stops. | @@ -144,17 +144,21 @@ def pin_memory(batch):
elif isinstance(batch, string_classes):
return batch
elif dataclasses.is_dataclass(batch):
- retval = dataclasses.replace(
+ return dataclasses.replace(
batch,
**{
field.name: pin_memory(getattr(batch, field.name))
for field in dataclasses.fields(batch)
}
)
- return retval
elif isinstance(batch, collections.Mapping):
# NB: preserving OrderedDict
return type(batch)((k, pin_memory(sample)) for k, sample in batch.items())
+ elif isinstance(batch, NamedTuple) or hasattr(batch, "_asdict"):
+ # This is mainly for WorkerDone
+ return type(batch)(
+ **{name: pin_memory(value) for name, value in batch._asdict().items()}
+ )
elif isinstance(batch, collections.Sequence):
return [pin_memory(sample) for sample in batch]
else:
|
Replace USER_EDIT_PAGE links with UserEditPage getter
Fix missing user id param | import { mapState, mapGetters } from 'vuex';
import { UserKinds } from 'kolibri.coreVue.vuex.constants';
import commonCoreStrings from 'kolibri.coreVue.mixins.commonCoreStrings';
+ import cloneDeep from 'lodash/cloneDeep';
import PaginatedListContainer from 'kolibri.coreVue.components.PaginatedListContainer';
import UserTable from '../UserTable';
import { Modals } from '../../constants';
},
handleManageUserSelection(selection, user) {
if (selection.value === Modals.EDIT_USER) {
- const params = {
- id: user.id,
- };
-
- if (this.$store.getters.inMultipleFacilityPage) {
- params.facility_id = this.$store.getters.activeFacilityId;
- }
- this.$router.push(this.$router.getRoute('USER_EDIT_PAGE', params));
+ const link = cloneDeep(this.$store.getters.facilityPageLinks.UserEditPage);
+ link.params.id = user.id;
+ this.$router.push(link);
} else {
this.selectedUser = user;
this.modalShown = selection.value;
|
Update README.md
Adding payloads for Citrix and Cisco | @@ -388,6 +388,19 @@ Assuming payloads such as the previous return a verbose error. You can start poi
]>
<root></root>
```
+### Cisco WebEx
+```
+<!ENTITY % local_dtd SYSTEM "file:///usr/share/xml/scrollkeeper/dtds/scrollkeeper-omf.dtd">
+<!ENTITY % url.attribute.set '>Your DTD code<!ENTITY test "test"'>
+%local_dtd;
+```
+### Citrix XenMobile Server
+```
+<!ENTITY % local_dtd SYSTEM "jar:file:///opt/sas/sw/tomcat/shared/lib/jsp-api.jar!/javax/servlet/jsp/resources/jspxml.dtd">
+<!ENTITY % Body '>Your DTD code<!ENTITY test "test"'>
+%local_dtd;
+```
+[Payloads for Cisco and Citrix](https://mohemiv.com/all/exploiting-xxe-with-local-dtd-files/)
[Other payloads using different DTDs](https://github.com/GoSecure/dtd-finder/blob/master/list/xxe_payloads.md)
|
PoseNet example
[Example] PoseNet | @@ -60,22 +60,18 @@ async function detectPoseInRealTime(video) {
await predict();
algorithm.onChange((algorithm) => {
guiState.algorithm = algorithm;
- poseDetectionFrame();
});
scoreThreshold.onChange((scoreThreshold) => {
guiState.scoreThreshold = scoreThreshold;
util._minScore = guiState.scoreThreshold;
- poseDetectionFrame();
});
nmsRadius.onChange((nmsRadius) => {
guiState.multiPoseDetection.nmsRadius = nmsRadius;
util._nmsRadius = guiState.multiPoseDetection.nmsRadius;
- poseDetectionFrame();
});
maxDetections.onChange((maxDetections) => {
guiState.multiPoseDetection.maxDetections = maxDetections;
util._maxDetection = guiState.multiPoseDetection.maxDetections;
- poseDetectionFrame();
});
model.onChange((model) => {
guiState.model = model;
@@ -111,7 +107,6 @@ async function detectPoseInRealTime(video) {
setTimeout(() => {
util.init(newBackend, inputSize).then(() => {
updateBackend();
- poseDetectionFrame();
});
}, 10);
}
@@ -145,7 +140,6 @@ async function detectPoseInRealTime(video) {
} else {
util.init(currentBackend, inputSize).then(() => {
updateBackend();
- poseDetectionFrame();
});
}
}
|
Update CLI to permit quitting.
The autoloaded device wouldn't quit properly. Now it should. | @@ -50,8 +50,8 @@ parser.add_argument('-gb', '--adjust_y', type=int, help='adjust grbl home_y posi
parser.add_argument('-rs', '--ruida', action='store_true', help='run ruida-emulator')
-args = parser.parse_args(sys.argv[1:])
-# args = parser.parse_args(["-zc"])
+# args = parser.parse_args(sys.argv[1:])
+args = parser.parse_args(["-zc"])
kernel.register('static', 'RasterScripts', RasterScripts)
kernel.register('module', 'Console', Console)
@@ -81,7 +81,7 @@ if 'device' in kernel.instances:
else:
if args.no_gui:
# Without a booted device, if also no gui, just start a default device.
- device = kernel.open('device', 'Lhystudios')
+ device = kernel.open('device', 'Lhystudios', instance_name='1', uid=1)
device.boot()
pass
else:
|
catch exceptions for safety
Since this isn't a critical piece of functionality we don't want
to break app saves if this fails. | @@ -4,6 +4,7 @@ from django.dispatch.dispatcher import Signal
from corehq.apps.callcenter.app_parser import get_call_center_config_from_app
from corehq.apps.domain.models import Domain
+from dimagi.utils.logging import notify_exception
def create_app_structure_repeat_records(sender, application, **kwargs):
@@ -19,6 +20,7 @@ def update_callcenter_config(sender, application, **kwargs):
if not application.copy_of:
return
+ try:
domain = Domain.get_by_name(application.domain)
cc_config = domain.call_center_config
if not cc_config or not (cc_config.fixtures_are_active() and cc_config.config_is_valid()):
@@ -28,6 +30,8 @@ def update_callcenter_config(sender, application, **kwargs):
save = cc_config.update_from_app_config(app_config)
if save:
cc_config.save()
+ except Exception:
+ notify_exception(None, "Error updating CallCenter config for app build")
app_post_save = Signal(providing_args=['application'])
|
Handling potential CRS mismatch
This fixes the error described in issue . | @@ -43,10 +43,13 @@ def calc_bounding_box_projected_coordinates(shapefile_zone, shapefile_surroundin
# connect both files and avoid repetition
data_zone = Gdf.from_file(shapefile_zone)
+ data_zone = data_zone.to_crs(get_geographic_coordinate_system())
+
data_dis = Gdf.from_file(shapefile_surroundings)
data_dis = data_dis.loc[~data_dis["Name"].isin(data_zone["Name"])]
- data = data_dis.append(data_zone, ignore_index = True, sort=True)
- data = data.to_crs(get_geographic_coordinate_system())
+ data_dis = data_dis.to_crs(get_geographic_coordinate_system())
+
+ data = data_zone.append(data_dis, ignore_index = True, sort=True)
lon = data.geometry[0].centroid.coords.xy[0][0]
lat = data.geometry[0].centroid.coords.xy[1][0]
crs = get_projected_coordinate_system(float(lat), float(lon))
@@ -74,7 +77,7 @@ def terrain_elevation_extractor(locator, config):
locator.get_surroundings_geometry()), 'Get surroundings geometry file first or the coordinates of the area where' \
' to extract the terrain from in the next format: lon_min, lat_min, lon_max, lat_max'
print("generating terrain from Surroundings area")
- bounding_box_surroundings_file, crs, lon, lat = calc_bounding_box_projected_coordinates(locator.get_surroundings_geometry(), locator.get_zone_geometry())
+ bounding_box_surroundings_file, crs, lon, lat = calc_bounding_box_projected_coordinates(locator.get_zone_geometry(), locator.get_surroundings_geometry())
x_min = bounding_box_surroundings_file[0] - extra_border
y_min = bounding_box_surroundings_file[1] - extra_border
x_max = bounding_box_surroundings_file[2] + extra_border
|
performance improvement
changed the printing part, which created a new String every time it did str +=
numList.set(k+1, val);
}
//display the sorted list to the user
- String str="";
for(int i=0; i < numList.size() - 1; i++) {
- str+=String.valueOf(numList.get(i));
- str+=", ";
- }
- str+=String.valueOf(numList.get(numList.size()-1));
- System.out.println(str);
+ System.out.print(numList.get(i) + ", ");
}
+ System.out.print(numList.get(numList.size() - 1));
}
-
+}
|
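Aside: the commit above is about repeated string concatenation in a loop, which re-copies the accumulated string on every iteration and degrades to quadratic time. A minimal Python sketch of the same idea (illustrative only, not taken from the repository above) — collecting the pieces and joining once keeps it linear:

    def print_sorted_slow(nums):
        out = ""
        for n in nums[:-1]:
            out += str(n) + ", "   # each += copies the whole string built so far
        print(out + str(nums[-1]))

    def print_sorted_fast(nums):
        print(", ".join(str(n) for n in nums))  # builds the result in a single pass

    print_sorted_fast([1, 2, 5, 9])  # 1, 2, 5, 9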
Enable the 'redis' / 'aiohttp' Sentry integrations
This will provide breadcrumbs for these systems in all our Sentry
events, if applicable.
Closes | @@ -3,7 +3,9 @@ import logging
import discord
import sentry_sdk
from discord.ext.commands import when_mentioned_or
+from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
+from sentry_sdk.integrations.redis import RedisIntegration
from bot import constants, patches
from bot.bot import Bot
@@ -15,7 +17,11 @@ sentry_logging = LoggingIntegration(
sentry_sdk.init(
dsn=constants.Bot.sentry_dsn,
- integrations=[sentry_logging]
+ integrations=[
+ sentry_logging,
+ AioHttpIntegration(),
+ RedisIntegration(),
+ ]
)
bot = Bot(
|
Add document for case sensitivity
Resolves | @@ -138,6 +138,31 @@ this behavior. For instance, see below:
...
Reference 'a' is ambiguous, could be: a, a.;
+Additionally, it is strongly discouraged to use case sensitive column names. Koalas disallows it by default.
+
+.. code-block:: python
+
+ >>> import databricks.koalas as ks
+ >>> kdf = ks.DataFrame({'a': [1, 2], 'A':[3, 4]})
+ ...
+ Reference 'a' is ambiguous, could be: a, a.;
+
+However, you can turn on ``spark.sql.caseSensitive`` in Spark configuration to enable it if you use on your own risk.
+
+.. code-block:: python
+
+ >>> from pyspark.sql import SparkSession
+ >>> builder = SparkSession.builder.appName("Koalas")
+ >>> builder = builder.config("spark.sql.caseSensitive", "true")
+ >>> builder.getOrCreate()
+
+ >>> import databricks.koalas as ks
+ >>> kdf = ks.DataFrame({'a': [1, 2], 'A':[3, 4]})
+ >>> kdf
+ a A
+ 0 1 3
+ 1 2 4
+
Specify the index column in conversion from Spark DataFrame to Koalas DataFrame
-------------------------------------------------------------------------------
@@ -248,4 +273,3 @@ The example above can be also changed to directly using Koalas APIs as below:
New York 441.0
Helsinki 144.0
Name: 0, dtype: float64
-
|
Surface data structure fix
Surface data parameters were returned as arrays instead of scalars. This
is now fixed. | @@ -2578,9 +2578,7 @@ class GempakSurface(GempakFile):
station[param] = packed_buffer[iprm].decode().strip()
else:
for iprm, param in enumerate(parameters['name']):
- station[param] = np.array(
- packed_buffer[iprm], dtype=np.float32
- )
+ station[param] = packed_buffer[iprm]
stations.append(station)
return stations
|
Replace link to 404 object IAM docs with a note on limited utility.
Closes | @@ -1183,8 +1183,13 @@ class Blob(_PropertyMixin):
def get_iam_policy(self, client=None):
"""Retrieve the IAM policy for the object.
- See
- https://cloud.google.com/storage/docs/json_api/v1/objects/getIamPolicy
+ .. note:
+
+ Blob- / object-level IAM support does not yet exist and methods
+ currently call an internal ACL backend not providing any utility
+ beyond the blob's :attr:`acl` at this time. The API may be enhanced
+ in the future and is currently undocumented. Use :attr:`acl` for
+ managing object access control.
If :attr:`user_project` is set on the bucket, bills the API request
to that project.
@@ -1215,8 +1220,13 @@ class Blob(_PropertyMixin):
def set_iam_policy(self, policy, client=None):
"""Update the IAM policy for the bucket.
- See
- https://cloud.google.com/storage/docs/json_api/v1/objects/setIamPolicy
+ .. note:
+
+ Blob- / object-level IAM support does not yet exist and methods
+ currently call an internal ACL backend not providing any utility
+ beyond the blob's :attr:`acl` at this time. The API may be enhanced
+ in the future and is currently undocumented. Use :attr:`acl` for
+ managing object access control.
If :attr:`user_project` is set on the bucket, bills the API request
to that project.
@@ -1253,8 +1263,13 @@ class Blob(_PropertyMixin):
def test_iam_permissions(self, permissions, client=None):
"""API call: test permissions
- See
- https://cloud.google.com/storage/docs/json_api/v1/objects/testIamPermissions
+ .. note:
+
+ Blob- / object-level IAM support does not yet exist and methods
+ currently call an internal ACL backend not providing any utility
+ beyond the blob's :attr:`acl` at this time. The API may be enhanced
+ in the future and is currently undocumented. Use :attr:`acl` for
+ managing object access control.
If :attr:`user_project` is set on the bucket, bills the API request
to that project.
|
Rolling upgrades: Migrate to ceph-key module
This change moves ceph-mgr upgrades to using ceph-key library.
Fixes: | vars:
upgrade_ceph_packages: True
+ ceph_release: "{{ ceph_stable_release }}"
hosts:
- "{{ mgr_group_name|default('mgrs') }}"
set_fact:
ceph_cluster_fsid: "{{ cluster_uuid_container.stdout if containerized_deployment else cluster_uuid_non_container.stdout }}"
- - name: non container | create ceph mgr keyring(s)
- command: "ceph --cluster {{ cluster }} auth get-or-create mgr.{{ hostvars[item]['ansible_hostname'] }} mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o /etc/ceph/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring"
- args:
- creates: "{{ ceph_conf_key_directory }}/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring"
- changed_when: false
- delegate_to: "{{ groups[mon_group_name][0] }}"
- with_items:
- - "{{ groups.get(mgr_group_name, []) }}"
+ - name: create ceph mgr keyring(s) when mon is not containerized
+ ceph_key:
+ name: "mgr.{{ hostvars[item]['ansible_hostname'] }}"
+ state: present
+ caps:
+ mon: allow profile mgr
+ osd: allow *
+ mds: allow *
+ cluster: "{{ cluster }}"
when:
- not containerized_deployment
- - "{{ groups.get(mgr_group_name, []) | length > 0 }}"
-
- - name: container | create ceph mgr keyring(s)
- command: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} auth get-or-create mgr.{{ hostvars[item]['ansible_hostname'] }} mon 'allow profile mgr' osd 'allow *' mds 'allow *' -o /etc/ceph/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring"
- args:
- creates: "{{ ceph_conf_key_directory }}/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring"
- changed_when: false
+ - cephx
+ - groups.get(mgr_group_name, []) | length > 0
+ - ceph_release_num[ceph_release] >= ceph_release_num.luminous
delegate_to: "{{ groups[mon_group_name][0] }}"
- with_items:
- - "{{ groups.get(mgr_group_name, []) }}"
+ with_items: "{{ groups.get(mgr_group_name, []) }}"
+
+ - name: create ceph mgr keyring(s) when mon is containerized
+ ceph_key:
+ name: "mgr.{{ hostvars[item]['ansible_hostname'] }}"
+ state: present
+ caps:
+ mon: allow profile mgr
+ osd: allow *
+ mds: allow *
+ cluster: "{{ cluster }}"
+ containerized: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when:
- containerized_deployment
- - "{{ groups.get(mgr_group_name, []) | length > 0 }}"
+ - cephx
+ - groups.get(mgr_group_name, []) | length > 0
+ - inventory_hostname == groups[mon_group_name]|last
+ - ceph_release_num[ceph_release] >= ceph_release_num.luminous
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ with_items: "{{ groups.get(mgr_group_name, []) }}"
- name: fetch ceph mgr key(s)
fetch:
|
Avoid caching event loop in telethon.sync
Caching the loop caused issues when creating and setting a different
event loop from a different thread, since the old loop would be used
instead of the new one. This is solved by explicitly getting the loop
every time, although this has a slight performance hit.
class _SyncGen:
- def __init__(self, loop, gen):
- self.loop = loop
+ def __init__(self, gen):
self.gen = gen
def __iter__(self):
@@ -34,21 +33,25 @@ class _SyncGen:
def __next__(self):
try:
- return self.loop.run_until_complete(self.gen.__anext__())
+ return asyncio.get_event_loop() \
+ .run_until_complete(self.gen.__anext__())
except StopAsyncIteration:
raise StopIteration from None
-def _syncify_wrap(t, method_name, syncifier):
+def _syncify_wrap(t, method_name, gen):
method = getattr(t, method_name)
@functools.wraps(method)
def syncified(*args, **kwargs):
coro = method(*args, **kwargs)
- return (
- coro if asyncio.get_event_loop().is_running()
- else syncifier(coro)
- )
+ loop = asyncio.get_event_loop()
+ if loop.is_running():
+ return coro
+ elif gen:
+ return _SyncGen(coro)
+ else:
+ return loop.run_until_complete(coro)
# Save an accessible reference to the original method
setattr(syncified, '__tl.sync', method)
@@ -61,14 +64,13 @@ def syncify(*types):
into synchronous, which return either the coroutine or the result
based on whether ``asyncio's`` event loop is running.
"""
- loop = asyncio.get_event_loop()
for t in types:
for name in dir(t):
if not name.startswith('_') or name == '__call__':
if inspect.iscoroutinefunction(getattr(t, name)):
- _syncify_wrap(t, name, loop.run_until_complete)
+ _syncify_wrap(t, name, gen=False)
elif isasyncgenfunction(getattr(t, name)):
- _syncify_wrap(t, name, functools.partial(_SyncGen, loop))
+ _syncify_wrap(t, name, gen=True)
syncify(TelegramClient, Draft, Dialog, MessageButton,
|
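Aside: a minimal sketch (illustrative, not Telethon code; behavior as of the Python versions this commit targeted — newer Pythons deprecate implicit loop creation) of why a loop captured once goes stale: asyncio event loops are per-thread, so the lookup has to happen at call time to see the loop the calling thread has set.

    import asyncio
    import threading

    cached_loop = asyncio.get_event_loop()   # captured once, in the main thread

    def worker():
        new_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(new_loop)      # this thread's current loop
        current = asyncio.get_event_loop()    # looked up at call time
        assert current is new_loop and current is not cached_loop
        new_loop.close()

    t = threading.Thread(target=worker)
    t.start()
    t.join()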
rbd-mirror: Allow to copy the admin keyring
The ceph-rbd-mirror role allows copying the admin keyring via the
copy_admin_key variable, but there's actually no task in that role
doing the job.
mode: "{{ ceph_keyring_permissions }}"
+- name: copy ceph admin keyring if needed
+ copy:
+ src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.admin.keyring"
+ dest: "/etc/ceph/{{ cluster }}.client.admin.keyring"
+ owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
+ group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
+ mode: "{{ ceph_keyring_permissions }}"
+ when: copy_admin_key | bool
+
- name: create rbd-mirror keyring
command: >
ceph --cluster {{ cluster }}
|
Update rc_model_SIA.py
account for internal loads of lights and appliances with factor (f = Hs/Es)
account for solar heat gains with factor (f = sqrt(Hs))
m_ve_mech = tsd['m_ve_mech'][t]
m_ve_window = tsd['m_ve_window'][t]
m_ve_inf = tsd['m_ve_inf'][t]
- El = tsd['El'][t]
- Ea = tsd['Ea'][t]
+ El = tsd['El'][t] * (bpr.architecture.Hs / bpr.architecture.Es) # account for a proportion of internal gains
+ Ea = tsd['Ea'][t] * (bpr.architecture.Hs / bpr.architecture.Es) # account for a proportion of internal gains
Epro = tsd['Epro'][t]
- people = tsd['people'][t]
- I_sol = tsd['I_sol_and_I_rad'][t]
+ I_sol = tsd['I_sol_and_I_rad'][t] * np.sqrt(bpr.architecture.Hs) # account for a proportion of solar gains
T_ext = tsd['T_ext'][t]
theta_ve_mech = tsd['theta_ve_mech'][t]
|
Add links to GitHub and GitHub issues
Also capitalize some sentence starts | @@ -66,8 +66,8 @@ follows:
Next Steps
----------
-* `sign up for release announcements <https://groups.google.com/group/nltk>`_
-* `join in the discussion <https://groups.google.com/group/nltk-users>`_
+* `Sign up for release announcements <https://groups.google.com/group/nltk>`_
+* `Join in the discussion <https://groups.google.com/group/nltk-users>`_
.. toctree::
@@ -80,6 +80,8 @@ Next Steps
Module Index <py-modindex>
Wiki <https://github.com/nltk/nltk/wiki>
FAQ <https://github.com/nltk/nltk/wiki/FAQ>
+ Open Issues <https://github.com/nltk/nltk/issues>
+ NLTK on GitHub <https://github.com/nltk/nltk>
.. toctree::
:maxdepth: 1
|
Azure XML adjustment
the jackson dependencies have been commented out
</dependency>
- <dependency>
+ <!--dependency>
<groupId>com.fasterxml.jackson.dataformat</groupId>
<artifactId>jackson-dataformat-yaml</artifactId>
<version>2.9.5</version>
- </dependency>
+ </dependency-->
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>httpclient</artifactId>
<version>4.5.5</version>
</dependency>
- <dependency>
+ <!--dependency>
<groupId>com.fasterxml.jackson.datatype</groupId>
<artifactId>jackson-datatype-guava</artifactId>
<version>2.11.0</version>
- </dependency>
- <dependency>
+ </dependency-->
+ <!--dependency>
<groupId>com.fasterxml.jackson.datatype</groupId>
<artifactId>jackson-datatype-jsr310</artifactId>
<version>2.11.0</version>
- </dependency>
+ </dependency-->
</dependencies>
|
Close the unused open file handle
I didn't see anything in tempfile that looked like we could skip opening it in
the first place | @@ -59,7 +59,8 @@ class TransientTempfile(object):
"""
def __enter__(self):
- _, self.path = tempfile.mkstemp()
+ fd, self.path = tempfile.mkstemp()
+ os.close(fd)
return self.path
def __exit__(self, exc_type, exc_val, exc_tb):
|
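Aside: the fix above works because tempfile.mkstemp returns an already-open OS-level file descriptor alongside the path, and that descriptor leaks unless it is closed explicitly. A small illustrative sketch of the pattern:

    import os
    import tempfile

    fd, path = tempfile.mkstemp()    # the file is created *and* left open via fd
    os.close(fd)                     # close immediately if only the path is needed
    try:
        with open(path, "w") as f:   # reopen by path when actually writing
            f.write("data")
    finally:
        os.remove(path)              # mkstemp never deletes the file for you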
fix return type for `Table.expand()`
return type for `Table.expand()` was `int`; it should be `bool`, based on the type annotation of the `expand` argument to `Table.__init__`
)
@property
- def expand(self) -> int:
+ def expand(self) -> bool:
"""Setting a non-None self.width implies expand."""
return self._expand or self.width is not None
|
Unify_One_Side: make trace message more specific
TN: | @@ -19,7 +19,7 @@ package body Langkit_Support.Adalog.Unify_One_Side is
function Apply (Self : in out Unify_Rec) return Boolean is
begin
- Trace ("In Unify");
+ Trace ("In Unify_One_Side");
if Is_Defined (Self.Left) then
Trace ("Left defined");
|
message view: Refactor _rerender_message to accept an object of parameters.
This is changed to improve readability about which parameters are being
passed.
header.replaceWith(rendered_recipient_row);
}
- _rerender_message(message_container, message_content_edited, is_revealed) {
+ _rerender_message(message_container, {message_content_edited, is_revealed}) {
const row = this.get_row(message_container.msg.id);
const was_selected = this.list.selected_message() === message_container.msg;
@@ -1183,12 +1183,18 @@ export class MessageListView {
reveal_hidden_message(message_id) {
const message_container = this.message_containers.get(message_id);
- this._rerender_message(message_container, false, true);
+ this._rerender_message(message_container, {
+ message_content_edited: false,
+ is_revealed: true,
+ });
}
hide_revealed_message(message_id) {
const message_container = this.message_containers.get(message_id);
- this._rerender_message(message_container, false, false);
+ this._rerender_message(message_container, {
+ message_content_edited: false,
+ is_revealed: false,
+ });
}
rerender_messages(messages, message_content_edited) {
@@ -1212,7 +1218,7 @@ export class MessageListView {
message_groups.push(current_group);
current_group = [];
}
- this._rerender_message(message_container, message_content_edited);
+ this._rerender_message(message_container, {message_content_edited, is_revealed: false});
}
if (current_group.length !== 0) {
|
[hail] add make clean
* [hail] add make clean
* fix makefile
g | @@ -339,3 +339,11 @@ native-lib-prebuilt:
$(MAKE) -C src/main/c prebuilt
native-lib-reset-prebuilt:
$(MAKE) -C src/main/c reset-prebuilt
+
+clean: clean-env clean-docs native-lib-clean
+ ./gradlew clean $(GRADLE_ARGS)
+ rm -rf build/
+ rm -rf python/hail/hail-all-spark.jar
+ rm -rf python/README.md
+ rm -rf $(SCALA_BUILD_INFO)
+ rm -rf $(PYTHON_VERSION_INFO)
|
Remove pre 3.3 special casing
We aren't supporting those versions anymore anyways and it's hard to get
test coverage on the code path. | @@ -9,12 +9,8 @@ available from its original location oemof/oemof/groupings.py
SPDX-License-Identifier: GPL-3.0-or-later
"""
-try:
from collections.abc import (Hashable, Iterable, Mapping,
MutableMapping as MuMa)
-except ImportError:
- from collections import (Hashable, Iterable, Mapping,
- MutableMapping as MuMa)
from itertools import chain, filterfalse
|
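Aside: context for the change above — the ABCs have lived in collections.abc since Python 3.3, and the old aliases under collections were dropped entirely in Python 3.10, so the ImportError fallback only helped interpreters that are long out of support. A tiny runnable check:

    from collections.abc import Hashable, Iterable, Mapping, MutableMapping

    print(isinstance({}, MutableMapping))   # True
    print(isinstance([], Hashable))         # False: lists are unhashable
    print(isinstance("abc", Iterable))      # True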
Refactor of logic to make code more readable
Functionality remains as it was | @@ -248,12 +248,9 @@ def preview_letter_template_by_notification_id(service_id, notification_id, file
page_number = page if page else "1"
content = base64.b64encode(pdf_file).decode('utf-8')
content_outside_printable_area = metadata.get("message") == "content-outside-printable-area"
+ page_is_in_invalid_pages = page_number in metadata.get('invalid_pages', '[]')
- show_overlay_for_page = False
- if content_outside_printable_area and page_number in metadata.get('invalid_pages', '[]'):
- show_overlay_for_page = True
-
- if show_overlay_for_page or (content_outside_printable_area and file_type == "pdf"):
+ if content_outside_printable_area and (file_type == "pdf" or page_is_in_invalid_pages):
path = '/precompiled/overlay.{}'.format(file_type)
query_string = '?page_number={}'.format(page_number) if file_type == 'png' else ''
content = pdf_file
@@ -266,7 +263,7 @@ def preview_letter_template_by_notification_id(service_id, notification_id, file
if file_type == 'png':
try:
pdf_page = extract_page_from_pdf(BytesIO(pdf_file), int(page_number) - 1)
- content = pdf_page if show_overlay_for_page else base64.b64encode(pdf_page).decode('utf-8')
+ content = pdf_page if page_is_in_invalid_pages else base64.b64encode(pdf_page).decode('utf-8')
except PdfReadError as e:
raise InvalidRequest(
'Error extracting requested page from PDF file for notification_id {} type {} {}'.format(
|
change docs for ray.remote num_gpus
The documentation says that it can take fractional num_gpus, which is true, but the documentation lists it as an integer. I think this is strictly a problem in the docs.
the remote function invocation.
num_cpus (float): The quantity of CPU cores to reserve
for this task or for the lifetime of the actor.
- num_gpus (int): The quantity of GPUs to reserve
+ num_gpus (float): The quantity of GPUs to reserve
for this task or for the lifetime of the actor.
resources (Dict[str, float]): The quantity of various custom resources
to reserve for this task or for the lifetime of the actor.
|
Enable admin management of curricula
This commit adds the Curriculum model to Django admin, and makes it possible
for people with proper permissions to view/add/change/delete the
records from within it. This is another part that was required by
Badge,
TrainingRequirement,
TrainingRequest,
+ Curriculum,
)
@@ -18,6 +19,10 @@ class RoleAdmin(admin.ModelAdmin):
list_display = ('name', 'verbose_name')
+class CurriculumAdmin(admin.ModelAdmin):
+ list_display = ('__str__', 'slug', 'name', 'active', 'unknown')
+
+
admin.site.register(Tag)
admin.site.register(AcademicLevel)
admin.site.register(ComputingExperienceLevel)
@@ -28,3 +33,4 @@ admin.site.register(KnowledgeDomain)
admin.site.register(Badge)
admin.site.register(TrainingRequirement)
admin.site.register(TrainingRequest)
+admin.site.register(Curriculum, CurriculumAdmin)
|
`select_related` collection on image listing
`images/results.html` shows the collection if it's available, which
results in `O(n)` queries. | @@ -43,7 +43,7 @@ class BaseListingView(TemplateView):
# Get images (filtered by user permission)
images = permission_policy.instances_user_has_any_permission_for(
self.request.user, ['change', 'delete']
- ).order_by('-created_at')
+ ).order_by('-created_at').select_related('collection')
# Search
query_string = None
|
cortex_test.py: added test cases.
Verify r/w core regs while running raises exception.
Verify that all core registers can be read and written.
Aligned printed timings for simulated gdb operations. | @@ -317,6 +317,38 @@ def cortex_test(board_id):
origRegs[0] = origR0
target.write_core_registers_raw(['r0', 'r1', 'r2', 'r3'], origRegs)
+ print("Verify exception is raised while core is running")
+ target.resume()
+ try:
+ val = target.read_core_register('r0')
+ except exceptions.CoreRegisterAccessError:
+ passed = True
+ else:
+ passed = False
+ test_count += 1
+ if passed:
+ test_pass_count += 1
+ print("TEST PASSED")
+ else:
+ print("TEST FAILED")
+
+ print("Verify failure to write core register while running raises exception")
+ try:
+ target.write_core_register('r0', 0x1234)
+ except exceptions.CoreRegisterAccessError:
+ passed = True
+ else:
+ passed = False
+ test_count += 1
+ if passed:
+ test_pass_count += 1
+ print("TEST PASSED")
+ else:
+ print("TEST FAILED")
+
+ # Resume execution.
+ target.halt()
+
if target.selected_core.has_fpu:
print("Reading s0")
val = target.read_core_register('s0')
@@ -377,6 +409,23 @@ def cortex_test(board_id):
origRegs[0] = origRawS0
target.write_core_registers_raw(['s0', 's1'], origRegs)
+ print("Verify that all listed core registers can be accessed")
+ reg_count = 0
+ passed_reg_count = 0
+ for r in target.selected_core.core_registers.as_set:
+ try:
+ reg_count += 1
+ val = target.read_core_register(r.name)
+ target.write_core_register(r.name, val)
+ passed_reg_count += 1
+ except exceptions.CoreRegisterAccessError:
+ pass
+ test_count += 1
+ if passed_reg_count == reg_count:
+ test_pass_count += 1
+ print("TEST PASSED (%i registers)" % reg_count)
+ else:
+ print("TEST FAILED (%i registers, %i failed)" % (reg_count, reg_count - passed_reg_count))
print("\n\n------ Testing Invalid Memory Access Recovery ------")
memory_access_pass = True
@@ -529,6 +578,13 @@ def cortex_test(board_id):
else:
print("TEST FAILED")
+ print("\nTest Summary:")
+ print("Pass count %i of %i tests" % (test_pass_count, test_count))
+ if test_pass_count == test_count:
+ print("CORTEX TEST PASSED")
+ else:
+ print("CORTEX TEST FAILED")
+
target.reset()
result.passed = test_count == test_pass_count
|
Add get_bi_selector to managed_object model
HG--
branch : feature/microservices | @@ -1300,6 +1300,32 @@ class ManagedObject(Model):
def allow_autosegmentation(self):
return self.get_autosegmentation_policy() == "e"
+ @classmethod
+ def get_bi_selector(cls, cfg):
+ qs = {}
+ print cfg
+ if "administrative_domain" in cfg:
+ d = AdministrativeDomain.get_by_id(cfg["administrative_domain"])
+ if d:
+ qs["administrative_domain__in"] = d.get_nested()
+ if "pool" in cfg:
+ qs["pool__in"] = [cfg["pool"]]
+ if "profile" in cfg:
+ qs["profile__in"] = [cfg["profile"]]
+ if "segment" in cfg:
+ qs["segment__in"] = [cfg["segment"]]
+ if "container" in cfg:
+ qs["container__in"] = [cfg["container"]]
+ if "platform" in cfg:
+ qs["platform__in"] = [cfg["pool"]]
+ if "version" in cfg:
+ qs["version__in"] = [cfg["version"]]
+ return [
+ r.bi_id
+ for r in ManagedObject.objects.filter(**qs).only("id", "bi_id")
+ ]
+
+
@on_save
class ManagedObjectAttribute(Model):
|
fix bug where copying the output and error files resulted in the same file when
multiple tests were declared in a single buildspec
"""Copy output and error file into test root directory since stage directory will be removed."""
shutil.copy2(
- self.metadata["outfile"],
+ os.path.join(self.stage_dir, os.path.basename(self.metadata["outfile"])),
os.path.join(self.test_root, os.path.basename(self.metadata["outfile"])),
)
shutil.copy2(
- self.metadata["errfile"],
+ os.path.join(self.stage_dir, os.path.basename(self.metadata["errfile"])),
os.path.join(self.test_root, os.path.basename(self.metadata["errfile"])),
)
|
Constrain log length for CBC proxy payload
This avoids any issues due to large payloads (e.g. with a lot of
polygons in the 'areas' field). While we may miss part of the log
in such cases, this is more than we get already anyway. | @@ -123,7 +123,7 @@ class CBCProxyClientBase(ABC):
payload_bytes = bytes(json.dumps(payload), encoding='utf8')
try:
current_app.logger.info(
- f"Calling lambda {lambda_name} with payload {payload}"
+ f"Calling lambda {lambda_name} with payload {str(payload)[:1000]}"
)
result = self._lambda_client.invoke(
|
util/rhsm: Check if repositories is None before iterating
When `get_fallback_rhsm_secrets` was used, `Subscriptions.repositories`
was None, and `get_secrets` never returned the fallback secrets.
So check if `repositories` is None before
iterating over it, otherwise return the fallback secrets. | @@ -93,6 +93,7 @@ class Subscriptions:
def get_secrets(self, url):
# Try to find a matching URL from redhat.repo file first
+ if self.repositories is not None:
for parameters in self.repositories.values():
if parameters["matchurl"].match(url) is not None:
return {
|
Adds guide for the kubeflow dag runner image specification retrospectively.
See #3761#issuecomment-846723307 for the discussion. | * `--package-path` and `--skaffold_cmd` flags were deleted. The compiled path
can be specified when creating a KubeflowDagRunner class instance. TFX CLI
doesn't depend on skaffold any more and use Docker SDK directly.
+* Specify the container image for KubeflowDagRunner in the
+ KubeflowDagRunnerConfig directly instead of reading it from an environment
+ variable. CLI will not set `KUBEFLOW_TFX_IMAGE` environment variable any
+ more. See
+ [example](https://github.com/tensorflow/tfx/blob/c315e7cf75822088e974e15b43c96fab86746733/tfx/experimental/templates/taxi/kubeflow_runner.py#L63).
* Default orchestration engine of CLI was changed to `local` orchestrator from
`beam` orchestrator. You can still use `beam` orchestrator with
`--engine=beam` flag.
|
Fix how stylechecks.py handles exclude directories...
... instead of testing whether the excluded pattern is present in the
tested path, test that the tested path ends with the excluded pattern.
The previous behavior was preventing "astdoc" from being checked by
stylechecks because the "doc" directory was excluded.
TN: | @@ -569,6 +569,18 @@ def check_file(report, filename): # pragma: no cover
check_file_content(report, filename, content)
+def excludes_match(path, excludes):
+ """
+ Return whether at least one item in `excludes` matches the `path`.
+
+ :type path: str
+ :type excludes: list[str]
+ :rtype: bool
+ """
+ return any(path.endswith(os.path.sep + e)
+ for e in excludes)
+
+
def traverse(report, root, excludes): # pragma: no cover
"""
Perform generic and language-specific style checks.
@@ -581,12 +593,7 @@ def traverse(report, root, excludes): # pragma: no cover
"""
for item in sorted(os.listdir(root)):
path = os.path.join(root, item)
- to_exclude = False
- for e in excludes:
- if e in path:
- to_exclude = True
- break
- if to_exclude:
+ if excludes_match(path, excludes):
continue
if os.path.isdir(path):
|
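Aside: the behavioral difference described above in a tiny runnable form (paths are illustrative) — the old substring test skips any path that merely contains an excluded name, while anchoring on the trailing path component only skips the directory itself:

    import os

    def excludes_match(path, excludes):
        # match only when the last path component equals an excluded name
        return any(path.endswith(os.path.sep + e) for e in excludes)

    excludes = ["doc"]
    astdoc = os.path.join("project", "astdoc")
    doc = os.path.join("project", "doc")

    print(any(e in astdoc for e in excludes))   # True  -> old check wrongly skipped astdoc
    print(excludes_match(astdoc, excludes))     # False -> astdoc is checked now
    print(excludes_match(doc, excludes))        # True  -> doc is still excluded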
Fix history saved to original subject instance
Fixes | @@ -66,7 +66,7 @@ class Subject(dict):
value = copy.deepcopy(value)
result_dict[key] = value
new = Subject(result_dict)
- new.history = self.history
+ new.history = self.history[:]
return new
@staticmethod
|
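Aside: the one-character fix above ([:]) matters because plain assignment aliases the list, so appending to the copy's history would also mutate the original subject's history. A standalone illustration with plain lists (not the real Subject class):

    original = ["loaded"]

    aliased = original          # both names refer to the same list object
    aliased.append("transform A")
    print(original)             # ['loaded', 'transform A']  <- original changed too

    copied = original[:]        # shallow copy: an independent list
    copied.append("transform B")
    print(original)             # ['loaded', 'transform A']  <- unchanged
    print(copied)               # ['loaded', 'transform A', 'transform B']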
Automatically expose Entity_Info in the public APIs
TN: | @@ -1466,7 +1466,7 @@ class CompileCtx(object):
This also emits non-blocking errors for all types that are exposed in
the public API whereas they should not.
"""
- from langkit.compiled_types import ArrayType, Struct
+ from langkit.compiled_types import ArrayType, Struct, StructMetaclass
# All code must ignore _exposed attributes when the following is true
if self.library_fields_all_public:
@@ -1508,6 +1508,9 @@ class CompileCtx(object):
t._exposed = True
+ # Expose builtin types that we want in the public APIs
+ expose(StructMetaclass.entity_info, None, None, [])
+
for f in astnode.get_abstract_fields(
predicate=lambda f: f.is_public,
include_inherited=False
|
Tweak format string used to generate robot documentation timestamp.
The previous version fails on windows, which I'm guessing has to do with
a couple of dashes (which are unnecessary; I don't even know why they
were there in the first place). | @@ -88,7 +88,7 @@ class RobotDoc(BaseTask):
"""Generate the html. `libraries` is a list of LibraryDocumentation objects"""
title = self.options.get("title", "Keyword Documentation")
- date = time.strftime("%A %B %-d, %-I:%M %p")
+ date = time.strftime("%A %B %d, %I:%M %p")
cci_version = cumulusci.__version__
stylesheet_path = os.path.join(os.path.dirname(__file__), "stylesheet.css")
|
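Aside: the %-d / %-I forms are a glibc extension for suppressing the leading zero; the Windows C runtime does not accept them (its own equivalent is %#d), so strftime on Windows rejects the format string, while the plain %d / %I directives work everywhere. A hedged sketch:

    import time

    now = time.localtime()
    print(time.strftime("%A %B %d, %I:%M %p", now))       # portable everywhere

    try:
        print(time.strftime("%A %B %-d, %-I:%M %p", now)) # glibc-only flag
    except ValueError:
        print("this platform's strftime rejects %-d / %-I")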
Using debug findsource
Was working locally but not on CI | @@ -36,6 +36,7 @@ import sys
ORIGINAL_getsourcefile = inspect.getsourcefile
+ORIGINAL_findsource = inspect.findsource
THIS_FILE = os.path.abspath(__file__)
THIS_DIR = os.path.dirname(THIS_FILE)
REZ_SOURCE_DIR = os.getenv("REZ_SOURCE_DIR", os.path.dirname(THIS_DIR))
@@ -52,6 +53,39 @@ CLONE_URL = os.getenv(
)
+def DEBUG_findsource(object):
+ """From inspect.findsource in Python 3.7.4"""
+ import linecache
+ from inspect import getfile, getmodule, getsourcefile
+
+ file = getsourcefile(object)
+ print("getsourcefile: {} {}".format(bool(file), file))
+ if file:
+ # Invalidate cache if needed.
+ linecache.checkcache(file)
+ else:
+ file = getfile(object)
+ print("getfile: {} {}".format(bool(file), file))
+ # Allow filenames in form of "<something>" to pass through.
+ # `doctest` monkeypatches `linecache` module to enable
+ # inspection, so let `linecache.getlines` to be called.
+ if not (file.startswith('<') and file.endswith('>')):
+ raise OSError('source code not available')
+
+ module = getmodule(object, file)
+ print("getmodule: {} {}".format(bool(module), module))
+ if module:
+ lines = linecache.getlines(file, module.__dict__)
+ print("lines from module dict: {} {}".format(bool(lines), lines))
+ else:
+ lines = linecache.getlines(file)
+ print("lines: {} {}".format(bool(lines), lines))
+ if not lines:
+ raise OSError('could not get source code')
+
+ return ORIGINAL_findsource(object)
+
+
def PATCHED_getsourcefile(obj):
"""Patch to not return None if path from inspect.getfile is not absolute.
@@ -563,15 +597,12 @@ def make_cli_source_link():
"(https://github.com/{repo}/blob/{branch}/{path}#L{start}-L{end})"
)
- lines, start = inspect.getsourcelines(make_cli_markdown)
- '''
try:
# Patch inspect.getsourcefile which is called by inspect.getsourcelines
- inspect.getsourcefile = PATCHED_getsourcefile
+ inspect.findsource = DEBUG_findsource
lines, start = inspect.getsourcelines(make_cli_markdown)
finally:
- inspect.getsourcefile = ORIGINAL_getsourcefile
- '''
+ inspect.getsourcefile = ORIGINAL_findsource
return link.format(
func=make_cli_markdown,
|
ocs_ci/ocs/resources/pvc.py
- Added backed_sc method | @@ -100,6 +100,7 @@ class PVC(OCS):
"""
return self.data.get('spec').get('accessModes')[0]
+ @property
def backed_sc(self):
"""
Returns the storage class of pvc object in namespace
|
Updated: return type for getweakrefs
Fixes | import sys
-from typing import Any, Callable, Generic, Optional, TypeVar, overload
+from typing import Any, Callable, Generic, List, Optional, TypeVar, overload
if sys.version_info >= (3, 9):
from types import GenericAlias
@@ -25,7 +25,7 @@ class ReferenceType(Generic[_T]):
ref = ReferenceType
def getweakrefcount(__object: Any) -> int: ...
-def getweakrefs(object: Any) -> int: ...
+def getweakrefs(object: Any) -> List[Any]: ...
@overload
def proxy(object: _C, callback: Optional[Callable[[_C], Any]] = ...) -> CallableProxyType: ...
|
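Aside: a quick runnable check of the corrected annotation — getweakrefcount returns an int, while getweakrefs returns the list of live weak references to the object:

    import weakref

    class Thing:
        pass

    obj = Thing()
    plain_ref = weakref.ref(obj)
    callback_ref = weakref.ref(obj, lambda r: None)  # distinct ref due to the callback

    print(weakref.getweakrefcount(obj))   # 2  (an int)
    print(weakref.getweakrefs(obj))       # [<weakref ...>, <weakref ...>]  (a list)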
Remove deprecated nova commands
novaclient.cert was removed 18 months ago
this commit removes those calls. | "availability_zones_find": "availability_zones.find",
"availability_zones_findall": "availability_zones.findall",
"availability_zones_list": "availability_zones.list",
- "certs_convert_into_with_meta": "certs.convert_into_with_meta",
- "certs_create": "certs.create",
- "certs_get": "certs.get",
"flavor_access_add_tenant_access": "flavor_access.add_tenant_access",
"flavor_access_convert_into_with_meta": "flavor_access.convert_into_with_meta",
"flavor_access_find": "flavor_access.find",
|
add `dispenser_tx_hash` field
added `dispenser_tx_hash` field to `dispenses` table to link dispenses directly to dispenser | @@ -64,6 +64,7 @@ def initialise(db):
destination TEXT,
asset TEXT,
dispense_quantity INTEGER,
+ dispenser_tx_hash TEXT,
PRIMARY KEY (tx_index, dispense_index, source, destination),
FOREIGN KEY (tx_index, tx_hash, block_index) REFERENCES transactions(tx_index, tx_hash, block_index))
''')
@@ -310,10 +311,11 @@ def dispense(db, tx):
'source': tx['destination'],
'destination': tx['source'],
'asset': dispenser['asset'],
- 'dispense_quantity': actually_given
+ 'dispense_quantity': actually_given,
+ 'dispenser_tx_hash': dispenser['tx_hash']
}
- sql = 'INSERT INTO dispenses(tx_index, dispense_index, tx_hash, block_index, source, destination, asset, dispense_quantity) \
- VALUES(:tx_index, :dispense_index, :tx_hash, :block_index, :source, :destination, :asset, :dispense_quantity);'
+ sql = 'INSERT INTO dispenses(tx_index, dispense_index, tx_hash, block_index, source, destination, asset, dispense_quantity, dispsenser_tx_hash) \
+ VALUES(:tx_index, :dispense_index, :tx_hash, :block_index, :source, :destination, :asset, :dispense_quantity, :dispenser_tx_hash);'
cursor.execute(sql, bindings)
dispense_index += 1
|
socket: fix default mode for makefile
Fixes | @@ -638,13 +638,11 @@ class socket:
else:
def listen(self, __backlog: int) -> None: ...
# Note that the makefile's documented windows-specific behavior is not represented
- if sys.version_info < (3,):
- def makefile(self, mode: unicode = ..., buffering: int = ...) -> BinaryIO: ...
- else:
+ if sys.version_info >= (3,):
# mode strings with duplicates are intentionally excluded
@overload
def makefile(self,
- mode: Literal['r', 'w', 'rw', 'wr', ''],
+ mode: Literal['r', 'w', 'rw', 'wr', ''] = ...,
buffering: Optional[int] = ...,
*,
encoding: Optional[str] = ...,
@@ -652,12 +650,14 @@ class socket:
newline: Optional[str] = ...) -> TextIO: ...
@overload
def makefile(self,
- mode: Literal['b', 'rb', 'br', 'wb', 'bw', 'rwb', 'rbw', 'wrb', 'wbr', 'brw', 'bwr'] = ...,
+ mode: Literal['b', 'rb', 'br', 'wb', 'bw', 'rwb', 'rbw', 'wrb', 'wbr', 'brw', 'bwr'],
buffering: Optional[int] = ...,
*,
encoding: Optional[str] = ...,
errors: Optional[str] = ...,
newline: Optional[str] = ...) -> BinaryIO: ...
+ else:
+ def makefile(self, mode: unicode = ..., buffering: int = ...) -> BinaryIO: ...
def recv(self, bufsize: int, flags: int = ...) -> bytes: ...
def recvfrom(self, bufsize: int, flags: int = ...) -> Tuple[bytes, _RetAddress]: ...
|
Cluster REST API changes
Pass the config without the additional dict envelope
Use the same endpoint for starting and joining | @@ -307,12 +307,21 @@ class Cluster(ClusterResourceBase):
@rest_decorators.marshal_with(ClusterState)
def put(self, cluster):
"""
- Start the "create cluster" execution.
+ Join the current manager to the cluster, or start a new one.
- The created cluster will already have one node (the current manager).
+ If created, the cluster will already have one node (the current
+ manager).
"""
- request_dict = get_json_and_verify_params({'config'})
- return cluster.start(request_dict['config'])
+ config = get_json_and_verify_params({
+ 'host_ip': {'type': unicode},
+ 'node_name': {'type': unicode},
+ 'encryption_key': {'type': unicode},
+ 'join_addrs': {'type': list, 'optional': True},
+ })
+ if 'join_addrs' in config:
+ return cluster.join(config)
+ else:
+ return cluster.start(config)
@rest_decorators.exceptions_handled
@rest_decorators.marshal_with(ClusterState)
@@ -322,8 +331,8 @@ class Cluster(ClusterResourceBase):
Use this to change settings or promote a replica machine to master.
"""
- request_dict = get_json_and_verify_params({'config'})
- return cluster.update_config(request_dict['config'])
+ config = get_json_and_verify_params()
+ return cluster.update_config(config)
class ClusterNodes(ClusterResourceBase):
@@ -348,15 +357,6 @@ class ClusterNodesId(ClusterResourceBase):
"""
return cluster.get_node(node_id)
- @rest_decorators.exceptions_handled
- @rest_decorators.marshal_with(ClusterState)
- def put(self, node_id, cluster):
- """
- Join the current manager to the cluster.
- """
- request_dict = get_json_and_verify_params({'config'})
- return cluster.join(request_dict['config'])
-
@rest_decorators.exceptions_handled
@rest_decorators.marshal_with(ClusterNode)
def delete(self, node_id, cluster):
|