message
stringlengths
13
484
diff
stringlengths
38
4.63k
BUG: added tag to Instrument repr Added a 'tag' output to the Instrument `__repr__`.
@@ -563,7 +563,8 @@ class Instrument(object): # Create string for other parts Instrument instantiation out_str = "".join(["pysat.Instrument(platform='", self.platform, - "', name='", self.name, "', inst_id='", self.inst_id, + "', name='", self.name, "', tag='", self.tag, + "', inst_id='", self.inst_id, "', clean_level='", self.clean_level, "', pad={:}, orbit_info=".format(self.pad), "{:}, ".format(self.orbit_info),
Log ODCS request ID after creation For easier identification of the request in logs
@@ -130,8 +130,10 @@ class ODCSClient(object): response = self.session.post('{}/composes/'.format(self.url.rstrip('/')), json=body) response.raise_for_status() + odcs_resp = response.json() + logger.info("Started compose: %s", odcs_resp['id']) - return response.json() + return odcs_resp def renew_compose(self, compose_id, sigkeys=None): """Renew, or extend, existing compose
validate: remove objectstore from osd options schema objectstore is not a valid option, it's osd_objectstore and it's already validated in install_options
@@ -212,7 +212,6 @@ rados_options = ( osd_options = ( (optional("dmcrypt"), types.boolean), ("osd_scenario", validate_osd_scenarios), - (optional("objectstore"), validate_objectstore), ) collocated_osd_scenario = ("devices", iterables.AllItems(types.string))
Add Windows Beep. Force include of windows beep if sending a beep.
+import os import time from threading import Thread, Lock @@ -356,6 +357,10 @@ class Interpreter(Module): elif command == COMMAND_WAIT_FINISH: self.wait_finish() elif command == COMMAND_BEEP: + if os.name == 'nt': + import winsound + winsound.Beep(900, 500) + else: print('\a') # Beep. elif command == COMMAND_FUNCTION: if len(values) >= 1:
Switch ASGI task order This has had some positive affect to mitigate the Uvicorn race condition issue but only on a Linux system. As it shouldn't really make any difference it is worth trying whilst Uvicorn is worked on.
@@ -17,8 +17,8 @@ class ASGIHTTPConnection: async def __call__(self, receive: Callable, send: Callable) -> None: request = self._create_request_from_scope() - handler_task = asyncio.ensure_future(self.handle_request(request, send)) receiver_task = asyncio.ensure_future(self.handle_messages(request, receive)) + handler_task = asyncio.ensure_future(self.handle_request(request, send)) _, pending = await asyncio.wait( [handler_task, receiver_task], return_when=asyncio.FIRST_COMPLETED, ) @@ -98,8 +98,8 @@ class ASGIWebsocketConnection: async def __call__(self, receive: Callable, send: Callable) -> None: websocket = self._create_websocket_from_scope(send) - handler_task = asyncio.ensure_future(self.handle_websocket(websocket, send)) receiver_task = asyncio.ensure_future(self.handle_messages(receive)) + handler_task = asyncio.ensure_future(self.handle_websocket(websocket, send)) _, pending = await asyncio.wait( [handler_task, receiver_task], return_when=asyncio.FIRST_COMPLETED, )
add documentation for auto skills Added SUSI Smart Speaker Workflow Update issue templates Update issue templates Delete ISSUE_TEMPLATE.md Delete PULL_REQUEST_TEMPLATE.md Update issue templates folder path updated
@@ -6,6 +6,11 @@ The Media Discovery Daemon is the daemon that we are using to detect whether a U If a new USB connection is detected, the python script `auto_skills.py` is triggered which creates a custom skill in the SUSI server and allows the user to play music from the USB device. But if the USB device is removed, the skill file is removed and the server functions normally. <p/> +<p> +The `autostart.sh` acts as a starting point for executing the `auto_skills.py` which in turn creates the custom skill under '$HOME/SUSI.AI/susi_linux/susi_server/susi_server/data/generic_skills/media_discovery/' as custom_skill.txt . The custom skill contains a JSON response of the audio play skill with file identifiers USB name and mp3 file name. +The custom_skill.txt is removed from the directory path when the `autostop.sh` gets executed. +</p> + Libraries/ Modules used in the package We are using: * `udev` lib to monitor the status of the USB connection
README.md - add color to code block Since other markdown files are tutorials linked to the document website, I will only change the README.md file.
AutoGluon automates machine learning tasks enabling you to easily achieve strong predictive performance in your applications. With just a few lines of code, you can train and deploy high-accuracy deep learning models on image, text, and tabular data. Get started with: -``` +```python # First install package from terminal: pip install mxnet autogluon from autogluon import TabularPrediction as task
Add socket enum classes from py3.4+ * Add socket enum classes from py3.4+ Adds four IntEnum classes in the socket module that mirror the AF_, AI_, MSG_, and SOCK_ sets of constants. * Update socket AddressInfo/MsgFlag to use IntFlag type * IntFlag, AddressInfo, and MsgFlag are py 3.6+
# see: http://hg.python.org/cpython/file/3d0686d90f55/Lib/socket.py # see: http://nullege.com/codes/search/socket +import sys from typing import Any, Tuple, List, Optional, Union, overload # ----- variables and constants ----- @@ -250,6 +251,73 @@ TIPC_WITHDRAWN = 0 TIPC_ZONE_SCOPE = 0 +# enum versions of above flags py 3.4+ +if sys.version_info >= (3, 4): + from enum import IntEnum + + class AddressFamily(IntEnum): + AF_UNIX = ... + AF_INET = ... + AF_INET6 = ... + AF_APPLETALK = ... + AF_ASH = ... + AF_ATMPVC = ... + AF_ATMSVC = ... + AF_AX25 = ... + AF_BLUETOOTH = ... + AF_BRIDGE = ... + AF_DECnet = ... + AF_ECONET = ... + AF_IPX = ... + AF_IRDA = ... + AF_KEY = ... + AF_LLC = ... + AF_NETBEUI = ... + AF_NETLINK = ... + AF_NETROM = ... + AF_PACKET = ... + AF_PPPOX = ... + AF_ROSE = ... + AF_ROUTE = ... + AF_SECURITY = ... + AF_SNA = ... + AF_TIPC = ... + AF_UNSPEC = ... + AF_WANPIPE = ... + AF_X25 = ... + + class SocketKind(IntEnum): + SOCK_STREAM = ... + SOCK_DGRAM = ... + SOCK_RAW = ... + SOCK_RDM = ... + SOCK_SEQPACKET = ... + SOCK_CLOEXEC = ... + SOCK_NONBLOCK = ... + +if sys.version_info >= (3, 6): + from enum import IntFlag + + class AddressInfo(IntFlag): + AI_ADDRCONFIG = ... + AI_ALL = ... + AI_CANONNAME = ... + AI_NUMERICHOST = ... + AI_NUMERICSERV = ... + AI_PASSIVE = ... + AI_V4MAPPED = ... + + class MsgFlag(IntFlag): + MSG_CTRUNC = ... + MSG_DONTROUTE = ... + MSG_DONTWAIT = ... + MSG_EOR = ... + MSG_OOB = ... + MSG_PEEK = ... + MSG_TRUNC = ... + MSG_WAITALL = ... + + # ----- exceptions ----- class error(IOError): ...
integ-tests: update Slurm test to be compatible with EnforcePartLimits=ALL Partition limits are now enforced by the scheduler at submission time
@@ -17,7 +17,7 @@ import pytest from assertpy import assert_that from remote_command_executor import RemoteCommandExecutionError, RemoteCommandExecutor -from tests.common.assertions import assert_asg_desired_capacity, assert_no_errors_in_logs, assert_scaling_worked +from tests.common.assertions import assert_no_errors_in_logs, assert_scaling_worked from tests.common.schedulers_common import SlurmCommands from tests.schedulers.common import assert_overscaling_when_job_submitted_during_scaledown @@ -40,7 +40,7 @@ def test_slurm(region, pcluster_config_reader, clusters_factory): _test_slurm_version(remote_command_executor) _test_dynamic_max_cluster_size(remote_command_executor, region, cluster.asg) - _test_cluster_limits(remote_command_executor, max_queue_size, region, cluster.asg) + _test_cluster_limits(remote_command_executor, max_queue_size) _test_job_dependencies(remote_command_executor, region, cluster.cfn_name, scaledown_idletime, max_queue_size) _test_job_arrays_and_parallel_jobs(remote_command_executor, region, cluster.cfn_name, scaledown_idletime) assert_overscaling_when_job_submitted_during_scaledown( @@ -115,11 +115,14 @@ def _test_job_dependencies(remote_command_executor, region, stack_name, scaledow _assert_job_completed(remote_command_executor, dependent_job_id) -def _test_cluster_limits(remote_command_executor, max_queue_size, region, asg_name): - logging.info("Testing cluster doesn't scale when job requires a capacity that is higher than the max available") - slurm_commands = SlurmCommands(remote_command_executor) - result = slurm_commands.submit_command("sleep 1000", nodes=max_queue_size + 1) - max_nodes_job_id = slurm_commands.assert_job_submitted(result.stdout) +def _test_cluster_limits(remote_command_executor, max_queue_size): + logging.info("Testing scheduler rejects jobs that require a capacity that is higher than the max available") + + # Check node limit job is rejected at submission + result = remote_command_executor.run_remote_command( + "sbatch -N {0} --wrap='sleep 1'".format(max_queue_size + 1), raise_on_error=False + ) + assert_that(result.stdout).contains("sbatch: error: Batch job submission failed: Node count specification invalid") # Check cpu limit job is rejected at submission result = remote_command_executor.run_remote_command( @@ -128,12 +131,6 @@ def _test_cluster_limits(remote_command_executor, max_queue_size, region, asg_na assert_that(result.stdout).contains( "sbatch: error: Batch job submission failed: Requested node configuration is not available" ) - # Check we are not scaling - time.sleep(60) - assert_asg_desired_capacity(region, asg_name, expected=0) - assert_that(_get_job_info(remote_command_executor, max_nodes_job_id)).contains( - "JobState=PENDING Reason=PartitionNodeLimit" - ) def _test_job_arrays_and_parallel_jobs(remote_command_executor, region, stack_name, scaledown_idletime):
UI: Rename option to disable progressbar for clarity. * Also avoid using different aliases of "no_site" Python flag in help outputs.
@@ -140,7 +140,7 @@ parser.add_option( help="""\ Python flags to use. Default is what you are using to run Nuitka, this enforces a specific mode. These are options that also exist to standard -Python executable. Currently supported: "-S" (alias "nosite"), +Python executable. Currently supported: "-S" (alias "no_site"), "static_hashes" (do not use hash randomization), "no_warnings" (do not give Python runtime warnings), "-O" (alias "no_asserts"), "no_docstrings" (do not use docstrings). Default empty.""", @@ -805,7 +805,7 @@ Defaults to off.""", ) tracing_group.add_option( - "--no-progress", + "--no-progressbar", action="store_false", dest="progress_bar", default=True,
[Doc] Some functions used in the tutorial are deprecated * Some functions used in the tutorial are deprecated I'm receiving register_message_func and register_reduce_func are deprecated error. I have updated the code such that message and reduce functions are passed to g.send and g.recv * Update 3_pagerank.py.bak
@@ -96,12 +96,6 @@ def pagerank_reduce_func(nodes): # # .. image:: https://i.imgur.com/kIMiuFb.png # -# Register the message function and reduce function, which will be called -# later by DGL. - -g.register_message_func(pagerank_message_func) -g.register_reduce_func(pagerank_reduce_func) - ############################################################################### # The algorithm is straightforward. Here is the code for one @@ -110,10 +104,10 @@ g.register_reduce_func(pagerank_reduce_func) def pagerank_naive(g): # Phase #1: send out messages along all edges. for u, v in zip(*g.edges()): - g.send((u, v)) + g.send((u, v), pagerank_message_func) # Phase #2: receive messages to compute new PageRank values. for v in g.nodes(): - g.recv(v) + g.recv(v, pagerank_reduce_func) ############################################################################### @@ -125,8 +119,8 @@ def pagerank_naive(g): # on multiple nodes and edges at one time. def pagerank_batch(g): - g.send(g.edges()) - g.recv(g.nodes()) + g.send(g.edges(), pagerank_message_func) + g.recv(g.nodes(), pagerank_reduce_func) ###############################################################################
Add some spacing [#OSF-7253]
@@ -428,7 +428,6 @@ var renderWeeklyUserGainChart = function (results) { } userGainChart.parseRawData({result: data}).render(); }); - }; @@ -604,7 +603,6 @@ var UserGainMetrics = function() { renderPreviousWeekOfUsersByStatus(); NodeLogsPerUser(); - }; @@ -792,7 +790,6 @@ var ActiveUserMetrics = function() { // Average Projects per MAU renderCalculationBetweenTwoQueries(totalProjectsQuery, monthlyActiveUsersQuery, "#projects-per-monthly-user", null, 'division'); - }; // <+><+><+><+><+><+><+<+>+ @@ -917,7 +914,6 @@ var RawNumberMetrics = function() { } } }); - }; // <+><+><+><><>+
tools: Include `test_core.py` to be checked by mypy. This commit adds `test_core.py` to the `type_consistent_testfiles` list to check for type consistency with mypy.
@@ -77,7 +77,7 @@ repo_python_files['zulipterminal'] = [] repo_python_files['tests'] = [] # Added incrementally as newer test files are type-annotated. -type_consistent_testfiles = ["test_run.py"] +type_consistent_testfiles = ["test_run.py", "test_core.py"] for file_path in python_files: repo = PurePath(file_path).parts[0]
Improve timeout in delete Improved error handling
@@ -1357,16 +1357,20 @@ def delete(name, timeout=90): handle_scm, name, win32service.SERVICE_ALL_ACCESS) except pywintypes.error as exc: raise CommandExecutionError( - 'Failed To Open {0}: {1}'.format(name, exc[2])) + 'Failed to open {0}. {1}'.format(name, exc.strerror)) + try: win32service.DeleteService(handle_svc) - + except pywintypes.error as exc: + raise CommandExecutionError( + 'Failed to delete {0}. {1}'.format(name, exc.strerror)) + finally: + log.debug('Cleaning up') win32service.CloseServiceHandle(handle_scm) win32service.CloseServiceHandle(handle_svc) - attempts = 0 - while name in get_all() and attempts <= timeout: + end_time = time.time() + int(timeout) + while name in get_all() and time.time() < end_time: time.sleep(1) - attempts += 1 return name not in get_all()
Update plaso.mappings * Update plaso.mappings Closes * Update data/plaso.mappings
"version": { "type": "text", "fields": {"keyword": {"type": "keyword"}} + }, + "http_response_bytes": { + "type": "text", + "fields": {"keyword": {"type": "keyword"}} } } }
Fix downloading ftp artifacts callback was set incorrectly
@@ -121,4 +121,4 @@ class FTPArtifactRepository(ArtifactRepository): if remote_file_path else self.path with self.get_ftp_client() as ftp: with open(local_path, 'wb') as f: - ftp.retrbinary('RETR ' + remote_full_path, f) + ftp.retrbinary('RETR ' + remote_full_path, f.write)
Fix metadata links Fixes
@@ -75,7 +75,7 @@ metadata in an efficient schema-defined binary format using {func}`python:struct ### JSON When `json` is specified as the `codec` in the schema the metadata is encoded in -the human readable `JSON <https://www.json.org/json-en.html>`_ format. As this format +the human readable [JSON](https://www.json.org/json-en.html) format. As this format is human readable and encodes numbers as text it uses more bytes than the `struct` format. However it is simpler to configure as it doesn't require any format specifier for each type in the schema. Default values for properties can be specified for only @@ -355,9 +355,8 @@ schema = metadata.MetadataSchema( This schema states that the metadata for each row of the table is an object consisting of two properties. Property `accession_number` is a number (stored as a 4-byte int). -Property `collection_date` is a string which must satisfy a regex, -which checks it is a valid [ISO8601](https://www.iso.org/iso-8601-date-and-time-format -.html) date. +Property `collection_date` is a string which must satisfy a regex, which checks it is +a valid [ISO8601](https://www.iso.org/iso-8601-date-and-time-format.html) date. Both properties are required to be specified (this must always be done for the struct codec, for the JSON codec properties can be optional). Any other properties are not allowed (`additionalProperties` is false), this is also needed
Toyota: add missing engine and esp FW for Corolla Cross Hybrid add missing engine and esp FW for CorollaCross Hybrid DongleId
@@ -783,6 +783,7 @@ FW_VERSIONS = { b'\x01896637626000\x00\x00\x00\x00', b'\x01896637648000\x00\x00\x00\x00', b'\x01896637643000\x00\x00\x00\x00', + b'\x02896630A21000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00', b'\x02896630ZJ5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00', b'\x02896630ZN8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00', b'\x02896630ZQ3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00', @@ -827,6 +828,7 @@ FW_VERSIONS = { b'F152612A10\x00\x00\x00\x00\x00\x00', b'F152612D00\x00\x00\x00\x00\x00\x00', b'F152616011\x00\x00\x00\x00\x00\x00', + b'F152616060\x00\x00\x00\x00\x00\x00', b'F152642540\x00\x00\x00\x00\x00\x00', b'F152676293\x00\x00\x00\x00\x00\x00', b'F152676303\x00\x00\x00\x00\x00\x00',
added bruggeman values for electrodes after fixing the solid tortuosities these new values are needed to run the notebook
" 'Negative electrode active material volume fraction': 0.75,\n", " 'Negative particle radius [m]': 5.86e-06,\n", " 'Negative electrode Bruggeman coefficient (electrolyte)': 1.5,\n", + " 'Negative electrode Bruggeman coefficient (electrode)': 1.5,\n", " 'Negative electrode electrons in reaction': 1.0,\n", " 'Negative electrode exchange-current density [A.m-2]': graphite_LGM50_electrolyte_exchange_current_density_Chen2020,\n", " 'Negative electrode OCP entropic change [V.K-1]': 0.0,\n", " 'Positive electrode active material volume fraction': 0.665,\n", " 'Positive particle radius [m]': 5.22e-06,\n", " 'Positive electrode Bruggeman coefficient (electrolyte)': 1.5,\n", + " 'Positive electrode Bruggeman coefficient (electrode)': 1.5,\n", " 'Positive electrode electrons in reaction': 1.0,\n", " 'Positive electrode exchange-current density [A.m-2]': nmc_LGM50_electrolyte_exchange_current_density_Chen2020,\n", " 'Positive electrode OCP entropic change [V.K-1]': 0.0,\n",
Add StackName to instance Name tags Make it easier to find instance belonging to a particular parallel cluster. Ran `tox -e cfn-format`
}, { "Key": "Name", - "Value": "Master" + "Value": { + "Fn::Sub": "${AWS::StackName} Master" + } }, { "Key": "aws-parallelcluster-attributes", "Tags": [ { "Key": "Name", - "Value": "Compute", + "Value": { + "Fn::Sub": "${AWS::StackName} Compute" + }, "PropagateAtLaunch": true }, {
Tests: AsyncMock is now in the standard library! The `tests/README.md` file still referenced our old custom `AsyncMock` that has been removed in favour of the standard library one that has been introduced in 3.8. This commit fixes this by updating the section.
@@ -114,7 +114,7 @@ class BotCogTests(unittest.TestCase): ### Mocking coroutines -By default, the `unittest.mock.Mock` and `unittest.mock.MagicMock` classes cannot mock coroutines, since the `__call__` method they provide is synchronous. In anticipation of the `AsyncMock` that will be [introduced in Python 3.8](https://docs.python.org/3.9/whatsnew/3.8.html#unittest), we have added an `AsyncMock` helper to [`helpers.py`](/tests/helpers.py). Do note that this drop-in replacement only implements an asynchronous `__call__` method, not the additional assertions that will come with the new `AsyncMock` type in Python 3.8. +By default, the `unittest.mock.Mock` and `unittest.mock.MagicMock` classes cannot mock coroutines, since the `__call__` method they provide is synchronous. The [`AsyncMock`](https://docs.python.org/3/library/unittest.mock.html#unittest.mock.AsyncMock) that has been [introduced in Python 3.8](https://docs.python.org/3.9/whatsnew/3.8.html#unittest) is an asynchronous version of `MagicMock` that can be used anywhere a coroutine is expected. ### Special mocks for some `discord.py` types
Only show duel stats if the user has dueled before duel_stats used to be None if the user hadn't dueled before, now we check duels_total instead Fixes
</tbody> </table> - {% if user.duel_stats %} + {% if user.duel_stats.duels_total > 0 %} <h3>Duel stats</h3> <table class="ui very basic table celled"> <tbody>
More verbose windows test run Why does it hang on the collect step?
@@ -5,4 +5,5 @@ set -e export PYTEST_ADDOPTS="--doctest-modules --junitxml=junit/test-results.xml" export PY_IGNORE_IMPORTMISMATCH=1 -poetry run pytest \ No newline at end of file +poetry run pytest --collect-only -vvv +poetry run pytest -vvv \ No newline at end of file
Add repo yaml directory to python path in dagster-graphql cli Test Plan: Run `dagster-graphql` from a different directory targeting a `repository.yaml` `dagster-graphql -p startPipelineExecution -v ... -y "/Users/sashankthupukari/projects/dagster-playground/repository.yaml"` Reviewers: #ft, natekupp
@@ -62,8 +62,13 @@ def perform_load(self): @staticmethod def from_file_target(python_file, fn_name, from_handle=None): + file_directory = os.path.dirname(python_file) + if file_directory not in sys.path: + sys.path.append(file_directory) + module_name = os.path.splitext(os.path.basename(python_file))[0] module = imp.load_source(module_name, python_file) + return LoaderEntrypoint(module, module_name, fn_name, from_handle) @staticmethod
test(stats): verify incorrect current response time percentile result test case test_get_current_response_time_percentile_outside_cache_window verifies incorrect behaviour returning None instead of 0 when time is outside window of cached times. Issue:
@@ -580,6 +580,12 @@ class TestStatsEntryResponseTimesCache(unittest.TestCase): self.assertEqual(95, s.get_current_response_time_percentile(0.95)) + def test_get_current_response_time_percentile_outside_cache_window(self): + s = StatsEntry(self.stats, "/", "GET", use_response_times_cache=True) + # an empty response times cache, current time will not be in this cache + s.response_times_cache = {} + self.assertEqual(0, s.get_current_response_time_percentile(0.95)) + def test_diff_response_times_dicts(self): self.assertEqual( {1: 5, 6: 8},
chore(pubsub): add subscriber role test for streaming Pulling the messages using a streaming pull should work with accounts having only the pubsub.subscriber role. This commits add a test that covers this aspect.
@@ -17,6 +17,7 @@ from __future__ import absolute_import import datetime import itertools import operator as op +import os import threading import time @@ -488,6 +489,45 @@ class TestStreamingPull(object): finally: subscription_future.cancel() # trigger clean shutdown + @pytest.mark.skipif( + "KOKORO_GFILE_DIR" not in os.environ, + reason="Requires Kokoro environment with a limited subscriber service account.", + ) + def test_streaming_pull_subscriber_permissions_sufficient( + self, publisher, topic_path, subscriber, subscription_path, cleanup + ): + + # Make sure the topic and subscription get deleted. + cleanup.append((publisher.delete_topic, topic_path)) + cleanup.append((subscriber.delete_subscription, subscription_path)) + + # create a topic and subscribe to it + publisher.create_topic(topic_path) + subscriber.create_subscription(subscription_path, topic_path) + + # A service account granting only the pubsub.subscriber role must be used. + filename = os.path.join( + os.environ["KOKORO_GFILE_DIR"], "pubsub-subscriber-service-account.json" + ) + streaming_pull_subscriber = type(subscriber).from_service_account_file(filename) + + # Subscribe to the topic, publish a message, and verify that subscriber + # successfully pulls and processes it. + callback = StreamingPullCallback(processing_time=0.01, resolve_at_msg_count=1) + future = streaming_pull_subscriber.subscribe(subscription_path, callback) + self._publish_messages(publisher, topic_path, batch_sizes=[1]) + + try: + callback.done_future.result(timeout=10) + except exceptions.TimeoutError: + pytest.fail( + "Timeout: receiving/processing streamed messages took too long." + ) + else: + assert 1 in callback.seen_message_ids + finally: + future.cancel() + def _publish_messages(self, publisher, topic_path, batch_sizes): """Publish ``count`` messages in batches and wait until completion.""" publish_futures = []
login: Re-raise the export compliance exception on RHSSO (prod) Pass the correct arg quay_username
@@ -268,7 +268,7 @@ def _register_service(login_service): except ExportComplianceException as ece: logger.exception("Export compliance exception", ece) return _render_export_compliance_error( - login_service.service_name(), ece.sso_username, ece.email, ece.message + login_service.service_name(), ece.sso_username, ece.email, ece.quay_username ) # Conduct login.
Set default for consul_pillar to None If these do not default to None, they will default to an empty string, which could cause the pillar tree to leak to minions it should't. Also, allow role and environment to be pulled from pillars or minion config by using config.get Fixes
@@ -189,8 +189,8 @@ def ext_pillar(minion_id, client = get_conn(__opts__, opts['profile']) - role = __salt__['grains.get']('role') - environment = __salt__['grains.get']('environment') + role = __salt__['grains.get']('role', None) + environment = __salt__['grains.get']('environment', None) # put the minion's ID in the path if necessary opts['root'] %= { 'minion_id': minion_id,
Run oe2_wms_configure with no shapefile_bucket when not provided. Apparently a blank env var isn't recognized with parallel
echo "[$(date)] Beginning WMS endpoint configuration..." >> /var/log/onearth/config.log +if [ -z "$SHAPEFILE_BUCKET" ] +then + grep -l mapserver /etc/onearth/config/endpoint/*.yaml | parallel -j 4 python3.6 /usr/bin/oe2_wms_configure.py {} >> /var/log/onearth/config.log 2>&1 +else grep -l mapserver /etc/onearth/config/endpoint/*.yaml | parallel -j 4 python3.6 /usr/bin/oe2_wms_configure.py {} --shapefile_bucket "${SHAPEFILE_BUCKET}" >> /var/log/onearth/config.log 2>&1 - +fi echo "[$(date)] Completed WMS endpoint configuration" >> /var/log/onearth/config.log \ No newline at end of file
Env switch to hide warning when --run-dir is specified Using this for remote runs to suppress message when we control the run dir.
@@ -374,6 +374,7 @@ def _op_run_dir(args, ctx): % click_util.cmd_help(ctx)) if args.run_dir: run_dir = os.path.abspath(args.run_dir) + if os.getenv("NO_WARN_RUNDIR") != "1": cli.note( "Run directory is '%s' (results will not be visible to Guild)" % run_dir)
update documentation The Spearman correlation should be 1 or -1 if the relation is monotonic, not only linear
@@ -423,7 +423,7 @@ def spearmanr(x, y, use_ties=True): Spearman correlation does not assume that both datasets are normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or - +1 imply an exact linear relationship. Positive correlations imply that + +1 imply a monotonic relationship. Positive correlations imply that as `x` increases, so does `y`. Negative correlations imply that as `x` increases, `y` decreases.
Warn about bindings used while supposed to be ignored TN:
@@ -809,6 +809,14 @@ class AbstractVariable(AbstractExpression): if self.abstract_var and self.abstract_var.source_name else None) + @property + def ignored(self): + """ + If this comes from the language specification, return whether it is + supposed to be ignored. Return False otherwise. + """ + return self.abstract_var.ignored if self.abstract_var else False + def __repr__(self): src_name = self.source_name return '<AbstractVariable.Expr {}{}>'.format( @@ -845,7 +853,7 @@ class AbstractVariable(AbstractExpression): Cache used to memoize the "construct" method. """ - self.ignored = False + self.ignored = source_name == names.Name.from_lower('_') def add_to_scope(self, scope): """ @@ -1977,35 +1985,46 @@ class PropertyDef(AbstractNodeData): def warn_on_unused_bindings(self): """ Emit warnings for bindings such as variables or arguments, that are not - used. + used. Also emit warnings for bindings that are used whereas they have + been tagged as ignored. """ # Mapping to tell for each variable if it is referenced at least once - ignored_name = names.Name.from_lower('_') - used_vars = { + all_vars = { var: False for var in (self.constructed_expr.bindings + [construct(arg) for arg in self.explicit_argument_vars]) - if (var.source_name != ignored_name - and (var.abstract_var is None - or not var.abstract_var.ignored)) } def mark_vars(expr): if isinstance(expr, AbstractVariable.Expr): - used_vars[expr] = True + all_vars[expr] = True for sub in expr.subexprs: mark_vars(sub) mark_vars(self.constructed_expr) - unused_vars = [var for var, is_used in used_vars.items() - if not is_used] + unused_vars = [var for var, is_used in all_vars.items() + if not is_used and not var.ignored] + wrongly_used_vars = [var for var, is_used in all_vars.items() + if is_used and var.ignored] + + unused_vars.sort(key=lambda var: var.name) + wrongly_used_vars.sort(key=lambda var: var.name) + + def format_list(vars): + return ', '.join( + (var.source_name or var.name).lower + for var in vars + ) + warn_if( unused_vars, 'The following bindings are not used: {}'.format( - ', '.join((var.source_name or var.name).lower - for var in sorted(unused_vars, - key=lambda var: var.name)))) + format_list(unused_vars))) + warn_if( + wrongly_used_vars, + 'The following bindings are used even though they are supposed to' + ' be ignored: {}'.format(format_list(wrongly_used_vars))) def ExternalProperty(type=None, doc="", **kwargs):
Update spideybot_stealer.txt Aliases field is added.
# Copyright (c) 2014-2019 Maltrail developers (https://github.com/stamparm/maltrail/) # See the file 'LICENSE' for copying permission +# Aliases: blueface, spideybot + # Reference: https://twitter.com/malwrhunterteam/status/1182010489938857993 # Reference: https://twitter.com/VK_Intel/status/1182142320466186241 # Reference: https://twitter.com/malwrhunterteam/status/1182330508463788033
ima: Remove 'main' function from ima.py Remove the 'main' function from ima.py. This looks like some old test case that probably nobody has used in a long time.
@@ -701,43 +701,3 @@ def read_excllist(exclude_path: Optional[str] = None) -> List[str]: logger.debug("Loaded exclusion list from %s: %s", exclude_path, excl_list) return excl_list - - -def main() -> None: - allowlist_path = "allowlist.txt" - print(f"reading allowlist from {allowlist_path}") - - exclude_path = "exclude.txt" - # exclude_path = '../scripts/ima/exclude.txt' - print(f"reading exclude list from {exclude_path}") - - al_data = read_allowlist(allowlist_path) - excl_data = read_excllist(exclude_path) - lists = process_ima_policy(al_data, excl_data) - - measure_path = config.IMA_ML - # measure_path='../scripts/ima/ascii_runtime_measurements_ima' - # measure_path = '../scripts/gerardo/ascii_runtime_measurements' - print(f"reading measurement list from {measure_path}") - with open(measure_path, encoding="ascii") as f: - lines = f.readlines() - - with open("measure2allow.txt", "w", encoding="utf-8") as m2a: - digest = process_measurement_list(AgentAttestState("1"), lines, lists, m2a) - print(f"final digest is {digest}") - - print("using m2a") - - al_data = read_allowlist("measure2allow.txt") - excl_data = read_excllist(exclude_path) - lists2 = process_ima_policy(al_data, excl_data) - process_measurement_list(AgentAttestState("2"), lines, lists2) - - print("done") - - -if __name__ == "__main__": - try: - main() - except Exception as e: - logger.exception(e)
Update warning message for autograd issue + XLA backend Summary: Pull Request resolved:
@@ -392,9 +392,12 @@ void handle_view_on_rebase(DifferentiableViewMeta* diff_view_meta, bool indirect } else { msg = "This view requires gradients and it's being modified inplace. "; } - msg = c10::str(msg, "Backward through inplace update on view tensors is WIP for XLA backwend. " - "Gradient might be wrong in certain cases. Running forward alone is fine. " - "To work around it, please replace the inplace operation by an out-of-place one."); + msg = c10::str(msg, "Running a backward pass through an inplace update on view tensors is a WIP " + "for the XLA backend and may result in incorrect gradient computation in certain cases. " + "Note this warning is being triggered on the inplace update (not the corresponding backward pass), " + "and this update is safe if a backward pass is not run. " + "To work around this limitation and to silence this warning, " + "please replace the inplace operation by the corresponding out-of-place operation."); TORCH_WARN(msg); }
Fill in notBefore/notAfter in X509 _PKeyInteractionTestsMixin tests While the tests currently pass without it, this is because OpenSSL's encoder doesn't notice that it is emitting garbage. See Fill in a placeholder validity period so the tests both better mirror real X.509 signing code and do not rely on this bug.
@@ -1468,7 +1468,7 @@ class _PKeyInteractionTestsMixin: def signable(self): """ - Return something with a `set_pubkey`, `set_pubkey`, and `sign` method. + Return something with `set_pubkey` and `sign` methods. """ raise NotImplementedError() @@ -1715,7 +1715,12 @@ class TestX509(_PKeyInteractionTestsMixin): """ Create and return a new `X509`. """ - return X509() + certificate = X509() + # Fill in placeholder validity values. signable only expects to call + # set_pubkey and sign. + certificate.gmtime_adj_notBefore(-24 * 60 * 60) + certificate.gmtime_adj_notAfter(24 * 60 * 60) + return certificate def test_type(self): """
Django 1.10+, is_authenticated is a property Minor update: Does work as a property and a method up until Django 2.0
@@ -50,7 +50,7 @@ The base view for this is :py:class:`~dal_select2.views.Select2QuerySetView`. class CountryAutocomplete(autocomplete.Select2QuerySetView): def get_queryset(self): # Don't forget to filter out results depending on the visitor ! - if not self.request.user.is_authenticated(): + if not self.request.user.is_authenticated: return Country.objects.none() qs = Country.objects.all() @@ -423,7 +423,7 @@ filter as such in the view: class CountryAutocomplete(autocomplete.Select2QuerySetView): def get_queryset(self): - if not self.request.user.is_authenticated(): + if not self.request.user.is_authenticated: return Country.objects.none() qs = Country.objects.all() @@ -484,7 +484,7 @@ And the following autocomplete view for country: class CountryAutocomplete(autocomplete.Select2QuerySetView): def get_queryset(self): - if not self.request.is_authenticated(): + if not self.request.is_authenticated: return Country.objects.none() qs = Country.objects.all()
fix: Depend on replica details being there for Replica ConnectionPool This logic mirror how replica connections are handled
@@ -102,19 +102,18 @@ class MariaDBConnectionUtil: If frappe.conf.disable_database_connection_pooling is set, return a new connection object and close existing pool if exists. Else, return a connection from the pool. """ - # get pooled connection global _SITE_POOLS if frappe.conf.disable_database_connection_pooling: self.close_connection_pools() return self.create_connection() - is_read_only_conn = hasattr(frappe.local, "primary_db") + read_only = frappe.conf.read_from_replica and frappe.conf.replica_host if frappe.local.site not in _SITE_POOLS: - site_pool = self.create_connection_pool(read_only=is_read_only_conn) + site_pool = self.create_connection_pool(read_only=read_only) else: - site_pool = self.get_connection_pool(read_only=is_read_only_conn) + site_pool = self.get_connection_pool(read_only=read_only) try: conn = site_pool.get_connection()
Add date header testing This compliments ensuring the functionality is tested.
+from datetime import datetime, timezone + +import hypothesis.strategies as strategies import pytest +from hypothesis import given from quart.wrappers.response import Response @@ -17,3 +21,17 @@ def test_response_cache_control() -> None: assert response.headers['Cache-Control'] == 'max-age=2' response.cache_control.no_cache = True assert response.headers['Cache-Control'] == 'max-age=2,no-cache' + + +@given( + value=strategies.datetimes( + timezones=strategies.just(timezone.utc), min_value=datetime(1900, 1, 1), + ), +) [email protected]('header', ['date', 'expires', 'last_modified', 'retry_after']) +def test_datetime_headers(header: str, value: datetime) -> None: + response = Response(b'Body') + value = value.replace(microsecond=0) + setattr(response, header, value) + assert response.headers.get(header.title().replace('_', '-')) + assert getattr(response, header) == value
Fix scheduler image in helm chart Summary: Reimplement D5830 since it was lost because of my bad rebase Test Plan: bk Reviewers: catherinewu, nate, dgibson
@@ -242,7 +242,7 @@ scheduler: # (to call `dagster api launch_scheduled_execution`) and instance yaml # but does not need access to user code. image: - repository: "dagster/k8s-dagster" + repository: "dagster/k8s-dagit" tag: "latest" pullPolicy: Always
Bump LCB major version for Linux Tested-by: Build Bot Tested-by: Ellis Breen
@@ -177,7 +177,7 @@ class CBuildInfo: plat = get_plat_code() print("Got platform {}".format(plat)) - default = ['libcouchbase.so.5'] + default = ['libcouchbase.so.6'] return {'darwin': ['libcouchbase.2.dylib', 'libcouchbase.dylib'], 'linux': default, 'win': ['libcouchbase_d.dll','libcouchbase.dll']}.get(get_plat_code(), default)
Move p4d tests on PDX (us-west-2) Move half p4d tests on PDX (us-west-2)
@@ -535,7 +535,7 @@ multiple_nics: instances: ["p4d.24xlarge"] oss: ["alinux2", "ubuntu1604", "centos8"] schedulers: ["slurm"] - - regions: ["us-east-1"] + - regions: ["us-west-2"] instances: ["p4d.24xlarge"] oss: ["alinux", "ubuntu1804", "centos7"] schedulers: ["slurm"]
Add NOSIGNAL=1 option Required when using pycurl from multiple threads [1]. Not sure if we are but better safe than sorry. [1]
@@ -101,6 +101,7 @@ def request(method: str, url: str, **kwargs) -> Response: """ c = pycurl.Curl() + c.setopt(pycurl.NOSIGNAL, 1) c.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS) c.setopt(pycurl.OPENSOCKETFUNCTION, _opensocket) c.setopt(pycurl.FOLLOWLOCATION, True) # Allow redirects
Only collect "*.conf" for nginx * Some unused files have syntax error, as a result, it won't be hit even there is a hit for the used files.
@@ -475,9 +475,9 @@ class DefaultSpecs(Specs): nfs_exports = simple_file("/etc/exports") nfs_exports_d = glob_file("/etc/exports.d/*.exports") nginx_conf = glob_file([ - "/etc/nginx/*.conf", "/etc/nginx/conf.d/*", "/etc/nginx/default.d/*", - "/opt/rh/nginx*/root/etc/nginx/*.conf", "/opt/rh/nginx*/root/etc/nginx/conf.d/*", "/opt/rh/nginx*/root/etc/nginx/default.d/*", - "/etc/opt/rh/rh-nginx*/nginx/*.conf", "/etc/opt/rh/rh-nginx*/nginx/conf.d/*", "/etc/opt/rh/rh-nginx*/nginx/default.d/*" + "/etc/nginx/*.conf", "/etc/nginx/conf.d/*.conf", "/etc/nginx/default.d/*.conf", + "/opt/rh/nginx*/root/etc/nginx/*.conf", "/opt/rh/nginx*/root/etc/nginx/conf.d/*.conf", "/opt/rh/nginx*/root/etc/nginx/default.d/*.conf", + "/etc/opt/rh/rh-nginx*/nginx/*.conf", "/etc/opt/rh/rh-nginx*/nginx/conf.d/*.conf", "/etc/opt/rh/rh-nginx*/nginx/default.d/*.conf" ]) nginx_ssl_cert_enddate = foreach_execute(ssl_certificate.nginx_ssl_certificate_files, "/usr/bin/openssl x509 -in %s -enddate -noout") nmcli_conn_show = simple_command("/usr/bin/nmcli conn show")
Fix for nightly builds Summary: Being tested on nightlies manually. Pull Request resolved:
@@ -1654,7 +1654,7 @@ void addGlobalMethods(py::module& m) { &pred_net, external_inputs, tensor_shapes, - {}); + std::unordered_set<int>()); std::string pred_net_str2; pred_net.SerializeToString(&pred_net_str2); return py::bytes(pred_net_str2);
Simplify AjaxCreateView Screams in reading the documentation
@@ -150,23 +150,24 @@ class AjaxCreateView(AjaxMixin, CreateView): """ def get(self, request, *args, **kwargs): + """ Creates form with initial data, and renders JSON response """ - response = super(CreateView, self).get(request, *args, **kwargs) - - if request.is_ajax(): - # Initialize a a new form - form = self.form_class(initial=self.get_initial()) + super(CreateView, self).get(request, *args, **kwargs) + form = self.get_form() return self.renderJsonResponse(request, form) - else: - return response - def post(self, request, *args, **kwargs): - form = self.form_class(data=request.POST, files=request.FILES) + """ Responds to form POST. Validates POST data and returns status info. - if request.is_ajax(): + Steps: + 1. Validate POST form data + 2. If valid, save form + 3. Return status info (success / failure) + """ + form = self.get_form() + # Extra JSON data sent alongside form data = { 'form_valid': form.is_valid(), } @@ -181,9 +182,6 @@ class AjaxCreateView(AjaxMixin, CreateView): return self.renderJsonResponse(request, form, data) - else: - return super(CreateView, self).post(request, *args, **kwargs) - class AjaxUpdateView(AjaxMixin, UpdateView):
Update attacked_text.py add words_diff_num
@@ -237,22 +237,9 @@ class AttackedText: indices = set() w1 = self.words w2 = other_attacked_text.words - idx1 = 0 - idx2 = 0 - flag = False - while (idx1 < len(w1) and idx2 < len(w2)): - if w1[idx1] == w2[idx2]: - flag = False - idx1 += 1 - idx2 += 1 - elif flag == False: - flag = True - indices.add(idx1) - idx1 += 1 - idx2 += 1 - else: - flag = False - idx2 += 1 + for i in range(min(len(w1), len(w2))): + if w1[i] != w2[i]: + indices.add(i) return indices def ith_word_diff(self, other_attacked_text, i): @@ -264,6 +251,43 @@ class AttackedText: return True return w1[i] != w2[i] + def words_diff_num(self, other_attacked_text): + # using edit distance to calculate words diff num + def generate_tokens(words): + result = {} + idx = 1 + for w in words: + if w not in result: + result[w] = idx + idx += 1 + return result + def words_to_tokens(words, tokens): + result = [] + for w in words: + result.append(tokens[w]) + return result + def edit_distance(w1_t, w2_t): + matrix = [[i + j for j in range(len(w2_t) + 1)] for i in range(len(w1_t) + 1)] + + for i in range(1, len(w1_t) + 1): + for j in range(1, len(w2_t) + 1): + if w1_t[i - 1] == w2_t[j - 1]: + d = 0 + else: + d = 1 + matrix[i][j] = min(matrix[i - 1][j] + 1, matrix[i][j - 1] + 1, matrix[i - 1][j - 1] + d) + + return matrix[len(w1_t)][len(w2_t)] + def cal_dif(w1, w2): + tokens = generate_tokens(w1 + w2) + w1_t = words_to_tokens(w1, tokens) + w2_t = words_to_tokens(w2, tokens) + return edit_distance(w1_t, w2_t) + w1 = self.words + w2 = other_attacked_text.words + return cal_dif(w1, w2) + + def convert_from_original_idxs(self, idxs): """Takes indices of words from original string and converts them to indices of the same words in the current string.
Update user_manual.md Adding packages to be installed to support Infiniband
@@ -686,13 +686,27 @@ Openib and libibverbs need to be install to compile Open MPI over Infiniband. Fo install the epel repository on the container. This step is not required if running using TCP/IP is enough. +To install the Infiniband drivers one needs to install the epel repository. ``` yum install -y epel-release -yum install *openib* -yum install *openib-devel* -yum install libibverbs* -yum install libibverbs-devel* +``` + +The list of packages to be installed is: +``` +openib +libibverbs +libnes +libibumad +opensm-libs +swig +ibutils-libs +ibutils +opensm +libibmad +infiniband-diags +libibverbs-utils +libibverbs-devel ``` The Open MPI source is compiled and installed in the container under /usr for convenience:
Update labels in segment_path_length Found a typo where segment_path_length debugging image was plotting segment ID's rather than the segment lengths because it used to write out both.
@@ -40,7 +40,7 @@ def segment_path_length(segmented_img, objects): # Put labels of length for c, value in enumerate(segment_lengths): - text = "{:.2f}".format(c, value) + text = "{:.2f}".format(value) w = label_coord_x[c] h = label_coord_y[c] cv2.putText(img=labeled_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
DOC: Add extlink extension Links to github issues
@@ -45,6 +45,7 @@ sys.path.insert(0, os.path.abspath('.')) extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', + 'sphinx.ext.extlinks', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', @@ -401,6 +402,10 @@ intersphinx_mapping = { autodoc_member_order = 'bysource' autosummary_generate = True +extlinks = { + 'issue': ('https://github.com/has2k1/plotnine/issues/%s', 'GH') +} + def setup(app): app.add_stylesheet('custom.css')
BLD: Replace source files with lib Use lib to find symbols rather than object files
@@ -930,7 +930,7 @@ def generate_umath_c(ext, build_dir): config.add_extension('_multiarray_umath', sources=multiarray_src + umath_src + - npymath_sources + common_src + + common_src + [generate_config_h, generate_numpyconfig_h, generate_numpy_api, @@ -941,7 +941,7 @@ def generate_umath_c(ext, build_dir): ], depends=deps + multiarray_deps + umath_deps + common_deps, - libraries=['npysort'], + libraries=['npymath', 'npysort'], extra_info=extra_info) #######################################################################
Only filter out datasets when statistics are disabled in Transform. Previously, if cache covers an entire dataset, it wouldn't be included in the statistics computation, this change means it will be included.
@@ -614,12 +614,6 @@ class Executor(base_executor.BaseExecutor): tft_beam.analysis_graph_builder.get_analysis_dataset_keys( preprocessing_fn, feature_spec, list(analysis_key_to_dataset.keys()), input_cache)) - if len(filtered_analysis_dataset_keys) < len(analysis_key_to_dataset): - tf.logging.info('Not reading the following datasets due to cache: %s', [ - v.file_pattern_suffix - for k, v in analysis_key_to_dataset.items() - if k not in filtered_analysis_dataset_keys - ]) new_analyze_data_dict = {} for key, dataset in six.iteritems(analysis_key_to_dataset): @@ -851,13 +845,19 @@ class Executor(base_executor.BaseExecutor): p | self._OptimizeRun(input_cache_dir, output_cache_dir, analyze_data_list, feature_spec, preprocessing_fn, self._GetCacheSource())) - # Removing unneeded datasets if they won't be needed for - # materialization. This means that these datasets won't be included in - # the statistics computation or profiling either. - if not materialize_output_paths: + # Removing unneeded datasets if they won't be needed for statistics or + # materialization. + if not materialize_output_paths and not compute_statistics: analyze_data_list = [ d for d in new_analyze_data_dict.values() if d is not None ] + if len(analyze_data_list) < len(new_analyze_data_dict): + tf.logging.info( + 'Not reading the following datasets due to cache: %s', [ + dataset.file_pattern_suffix + for dataset in analyze_data_list + if dataset not in new_analyze_data_dict.values() + ]) analyze_decode_fn = ( self._GetDecodeFunction(raw_examples_data_format,
fixed query modified query to use NOT instead of !=
name: Windows Non-System Account Targeting Lsass id: b1ce9a72-73cf-11ec-981b-acde48001122 -version: 1 -date: '2022-01-12' +version: 2 +date: '2022-07-30' author: Michael Haag, Splunk type: TTP datamodel: [] @@ -10,7 +10,7 @@ description: The following analytic identifies non SYSTEM accounts requesting ac requiring access to credentials. Triaging this event will require understanding the GrantedAccess from the SourceImage. In addition, whether the account is privileged or not. Review the process requesting permissions and review parallel processes. -search: '`sysmon` EventCode=10 TargetImage=*lsass.exe SourceUser!="NT AUTHORITY\\*" +search: '`sysmon` EventCode=10 TargetImage=*lsass.exe NOT (SourceUser="NT AUTHORITY\\*") | stats count min(_time) as firstTime max(_time) as lastTime by Computer, TargetImage, GrantedAccess, SourceImage, SourceProcessId, SourceUser, TargetUser | rename Computer as dest | `security_content_ctime(firstTime)`| `security_content_ctime(lastTime)`
Fix flake8 config Having the `select` meant that flake8 was only checking the `B901` rule and ignoring all others.
@@ -3,7 +3,7 @@ xfail_strict=true [flake8] -exclude = venv*,__pycache__,node_modules,cache,migrations,build +exclude = venv*,__pycache__,node_modules,cache,migrations,build,sample_cap_xml_documents.py +max-line-length = 120 # W504 line break after binary operator extend_ignore=B306, W504 -select=B901
Update welcome_email.rst Fixed typos
@@ -19,7 +19,7 @@ Subject: New Communication Platform - Mattermost Hi all, -As some of you already know, we are moving to Mattermost as our communication platform. Mattermost is a messaging app where you can talk, share files, and collaborate on projects or initiatives. Mattermost also integrate with many of the apps that you use everyday, like **[add apps]**. +As some of you already know, we are moving to Mattermost as our communication platform. Mattermost is a messaging app where you can talk, share files, and collaborate on projects or initiatives. Mattermost also integrates with many of the apps that you use every day, like **[add apps]**. We are moving to Mattermost because it will allow us to become more productive by being able communicate in one place instead of using email, instant chat, and a multitude of other programs.
Integ-tests: only use simple protocol when running NCCL test This change is done to fix test_efa test on p4d. It is done according to the doc:
@@ -13,5 +13,6 @@ mpirun \ -x RDMAV_FORK_SAFE=1 \ -x NCCL_ALGO=ring \ -x NCCL_DEBUG=WARNING \ +-x NCCL_PROTO=simple \ --mca pml ^cm --mca btl tcp,self --mca btl_tcp_if_exclude lo,docker0 --bind-to none \ /shared/openmpi/nccl-tests-2.10.0/build/all_reduce_perf -b 8 -e 1G -f 2 -g 1 -c 1 -n 100 > /shared/nccl_tests.out
portico: Move carousel forward on clicking inside tour container. Fixes:
@@ -241,6 +241,14 @@ var load = function () { interval: false, }); + // Move to the next slide on clicking inside the carousel container + $(".carousel-inner .item-container").click(function (e) { + // We don't want to trigger this event if user clicks on a link + if (e.target.tagName.toLowerCase() !== "a") { + $("#myCarousel").carousel("next"); + } + }); + // Set up events / categories / search events(); };
Fix perspective change in setData The condition `perspective != self._perspective` was always False (attribute alreday updated when reaching this code)
@@ -488,7 +488,9 @@ class StackView(qt.QMainWindow): self._stack = stack self.__createTransposedView() + perspective_changed = False if perspective != self._perspective: + perspective_changed = True self.__setPerspective(perspective) # This call to setColormap redefines the meaning of autoscale @@ -513,11 +515,10 @@ class StackView(qt.QMainWindow): # enable and init browser self._browser.setEnabled(True) - if perspective != self._perspective: + if perspective_changed: self.__planeSelection.setPerspective(perspective) # this causes self.__setPerspective to be called, which emits # sigStackChanged and sigPlaneSelectionChanged - else: self.sigStackChanged.emit(stack.size)
[isolate] limit expired cron job to 9 minutes Otherwise on prod it exceeds its 10 minute hard limit and this causes a 500.
@@ -243,13 +243,17 @@ class CronCleanupExpiredHandler(webapp2.RequestHandler): """Triggers taskqueues to delete 500 items at a time.""" @decorators.require_cronjob def get(self): + # Do not run for more than 9 minutes. Exceeding 10min hard limit causes 500. + end = time.time() + 9*60 triggered = 0 total = 0 q = model.ContentEntry.query( model.ContentEntry.expiration_ts < utils.utcnow()) cursor = None more = True - while more: + while more and time.time() < end: + # Since this query dooes not fetch the ContentEntry entities themselves, + # we cannot easily compute the size of the data deleted. keys, cursor, more = q.fetch_page( 500, start_cursor=cursor, keys_only=True) if not keys:
Fix MetaMainHasInfoRule when running from meta dir When ansible-lint is run directly from the meta directory for meta/main.yml, file.path is simply 'main.yml' and fails the `str(file.path).endswith('/main.yml')` test. We can instead compare the name attribute of the Path object to 'main.yml'.
@@ -70,7 +70,7 @@ class MetaMainHasInfoRule(AnsibleLintRule): # since Ansible 2.10 we can add a meta/requirements.yml but # we only want to match on meta/main.yml - if not str(file.path).endswith('/main.yml'): + if file.path.name != 'main.yml': return [] galaxy_info = data.get('galaxy_info', False)
Discard seconds and microseconds when determining scheduled target_size changes Review-Url:
@@ -278,6 +278,8 @@ def ensure_entities_exist(max_concurrent=50): max_concurrent: Maximum number of concurrent asynchronous requests. """ now = utils.utcnow() + # Seconds and microseconds are too granular for determining scheduling. + now = datetime.datetime(now.year, now.month, now.day, now.hour, now.minute) # Generate a few asynchronous requests at a time in order to prevent having # too many in flight at a time.
Update erddap_data.py solve and close
@@ -582,25 +582,15 @@ class Fetch_wmo(ErddapArgoDataFetcher): list(str) """ if not self.parallel: - if len(self.WMO) <= 5: # todo: This max WMO number should be parameterized somewhere else - # Retrieve all WMOs in a single request - return [self.get_url()] - else: - # Retrieve one WMO by URL sequentially (same behaviour as localftp and argovis) - urls = [] - for wmo in self.WMO: - urls.append( - Fetch_wmo( - WMO=wmo, CYC=self.CYC, ds=self.dataset_id, parallel=False - ).get_url() - ) - return urls + chunks = "auto" + chunks_maxsize = {'wmo': 5} else: + chunks = self.chunks + chunks_maxsize = self.chunks_maxsize self.Chunker = Chunker( - {"wmo": self.WMO}, chunks=self.chunks, chunksize=self.chunks_maxsize + {"wmo": self.WMO}, chunks=chunks, chunksize=chunks_maxsize ) wmo_grps = self.Chunker.fit_transform() - # self.chunks = C.chunks urls = [] for wmos in wmo_grps: urls.append(
<fix>[vm]: fix vnuma bug when open vnuma change memory size type Resolves:
@@ -3461,7 +3461,7 @@ class Vm(object): if numa_nodes: numa = e(cpu, 'numa') for _, numa_node in enumerate(numa_nodes): - e(numa,'cell', attrib={'id': str(numa_node.nodeID), 'cpus': str(numa_node.cpus), 'memory': str(numa_node.memorySize/1024), 'unit': 'KiB'}) + e(numa,'cell', attrib={'id': str(numa_node.nodeID), 'cpus': str(numa_node.cpus), 'memory': str(int(numa_node.memorySize)/1024), 'unit': 'KiB'}) distances = e(numa, 'distances') for node_index, distance in enumerate(numa_node.distance): e(distances,'sibling',attrib={'id': str(node_index), 'value':str(distance)})
Use --lockfile in place of --collections-lock +label: docsite_pr
@@ -224,11 +224,11 @@ Mazer supports specifying a list of collections to be installed from a file (a 'collections lockfile'). To install collections specified in a lockfile, use the -``--collections-lock`` option of the ``install`` subcommand: +``--lockfile`` option of the ``install`` subcommand: .. code-block:: bash - $ mazer install --collections-lock collections_lockfile.yml + $ mazer install --lockfile=collections_lockfile.yml Setting the Collections path @@ -291,12 +291,12 @@ the ``--frozen`` flag: To reproduce an existing installed collection path, redirect the 'list --lockfile' -output to a file and use that file with 'install --collections-lock': +output to a file and use that file with 'install --lockfile': .. code-block:: bash $ mazer list --lockfile > collections_lockfile.yml - $ mazer install --collections-path /tmp/somenewplace --collections-lock collections_lockfile.yml + $ mazer install --collections-path /tmp/somenewplace --lockfile=collections_lockfile.yml Building ansible content collection artifacts ---------------------------------------------
[Jira] Issues for Backlog updated URL Issues for backlog is using the incorrect URL according to this documentation: * * resulting in HTTP 404
@@ -4221,7 +4221,7 @@ api-group-workflows/#api-rest-api-2-workflow-search-get) """ :param board_id: int, str """ - url = "rest/agile/1.0/{board_id}/backlog".format(board_id=board_id) + url = "rest/agile/1.0/board/{board_id}/backlog".format(board_id=board_id) return self.get(url) def get_issues_for_board(self, board_id, jql, fields="*all", start=0, limit=None, expand=None):
Update pyproject.toml Bump version to 0.4.2
[tool.poetry] name = "autogoal" -version = "0.4.1" +version = "0.4.2" authors = ["Suilan Estevez-Velarde <[email protected]>", "Alejandro Piad-Morffis <[email protected]>"] description = "Automatic Generation Optimization And Learning" license = "MIT"
Modified time.sleep(1) delay amount Modified time.sleep(1) delay amount to give glowscript more time to launch before opening up Comm channel. Also modified code for VPython on JupyterHub
@@ -58,10 +58,15 @@ else: if jupyterlab.__version__ >= '0.35.0': from os.path import join labextensions_dir = join(jupyterlab.commands.get_app_dir(), u'static') + try: notebook.nbextensions.install_nbextension(path=package_dir + "/vpython_data", nbextensions_dir=labextensions_dir, overwrite=False, verbose=0) + except PermissionError: + #logging.info("PermissionError: Unable to install /vpython_data directory and files for VPython on JupyterLab") + pass + if 'nbextensions' in os.listdir(jd): ldir = os.listdir(nbdir) @@ -101,7 +106,10 @@ display(Javascript("""if (typeof Jupyter !== "undefined") {require(["nbextension display(Javascript("""if (typeof Jupyter !== "undefined") {require(["nbextensions/vpython_libraries/glowcomm"], function(){console.log("GLOWCOMM LOADED");});}else{element.textContent = ' ';}""")) display(Javascript("""if (typeof Jupyter !== "undefined") {require(["nbextensions/vpython_libraries/jquery-ui.custom.min"], function(){console.log("JQUERY LOADED");});}else{element.textContent = ' ';}""")) -time.sleep(1) # allow some time for javascript code above to run before attempting to setup Comm Channel +if transfer: + time.sleep(4) # allow some time for javascript code above to run after nbextensions update before attempting to setup Comm Channel +else: + time.sleep(2) # allow some time for javascript code above to run before attempting to setup Comm Channel wsConnected = False
(trivial) kivy: add missing import follow-up
@@ -12,6 +12,7 @@ from decimal import Decimal from electrum.simple_config import FEERATE_WARNING_HIGH_FEE, FEE_RATIO_HIGH_WARNING from electrum.gui.kivy.i18n import _ from electrum.plugin import run_hook +from electrum.util import NotEnoughFunds from .fee_dialog import FeeSliderDialog, FeeDialog
Ensure that resource arg vals are strings Unlike flags, which may be numeric, resource spec vals are always strings.
@@ -190,7 +190,7 @@ def _split_flag_args(args, opdef): resource_vals = {} for name, val in parsed.items(): if _is_resource(name, opdef, parsed): - resource_vals[name] = val + resource_vals[name] = str(val) else: flag_vals[name] = val return flag_vals, resource_vals
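A tiny standalone illustration of the coercion this adds (names and values below are made up, not the project's real parsing code): flag values keep whatever type the parser produced, while resource spec values are normalised to strings.

# Made-up parsed values; only the str() coercion mirrors the patch above.
parsed = {"epochs": 10, "dataset": 1.2}
resource_names = {"dataset"}             # pretend this key names a resource, not a flag

flag_vals, resource_vals = {}, {}
for name, val in parsed.items():
    if name in resource_names:
        resource_vals[name] = str(val)   # always a string: "1.2"
    else:
        flag_vals[name] = val            # numeric flags keep their type: 10

print(flag_vals)      # {'epochs': 10}
print(resource_vals)  # {'dataset': '1.2'}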
Make exceptions loading extension schemas non-fatal. * Make exceptions loading extension schemas non-fatal. Also cache results of check_exists().
@@ -120,7 +120,7 @@ class Fetcher(object): class DefaultFetcher(Fetcher): def __init__(self, - cache, # type: Dict[Text, Text] + cache, # type: Dict[Text, Union[Text, bool]] session # type: Optional[requests.sessions.Session] ): # type: (...) -> None self.cache = cache @@ -128,8 +128,10 @@ class DefaultFetcher(Fetcher): def fetch_text(self, url): # type: (Text) -> Text - if url in self.cache: - return self.cache[url] + if url in self.cache and self.cache[url] is not True: + # treat "True" as a placeholder that indicates something exists but + # not necessarily what its contents is. + return cast(Text, self.cache[url]) split = urllib.parse.urlsplit(url) scheme, path = split.scheme, split.path @@ -172,6 +174,7 @@ class DefaultFetcher(Fetcher): resp.raise_for_status() except Exception as e: return False + self.cache[url] = True return True elif scheme == 'file': return os.path.exists(urllib.request.url2pathname(str(path))) @@ -244,7 +247,7 @@ class Loader(object): idx=None, # type: Dict[Text, Union[CommentedMap, CommentedSeq, Text, None]] cache=None, # type: Dict[Text, Any] session=None, # type: requests.sessions.Session - fetcher_constructor=None, # type: Callable[[Dict[Text, Text], requests.sessions.Session], Fetcher] + fetcher_constructor=None, # type: Callable[[Dict[Text, Union[Text, bool]], requests.sessions.Session], Fetcher] skip_schemas=None # type: bool ): # type: (...) -> None @@ -378,8 +381,9 @@ class Loader(object): if self.skip_schemas: return for sch in aslist(ns): + try: fetchurl = self.fetcher.urljoin(base_url, sch) - if fetchurl not in self.cache: + if fetchurl not in self.cache or self.cache[fetchurl] is True: _logger.debug("Getting external schema %s", fetchurl) content = self.fetch_text(fetchurl) self.cache[fetchurl] = rdflib.graph.Graph() @@ -394,6 +398,8 @@ class Loader(object): pass except BadSyntax: pass + except Exception as e: + _logger.warn("Could not load extension schema %s: %s", fetchurl, e) for s, _, _ in self.graph.triples((None, RDF.type, RDF.Property)): self._add_properties(s)
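A rough sketch of the cache convention this patch introduces (assumed semantics, with the network calls stubbed out): True acts as an "exists, but body not cached" placeholder, so the text fetcher only reuses entries that hold real document text.

cache = {}

def check_exists(url):
    if url in cache:                 # cached text or the True placeholder both count
        return True
    # ... real code would probe the URL here ...
    cache[url] = True                # remember existence without storing a body
    return True

def fetch_text(url):
    if url in cache and cache[url] is not True:
        return cache[url]            # reuse real cached text only, never the placeholder
    text = "...document fetched from %s..." % url   # stand-in for the real fetch
    cache[url] = text
    return text

check_exists("https://example.org/schema.ttl")
print(fetch_text("https://example.org/schema.ttl"))  # the placeholder gets replaced by real text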
BioStruct-X --> Wellcome Trust. The BSX website no longer exists.
@@ -44,7 +44,7 @@ list. Funding ======= -DIALS development at `Diamond Light Source`_ is supported by the `BioStruct-X`_ EU grant, +DIALS development at `Diamond Light Source`_ is supported by the `Wellcome Trust`_, `Diamond Light Source`_, and `CCP4`_. DIALS development at `Lawrence Berkeley National Laboratory`_ is @@ -52,7 +52,7 @@ supported by `National Institutes of Health`_ / `National Institute of General M grant R01-GM117126. Work at LBNL is performed under `Department of Energy`_ contract DE-AC02-05CH11231. -.. _`BioStruct-X`: http://www.biostruct-x.org/ +.. _`Wellcome Trust`: https://wellcome.ac.uk/ .. _`CCP4`: http://www.ccp4.ac.uk/ .. _`Diamond Light Source`: http://www.diamond.ac.uk/ .. _`Lawrence Berkeley National Laboratory`: http://www.lbl.gov/
Added new category `python_news` to config, which holds mail lists, channel and webhook. This uses local dev environment IDs.
@@ -122,6 +122,7 @@ guild: channels: announcements: 354619224620138496 user_event_announcements: &USER_EVENT_A 592000283102674944 + python_news: &PYNEWS_CHANNEL 701667765102051398 # Development dev_contrib: &DEV_CONTRIB 635950537262759947 @@ -236,6 +237,7 @@ guild: reddit: 635408384794951680 duck_pond: 637821475327311927 dev_log: 680501655111729222 + python_news: &PYNEWS_WEBHOOK 701731296342179850 filter: @@ -568,5 +570,13 @@ duck_pond: - *DUCKY_MAUL - *DUCKY_SANTA +python_news: + mail_lists: + - 'python-ideas' + - 'python-announce-list' + - 'pypi-announce' + channel: *PYNEWS_CHANNEL + webhook: *PYNEWS_WEBHOOK + config: required_keys: ['bot.token']
Add `kedro install` to Iris example docs. Update to the Iris example documentation page to clarify the order of project creation, installation of dependencies and project run. Minor fix to contributing.md as per discussion
@@ -63,10 +63,7 @@ make test make build-docs ``` -> *Note:* If the tests in `tests/extras/datasets/spark` are failing, and you are -> not planning to work on Spark related features, then you can run a reduced -> test suite that excludes them. Do this by executing the following command: -> `make test-no-spark`. +> *Note:* If the tests in `tests/extras/datasets/spark` are failing, and you are not planning to work on [Spark](https://spark.apache.org) related features, then you can run a reduced test suite that excludes them. Do this by executing `make test-no-spark`. #### Then onwards (code or documentation changes)
Update desktop-app-deployment.rst Fix missing echo lines
@@ -105,8 +105,8 @@ You can distribute the official Windows Desktop App silently to end users, pre-c echo "minimizeToTray": false, echo "notifications": { echo "flashWindow": 0, - "bounceIcon": false, - "bounceIconType": 'informational', + echo "bounceIcon": false, + echo "bounceIconType": 'informational', echo }, echo "showUnreadBadge": true, echo "useSpellChecker": true,
concat KJTs Summary: Pull Request resolved: X-link: Add a static function to concat a list of KJTs
@@ -333,12 +333,12 @@ class SingleStepSyntheticSparseArchRewardNet(nn.Module): ) sparse_data_per_step = [ KeyedJaggedTensor.concat( - KeyedJaggedTensor.concat( - state_id_list_per_step[i], action_id_list_per_step[i] - ), - KeyedJaggedTensor.concat( - state_id_score_list_per_step[i], action_id_score_list_per_step[i] - ), + [ + state_id_list_per_step[i], + action_id_list_per_step[i], + state_id_score_list_per_step[i], + action_id_score_list_per_step[i], + ] ) for i in range(seq_len) ]
Verification: improve confirmation message handling Suppress errors coming from Discord when changing the confirmation message in case it gets deleted, or something else goes wrong. This commit also adds either the ok hand or the warning emoji to the edited message content, as with the guild syncer confirmation.
@@ -213,15 +213,21 @@ class Verification(Cog): log.debug("Staff prompt not answered, aborting operation") return False finally: + with suppress(discord.HTTPException): await confirmation_msg.clear_reactions() result = str(choice) == constants.Emojis.incident_actioned log.debug(f"Received answer: {choice}, result: {result}") # Edit the prompt message to reflect the final choice - await confirmation_msg.edit( - content=f"Request to kick `{n_members}` members was {'authorized' if result else 'denied'}!" - ) + if result is True: + result_msg = f":ok_hand: Request to kick `{n_members}` members was authorized!" + else: + result_msg = f":warning: Request to kick `{n_members}` members was denied!" + + with suppress(discord.HTTPException): + await confirmation_msg.edit(content=result_msg) + return result async def _kick_members(self, members: t.Collection[discord.Member]) -> int:
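The with suppress(...) construct used in this patch simply swallows the listed exception types; a minimal standalone illustration, with KeyError standing in for discord.HTTPException:

from contextlib import suppress

with suppress(KeyError):
    {}["missing"]        # would raise KeyError; it is silently discarded instead
print("execution continues past the suppressed error")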
Fix `readonlinesnovel` chapter body download The source removed `<div class=reading_area><div>` from chapter body
@@ -61,6 +61,6 @@ class ReadOnlineNovelsCrawler(Crawler): def download_chapter_body(self, chapter): soup = self.get_soup(chapter['url']) - contents = soup.select_one('.read-context .reading_area') + contents = soup.select_one('.read-context') assert contents, 'No chapter contents found' return self.cleaner.extract_contents(contents)
Remove assert that verifies if tables were marked. In summary, having multiple workers and multiple insert/select operations on the same table causes a lot of issues. Please refer to the issue below for a better understanding. fixes
@@ -162,9 +162,6 @@ class PostgresTarget(luigi.Target): (self.update_id, self.table, datetime.datetime.now())) - # make sure update is properly marked - assert self.exists(connection) - def exists(self, connection=None): if connection is None: connection = self.connect()
Disable time_pxd test in Python 3.4. It uses features that are unavailable and thus always fails. Test was introduced in
@@ -438,6 +438,7 @@ VER_DEP_MODULES = { 'run.mod__spec__', 'run.pep526_variable_annotations', # typing module 'run.test_exceptions', # copied from Py3.7+ + 'run.time_pxd', # _PyTime_GetSystemClock doesn't exist in 3.4 ]), }
Refactor `geothermal.py` Change .ix (deprecated) to .loc Simplify `material_properties` import to one line
@@ -30,11 +30,11 @@ def calc_ground_temperature(locator, config, T_ambient_C, depth_m): ..[Kusuda, T. et al., 1965] Kusuda, T. and P.R. Achenbach (1965). Earth Temperatures and Thermal Diffusivity at Selected Stations in the United States. ASHRAE Transactions. 71(1):61-74 """ - material_properties = pd.read_excel(locator.get_thermal_networks(config.region), sheetname=['MATERIAL PROPERTIES'])['MATERIAL PROPERTIES'] - material_properties = material_properties.set_index(material_properties['material'].values) - heat_capacity_soil = material_properties.ix['Soil','Cp_JkgK'] # _[A. Kecebas et al., 2011] - conductivity_soil = material_properties.ix['Soil','lambda_WmK'] # _[A. Kecebas et al., 2011] - density_soil = material_properties.ix['Soil','rho_kgm3'] # _[A. Kecebas et al., 2011] + material_properties = pd.read_excel(locator.get_thermal_networks(config.region), + sheetname=['MATERIAL PROPERTIES'])['MATERIAL PROPERTIES'].set_index('material') + heat_capacity_soil = material_properties.loc['Soil','Cp_JkgK'] # _[A. Kecebas et al., 2011] + conductivity_soil = material_properties.loc['Soil','lambda_WmK'] # _[A. Kecebas et al., 2011] + density_soil = material_properties.loc['Soil','rho_kgm3'] # _[A. Kecebas et al., 2011] T_max = max(T_ambient_C) + 273.15 # to K
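A toy illustration of the .ix -> .loc change (the numbers are invented, not the real thermal-network spreadsheet): label-based lookups on an indexed frame now go through .loc, since .ix was deprecated and later removed from pandas.

import pandas as pd

# Invented values; only the indexing style mirrors the patch above.
material_properties = pd.DataFrame(
    {"material": ["Soil", "Steel"],
     "Cp_JkgK": [2000.0, 490.0],
     "lambda_WmK": [1.6, 50.0],
     "rho_kgm3": [1600.0, 7850.0]}
).set_index("material")

# deprecated and later removed: material_properties.ix["Soil", "Cp_JkgK"]
heat_capacity_soil = material_properties.loc["Soil", "Cp_JkgK"]
print(heat_capacity_soil)   # 2000.0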
Update ffmpeg.py bitrate correction adjustment
@@ -238,7 +238,10 @@ class MediaStreamInfo(object): elif key == 'DISPOSITION:default': self.default = self.parse_bool(self.parse_int(val)) elif key.lower().startswith('tag:bps'): - self.bitrate = self.bitrate or (self.parse_int(val) * (1000 if self.parse_int(val) < 1000 else 1)) + self.bitrate = self.bitrate or self.parse_int(val) + + if self.bitrate and self.bitrate < 1000: + self.bitrate *= 1000 if key.startswith('TAG:'): key = key.split('TAG:')[1].lower()
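The correction above can be read as a small normalisation step (assuming, as the patch appears to, that values under 1000 were reported in kbit/s rather than bit/s); a standalone sketch:

def normalise_bitrate(bitrate):
    # Values below 1000 are assumed to be kbit/s and scaled up to bit/s.
    if bitrate and bitrate < 1000:
        bitrate *= 1000
    return bitrate

print(normalise_bitrate(128))       # 128000
print(normalise_bitrate(4500000))   # 4500000 -- already plausible as bit/s, left alone
print(normalise_bitrate(None))      # None -- unknown bitrate passes through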
BUG: fixed typo in variable name.
@@ -481,7 +481,10 @@ class Instrument(object): in_kwargs[meth_key] = self.kwargs[sort_key][meth_key] # Get the inst_module string - istr = "None" if self.inst_module is None else self.inst_module.__name_ + if self.inst_module is None: + istr = "None" + else: + istr = getattr(self.inst_module, "__name__") # Create string for other parts Instrument instantiation out_str = "".join(["pysat.Instrument(platform='", self.platform,
Create function for encoding string values Makes it easier to unit test
@@ -5728,6 +5728,14 @@ def _checkValueItemParent(policy_element, policy_name, policy_key, return False +def _encode_string(value): + encoded_null = chr(0).encode('utf-16-le') + if value is None: + return encoded_null + else: + return b''.join([value.encode('utf-16-le'), encoded_null]) + + def _buildKnownDataSearchString(reg_key, reg_valueName, reg_vtype, reg_data, check_deleted=False): ''' @@ -5749,8 +5757,7 @@ def _buildKnownDataSearchString(reg_key, reg_valueName, reg_vtype, reg_data, elif reg_vtype == "REG_QWORD": this_element_value = struct.pack(b'Q', int(reg_data)) elif reg_vtype == 'REG_SZ': - this_element_value = b''.join([reg_data.encode('utf-16-le'), - encoded_null]) + this_element_value = _encode_string(reg_data) if check_deleted: reg_vtype = 'REG_SZ' expected_string = b''.join(['['.encode('utf-16-le'), @@ -5845,11 +5852,7 @@ def _processValueItem(element, reg_key, reg_valuename, policy, parent_element, return None elif etree.QName(element).localname == 'string': this_vtype = 'REG_SZ' - if element.text is None: - this_element_value = encoded_null - else: - this_element_value = b''.join([element.text.encode('utf-16-le'), - encoded_null]) + this_element_value = _encode_string(element.text) elif etree.QName(parent_element).localname == 'elements': standard_element_expected_string = True if etree.QName(element).localname == 'boolean': @@ -5892,9 +5895,7 @@ def _processValueItem(element, reg_key, reg_valuename, policy, parent_element, if 'expandable' in element.attrib: if element.attrib['expandable'].lower() == 'true': this_vtype = 'REG_EXPAND_SZ' - if this_element_value is not None: - this_element_value = b''.join([this_element_value.encode('utf-16-le'), - encoded_null]) + this_element_value = _encode_string(this_element_value) elif etree.QName(element).localname == 'multiText': this_vtype = 'REG_MULTI_SZ' if not check_deleted else 'REG_SZ' if this_element_value is not None: @@ -5962,8 +5963,7 @@ def _processValueItem(element, reg_key, reg_valuename, policy, parent_element, six.unichr(len('{0}{1}'.format(element_values[i], chr(0)).encode('utf-16-le'))).encode('utf-32-le'), encoded_semicolon, - b''.join([element_values[i].encode('utf-16-le'), - encoded_null]), + _encode_string(element_values[i]), ']'.encode('utf-16-le')]) else: expected_string = del_keys + b''.join(['['.encode('utf-16-le'),
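The stated motivation is testability; below is a hypothetical standalone copy of the helper with a couple of pytest-style checks (a sketch, not the project's actual test suite).

def _encode_string(value):
    encoded_null = chr(0).encode('utf-16-le')
    if value is None:
        return encoded_null
    return b''.join([value.encode('utf-16-le'), encoded_null])

def test_encode_string_none_returns_bare_null_terminator():
    assert _encode_string(None) == b'\x00\x00'

def test_encode_string_appends_utf16le_null():
    assert _encode_string('ab') == 'ab'.encode('utf-16-le') + b'\x00\x00'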
Force $.ads and $-debug.ads to be part of the binding closure TN:
@@ -19,9 +19,9 @@ with Langkit_Support.Slocs; use Langkit_Support.Slocs; with Langkit_Support.Symbols; use Langkit_Support.Symbols; with Langkit_Support.Text; use Langkit_Support.Text; -private with ${ada_lib_name}.Implementation; with ${ada_lib_name}.Common; use ${ada_lib_name}.Common; - +private with ${ada_lib_name}.Implementation; +private with ${ada_lib_name}.Debug; with ${ada_lib_name}.Lexer; use ${ada_lib_name}.Lexer; use ${ada_lib_name}.Lexer.Token_Data_Handlers; @@ -592,4 +592,10 @@ private Last : Token_Index; end record; + -- Dummy references to package to force them to be included in static + -- links (thanks to the binder). This benefits the GDB helpers at no cost. + + Version : String renames ${ada_lib_name}.Version; + package DBG renames ${ada_lib_name}.Debug; + end ${ada_lib_name}.Analysis;
Update upgrading-to-3.0.rst Fixing broken link
Upgrading to version 3.0 ======================== -Use these instructions if you are upgrading from 2.0.0, 2.1.0, or 2.2.0. If you are upgrading from a version earlier than 2.0.0, you must first `upgrade to version 2.0 <../upgrading-to-2.0.html>`_. +Use these instructions if you are upgrading from 2.0.0, 2.1.0, or 2.2.0. If you are upgrading from a version earlier than 2.0.0, you must first `upgrade to version 2.0 <../administration/upgrading-to-2.0.html>`_. You must upgrade to 3.0 before upgrading to the latest version of Mattermost.
Backfill cl/470082513 * Backfill cl/470082513 Centralize the discussion of sliced object downloads in the webpage * Update cp.py
@@ -363,26 +363,11 @@ _STREAMING_TRANSFERS_TEXT = """ _SLICED_OBJECT_DOWNLOADS_TEXT = """ <B>SLICED OBJECT DOWNLOADS</B> - gsutil uses HTTP Range GET requests to perform "sliced" downloads in parallel - when downloading large objects from Cloud Storage. This means that disk - space for the temporary download destination file is pre-allocated and - byte ranges (slices) within the file are downloaded in parallel. Once all - slices have completed downloading, the temporary file is renamed to the - destination file. No additional local disk space is required for this - operation. - - This feature is only available for Cloud Storage objects because it - requires a fast composable checksum (CRC32C) to verify the - data integrity of the slices. Because sliced object downloads depend on CRC32C, - they require a compiled crcmod on the machine performing the download. If compiled - crcmod is not available, a non-sliced object download is performed instead. - - NOTE: Since sliced object downloads cause multiple writes to occur at various - locations on disk, this mechanism can degrade performance for disks with slow - seek times, especially for large numbers of slices. While the default number - of slices is set small to avoid this problem, you can disable sliced object - download if necessary by setting the "sliced_object_download_threshold" - variable in the ``.boto`` config file to 0. + gsutil can automatically use ranged ``GET`` requests to perform downloads in + parallel for large files being downloaded from Cloud Storage. See `sliced object + download documentation + <https://cloud.google.com/storage/docs/sliced-object-downloads>`_ + for a complete discussion. """ _PARALLEL_COMPOSITE_UPLOADS_TEXT = """
(from AES) Update error-response-overrides.md removed `json_format` from first Known Limitations section
@@ -113,7 +113,7 @@ spec: ## Known Limitations -- `text_format`, `text_format_source`, and `json_format` perform no string +- `text_format`and `text_format_source` perform no string escaping on expanded variables. This may break the structural integrity of your response body if, for example, the variable contains HTML data and the response content type is `text/html`. Be careful when using variables in this way, and
release(testnet): initFile uses new BT/LTE logic Until /json and /initFile are unified, we need to remember to make updates in both places.
@@ -8,7 +8,7 @@ services: - dbus-session - diagnostics environment: - - FIRMWARE_VERSION=2021.12.14.0-1 + - FIRMWARE_VERSION=2021.12.14.0-2 - DBUS_SYSTEM_BUS_ADDRESS=unix:path=/host/run/dbus/system_bus_socket - DBUS_SESSION_BUS_ADDRESS=unix:path=/session/dbus/session_bus_socket privileged: true @@ -58,9 +58,9 @@ services: - RELEASE_BUMPER=foobar diagnostics: - image: nebraltd/hm-diag:4ab18f4 + image: nebraltd/hm-diag:e59ca4f environment: - - FIRMWARE_VERSION=2021.12.14.0-1 + - FIRMWARE_VERSION=2021.12.14.0-2 - DIAGNOSTICS_VERSION=c22a429 - DBUS_SYSTEM_BUS_ADDRESS=unix:path=/host/run/dbus/system_bus_socket volumes: @@ -92,7 +92,7 @@ services: - dbus:/session/dbus environment: - DBUS_ADDRESS=unix:path=/session/dbus/session_bus_socket - - FIRMWARE_VERSION=2021.12.14.0-1 + - FIRMWARE_VERSION=2021.12.14.0-2 volumes: miner-storage:
Update mapping_tests.py Make output of gradient check verbose to diagnose error
@@ -48,7 +48,7 @@ class MappingTests(unittest.TestCase): for activation in ['tanh', 'relu', 'sigmoid']: mapping = GPy.mappings.MLPext(input_dim=3, hidden_dims=[5,5,5], output_dim=2, activation=activation) X = np.random.randn(100,3) - self.assertTrue(MappingGradChecker(mapping, X).checkgrad()) + self.assertTrue(MappingGradChecker(mapping, X).checkgrad(verbose=True)) def test_addmapping(self): m1 = GPy.mappings.MLP(input_dim=3, hidden_dim=5, output_dim=2)
Fixed error on chr function when decrypting. On line 23, the operation returns a float, and the chr function doesn't accept float values as parameters. A standalone illustration follows the diff below.
@@ -20,7 +20,7 @@ class Onepad: '''Function to decrypt text using psedo-random numbers.''' plain = [] for i in range(len(key)): - p = (cipher[i]-(key[i])**2)/key[i] + p = int((cipher[i]-(key[i])**2)/key[i]) plain.append(chr(p)) plain = ''.join([i for i in plain]) return plain
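The underlying issue is easy to reproduce outside the project (toy numbers below, not the real one-time-pad routine): in Python 3, / always yields a float, and chr() rejects floats, so the quotient has to be cast back to int.

key = 7
cipher_value = key * ord("A") + key ** 2     # 504, a made-up "encrypted" value

p = (cipher_value - key ** 2) / key          # 65.0 -- true division returns a float
# chr(p) would raise a TypeError here, because chr() only accepts integers

p = int((cipher_value - key ** 2) / key)     # 65
print(chr(p))                                # 'A'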
Avoid NULL dereference in __Pyx_KwValues_FASTCALL Simpler follow up to I don't think we need to worry about null args and non-zero nargs, but null args and 0 nargs is quite common and valid, I think. This PR just avoids a dereference in that case (which is probably dubious).
@@ -422,7 +422,7 @@ bad: #if CYTHON_METH_FASTCALL #define __Pyx_Arg_FASTCALL(args, i) args[i] #define __Pyx_NumKwargs_FASTCALL(kwds) PyTuple_GET_SIZE(kwds) - #define __Pyx_KwValues_FASTCALL(args, nargs) (&args[nargs]) + #define __Pyx_KwValues_FASTCALL(args, nargs) ((args) + (nargs)) static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s); #define __Pyx_KwargsAsDict_FASTCALL(kw, kwvalues) _PyStack_AsDict(kwvalues, kw) #else
Support callbacks when parsing process graphs, and use this to implement the 'apply' process. The dimensions parameter is not yet used:
@@ -36,6 +36,18 @@ class ImageCollection(ABC): """ pass + def apply(self,process:str,dimensions = []) -> 'ImageCollection': + """ + Applies a unary process (a local operation) to each value of the specified or all dimensions in the data cube. + https://open-eo.github.io/openeo-api/v/0.4.0/processreference/#apply + + :param process: A process (callback) to be applied on each value. The specified process must be unary meaning that it must work on a single value. + :param dimensions: The names of the dimensions to apply the process on. Defaults to an empty array so that all dimensions are used. + :return: A data cube with the newly computed values. The resolution, cardinality and the number of dimensions are the same as for the original data cube. + """ + raise NotImplemented("Apply function not supported by this data cube.") + + def apply_pixel(self, bands: List, bandfunction) -> 'ImageCollection': """Apply a function to the given set of bands in this image collection.
ceph-validate: fail on CentOS 7 The Ceph Octopus release is only supported on CentOS 8. Closes:
msg: "Distribution not supported {{ ansible_os_family }}" when: ansible_os_family not in ['Debian', 'RedHat', 'ClearLinux', 'Suse'] +- name: fail on unsupported CentOS release + fail: + msg: "CentOS release not supported {{ ansible_distribution_major_version }}" + when: + - ansible_distribution == 'CentOS' + - ansible_distribution_major_version | int != 8 + - name: red hat based systems tasks when: - ceph_repository == 'rhcs'
Add warning in validate_ip. Refer to issue
@@ -5,6 +5,7 @@ import functools import ipaddress import itertools import typing +import logging from typing import Dict, List, Optional, Union from aiohttp import web @@ -35,6 +36,8 @@ TELEGRAM_SUBNET_2 = ipaddress.IPv4Network('91.108.4.0/22') allowed_ips = set() +log = logging.getLogger(__name__) + def _check_ip(ip: str) -> bool: """ @@ -258,7 +261,9 @@ class WebhookRequestHandler(web.View): if self.request.app.get('_check_ip', False): ip_address, accept = self.check_ip() if not accept: + log.warning(f"Blocking request from a unauthorized IP: {ip_address}") raise web.HTTPUnauthorized() + # context.set_value('TELEGRAM_IP', ip_address)
Fix: was using builtin names that were not really needed. * In some cases, "__debug__" was added as a constant because the integer 1 was used in a tuple, e.g. and that was not used at all, though.
@@ -1077,6 +1077,8 @@ def allocateNestedConstants(module_context): considerForDeferral(constant_value.start) considerForDeferral(constant_value.step) considerForDeferral(constant_value.stop) + elif constant_type in (str, NoneType, int, long): + pass elif constant_value in builtin_named_values_list: considerForDeferral(builtin_named_values[constant_value])
Update codeowners test following
@@ -55,6 +55,7 @@ def _vendor_module_testcases(mod_name, expected_group): ("in/any/dir/any_file.py", BASE_MAINTAINERS), ("cirq/contrib/bla.py", BASE_MAINTAINERS), ("cirq/experiments/bla.py", QCVV_MAINTAINERS), + ("cirq/docs/qcvv/my_fancy_notebook.ipynb", QCVV_MAINTAINERS), *_vendor_module_testcases("aqt", AQT_MAINTAINERS), *_vendor_module_testcases("ionq", IONQ_MAINTAINERS), *_vendor_module_testcases("google", GOOGLE_MAINTAINERS),
dnf: enable fastestmirror by default Enabling the fastestmirror plugin allows dnf to choose the fastest (also usually the closest) mirror to the instance of osbuild. It has no effect on builds that force the use of a specific server or mirror.
@@ -22,6 +22,7 @@ arguments generated from the stage options: * `--forcearch {basearch}` * `--releasever {releasever}` * `--setopt install_weak_deps={install_weak_deps}` +* `--setopt fastestmirror={fastestmirror}` * `--config /tmp/dnf.conf` * `--exclude {pkg}` for each item in `exclude_packages` @@ -123,6 +124,11 @@ STAGE_OPTS = """ "type": "boolean", "default": true }, + "fastestmirror": { + "description": "Whether DNF should choose the fastest mirror available", + "type": "boolean", + "default": true + }, "exclude_packages": { "description": "List of package-specs to --exclude", "type": "array", @@ -210,6 +216,7 @@ def main(tree, options): basearch = options["basearch"] operation = options.get("operation", "install") weak_deps = options.get("install_weak_deps", True) + fastestmirror = options.get("fastestmirror", True) exclude_packages = options.get("exclude_packages", []) module_platform_id = options.get("module_platform_id", None) @@ -253,6 +260,7 @@ def main(tree, options): "--forcearch", basearch, "--setopt", "reposdir=", "--setopt", f"install_weak_deps={weak_deps}", + "--setopt", f"fastestmirror={fastestmirror}", "--setopt", f"skip_if_unavailable=false", "--releasever", releasever, "--noplugins",
Update bug report template add maria to bug report
@@ -43,7 +43,7 @@ body: - deepspeed: @stas00 - ray/raytune: @richardliaw, @amogkam - Documentation: @sgugger and @stevhliu + Documentation: @sgugger, @stevhliu and @MKhalusova Model hub:
README: fix typo Closes
@@ -111,13 +111,13 @@ Consult the `Changelog <https://docs.pytest.org/en/latest/changelog.html>`__ pag Support pytest -------------- -You can support pytest by obtaining a `Tideflift subscription`_. +You can support pytest by obtaining a `Tidelift subscription`_. Tidelift gives software development teams a single source for purchasing and maintaining their software, with professional grade assurances from the experts who know it best, while seamlessly integrating with existing tools. -.. _`Tideflift subscription`: https://tidelift.com/subscription/pkg/pypi-pytest?utm_source=pypi-pytest&utm_medium=referral&utm_campaign=readme +.. _`Tidelift subscription`: https://tidelift.com/subscription/pkg/pypi-pytest?utm_source=pypi-pytest&utm_medium=referral&utm_campaign=readme Security
[testing] download idna 2.7 directly * [testing] download idna 2.7 directly This package was removed from pypi. We still need it for python2, so we'll download it directly via git. * [testing] fix pycparser package
@@ -21,8 +21,8 @@ pipeline { sh """ virtualenv .testenv source .testenv/bin/activate - pip install "idna<=2.7" - pip install "pycparser<=2.18" + pip install https://github.com/kjd/idna/archive/refs/tags/v2.7.zip + pip install https://github.com/eliben/pycparser/archive/refs/tags/release_v2.18.zip pip install -e .[testing] pytest """ @@ -30,8 +30,8 @@ pipeline { sh """ virtualenv .lintenv source .lintenv/bin/activate - pip install "idna<=2.7" - pip install "pycparser<=2.18" + pip install https://github.com/kjd/idna/archive/refs/tags/v2.7.zip + pip install https://github.com/eliben/pycparser/archive/refs/tags/release_v2.18.zip pip install -e .[linting] flake8 """