Do not rm the container on Travis; it causes a weird error: /dev/stdout: resource temporarily unavailable
@@ -25,7 +25,7 @@ install: - docker-compose up -d script: - - docker-compose run --rm -v /tmp/coverage:/tmp/coverage web bash -c "COVERAGE_FILE=/tmp/coverage/.coverage pytest --cov-report= --cov=." + - docker-compose run -v /tmp/coverage:/tmp/coverage web bash -c "COVERAGE_FILE=/tmp/coverage/.coverage pytest --cov-report= --cov=." after_success: - mv /tmp/coverage/.coverage .coverage.docker
Update README_noviflow.rst for FAUCET 1.5.2; the table config is updated so that all test cases pass.
@@ -10,7 +10,7 @@ Introduction NoviFlow provide a range of switches known to work with FAUCET. -These instructions are known to work with a NoviFlow 1248, software NW400.1.8, running with FAUCET 1.4.0. +These instructions are known to work with a NoviFlow 1248 and NS-2116, software NW400.1.8 and NW400.2.1, running with FAUCET 1.5.2. In theory later versions should work, taking care to update the table configuration. @@ -29,14 +29,14 @@ In this example, the server running FAUCET is 10.0.1.8; configuration for CPN in **Configure the tables** - These matches are known to pass the unit tests as of FAUCET 1.4.0, but take care to adjust + These matches are known to pass the unit tests as of FAUCET 1.5.2, but take care to adjust ACL table matches and any changes for future versions. .. code:: bash - set config table tableid 0 matchfields 0 3 5 6 10 14 16 18 23 + set config table tableid 0 matchfields 0 3 4 5 6 10 14 16 18 23 set config table tableid 1 matchfields 0 3 4 5 6 - set config table tableid 2 matchfields 0 + set config table tableid 2 matchfields 0 5 6 10 14 set config table tableid 3 matchfields 0 3 4 5 6 10 23 29 set config table tableid 4 matchfields 5 6 10 11 12 set config table tableid 5 matchfields 5 6 10 27 29
[luci-config] Use gitiles.Location.parse in import_ref. parse_resolve raises an exception if the ref is invalid, but the ref in the location is irrelevant in import_ref because we only need the host and project. Use the simpler gitiles.Location.parse.
@@ -363,7 +363,10 @@ def import_ref(project_id, ref_name): raise NotFoundError('project %s not found' % project_id) if project.config_location.storage_type != GITILES_LOCATION_TYPE: raise Error('project %s is not a Gitiles project' % project_id) - loc = gitiles.Location.parse_resolve(project.config_location.url) + + # We don't call parse_resolve here because we are replacing treeish and + # path below anyway. + loc = gitiles.Location.parse(project.config_location.url) ref = None for r in projects.get_refs([project_id])[project_id] or ():
Add missing file. This was the missing h11 event case mentioned in the commit.
@@ -134,6 +134,8 @@ class H11Server(HTTPProtocol): self.streams[0].append(event.data) elif event is h11.NEED_DATA: break + elif isinstance(event, h11.ConnectionClosed): + break if self.connection.our_state is h11.MUST_CLOSE: self.transport.close() elif self.connection.our_state is h11.DONE:
Test-Commit: use upstream config for rdo jobs. This commit ensures that the rdo jobs don't use internal config files. Also, testing out exactly what the current_build and hash vars look like.
@@ -52,7 +52,10 @@ if [ ! -z ${current_build+x} ] export RELEASE="$RELEASE" #no mutations needed after latest changes export VARS="$VARS --extra-vars current_build=$current_build" else - export RELEASE="$RELEASE-rhel" + #implies this is a upstream job + #export RELEASE="$RELEASE-rhel" + echo "current_build is '$current_build'" + echo "hash is '$hash'" export VARS="$VARS --extra-vars current_build=$hash" fi fi
Removed should_compile logic Debugging, removed logic, force compile
@@ -177,7 +177,6 @@ class AppDynamicsInstaller(PHPExtensionHelper): The argument is the installer object that is passed into the `compile` method. """ - if(self._should_compile): print("ktully fork!!!") print("Downloading AppDynamics package...") install.package('APPDYNAMICS')
Add a new --file-pattern option (mutually exclusive with input_files). Still to be implemented.
@@ -50,8 +50,13 @@ def main(argv): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( 'input_files', - nargs="+", - help='Input files (EDF, SPEC)') + nargs="*", + help='Input files (EDF, SPEC).') + # input_files and --filepattern are mutually exclusive + parser.add_argument( + '--file-pattern', + help='File name pattern for loading a series of indexed files' + '(toto_%%04d.edf). Incompatible with argument input_files') parser.add_argument( '-o', '--output-uri', nargs="?", @@ -133,6 +138,17 @@ def main(argv): options = parser.parse_args(argv[1:]) + # mutually exclusive arguments, at least one of them is required + if bool(options.input_files) == bool(options.file_pattern is not None): # XOR + if not options.input_files: + message = "You must specify either input files (at least one), " + message += "or a file pattern." + else: + message = "You cannot specify input files and a file pattern" + message += " at the same time." + _logger.error(message) + return -1 + elif options.input_files: # some shells (windows) don't interpret wildcard characters (*, ?, []) old_input_list = list(options.input_files) options.input_files = [] @@ -143,7 +159,11 @@ def main(argv): options.input_files += [fname] else: options.input_files += globbed_files - old_input_list = None + else: + options.input_files = [] + # TODO + print(options.file_pattern) + raise NotImplementedError("TODO options.file_pattern to file list conversion") if options.debug: logging.root.setLevel(logging.DEBUG)
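The conversion from --file-pattern to a file list is left as a TODO in the diff above. Below is a minimal sketch of one way it could be filled in, assuming a printf-style index placeholder (e.g. toto_%04d.edf) and that scanning stops at the first missing index; the helper name, starting index, and stop condition are assumptions, not part of the original change.

```python
import os


def expand_file_pattern(pattern, start=0):
    """Expand an indexed pattern such as 'toto_%04d.edf' into the list of
    files that actually exist, stopping at the first missing index."""
    files = []
    index = start
    while True:
        fname = pattern % index
        if not os.path.isfile(fname):
            break
        files.append(fname)
        index += 1
    return files


# Hypothetical use at the point of the TODO above:
# options.input_files = expand_file_pattern(options.file_pattern)
```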
Preserve user group ids and group names in ES on user save. Previously, saving the user would wipe its group membership by excluding __group_ids and __group_names.
@@ -2,6 +2,7 @@ import copy from corehq.apps.change_feed.consumer.feed import KafkaChangeFeed, KafkaCheckpointEventHandler from corehq.apps.change_feed.document_types import COMMCARE_USER, WEB_USER, FORM from corehq.apps.change_feed.topics import FORM_SQL +from corehq.apps.groups.models import Group from corehq.apps.users.models import CommCareUser, CouchUser from corehq.apps.users.util import WEIRD_USER_IDS from corehq.elastic import ( @@ -47,6 +48,9 @@ def transform_user_for_elasticsearch(doc_dict): doc['base_username'] = doc['username'].split("@")[0] else: doc['base_username'] = doc['username'] + groups = Group.by_user(doc['_id'], wrap=False, include_names=True) + doc['__group_ids'] = [group['group_id'] for group in groups] + doc['__group_names'] = [group['name'] for group in groups] return doc
DOC: change fill_value of full_like from scalar to array_like Add example using array_like type. See
@@ -364,7 +364,7 @@ def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None): a : array_like The shape and data-type of `a` define these same attributes of the returned array. - fill_value : scalar + fill_value : array_like Fill value. dtype : data-type, optional Overrides the data type of the result. @@ -412,6 +412,12 @@ def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None): >>> np.full_like(y, 0.1) array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + >>> y = np.zeros([2, 2, 3], dtype=int) + >>> np.full_like(y, [0, 0, 255]) + array([[[ 0, 0, 255], + [ 0, 0, 255]], + [[ 0, 0, 255], + [ 0, 0, 255]]]) """ res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape) multiarray.copyto(res, fill_value, casting='unsafe')
Windows: Avoid passing options to Clang on Windows that it hates * With this it almost works with MinGW64 only, but the Python library is not done.
@@ -855,7 +855,7 @@ if mingw_mode: env.Append(CPPDEFINES=["_WIN32_WINNT=0x0501"]) # At least older MinGW64 has issues with this, so disable it. -if mingw_mode and gcc_version < "6": +if mingw_mode and gcc_version < "6" and not clang_mode: env.Append(LINKFLAGS=["-Wl,--no-gc-sections"]) if debug_mode: @@ -970,6 +970,7 @@ if win_target: # For MinGW and cross compilation, we need to tell the subsystem # to target as well as to automatically import everything used. if gcc_mode: + if not clang_mode: env.Append(LINKFLAGS=["-Wl,--enable-auto-import"]) if win_disable_console: @@ -1265,7 +1266,7 @@ else: if gcc_mode: if macosx_target: env.Append(LINKFLAGS=["-Wno-deprecated-declarations"]) - else: + elif not clang_mode: env.Append(LINKFLAGS=["-s"]) # When debugging, optimize less than when optimizing, when not remove @@ -1631,7 +1632,7 @@ else: target = env.Program(result_exe, source_files + source_targets) # Avoid dependency on MinGW libraries. -if win_target and gcc_mode: +if win_target and gcc_mode and not clang_mode: env.Append(LINKFLAGS=["-static-libgcc"]) # Avoid IO for compilation as much as possible, this should make the
Use global for create_ccd_data size. So they can be compared in tests without hardcoding the value multiple times.
@@ -20,14 +20,16 @@ from astropy.utils.data import (get_pkg_data_filename, get_pkg_data_filenames, from astropy.nddata.ccddata import CCDData from astropy.table import Table +DEFAULT_DATA_SIZE = 100 with NumpyRNGContext(123): - _random_array = np.random.normal(size=[100, 100]) + _random_array = np.random.normal(size=[DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE]) def create_ccd_data(): """ - Return a CCDData object of size 100x100 with units of ADU. + Return a CCDData object of size DEFAULT_DATA_SIZE x DEFAULT_DATA_SIZE + with units of ADU. """ data = _random_array.copy() fake_meta = {'my_key': 42, 'your_key': 'not 42'} @@ -60,8 +62,8 @@ def test_ccddata_meta_header_conflict(): def test_ccddata_simple(): ccd_data = create_ccd_data() - assert ccd_data.shape == (100, 100) - assert ccd_data.size == 10000 + assert ccd_data.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE) + assert ccd_data.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE assert ccd_data.dtype == np.dtype(float) @@ -77,8 +79,8 @@ def test_initialize_from_FITS(tmpdir): filename = tmpdir.join('afile.fits').strpath hdulist.writeto(filename) cd = CCDData.read(filename, unit=u.electron) - assert cd.shape == (100, 100) - assert cd.size == 10000 + assert cd.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE) + assert cd.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE assert np.issubdtype(cd.data.dtype, np.floating) for k, v in hdu.header.items(): assert cd.meta[k] == v
remove ceph-iscsi-gw play from site.yml.sample We ship ceph-iscsi-gw in a separate repo downstream and do not package it with ceph-ansible. Including the play for ceph-iscsi-gw in site.yml.sample makes the playbook fail when using the downstream packages. Fixes:
- restapis - rbdmirrors - clients - - iscsigws - mgrs gather_facts: false tags: roles: - ceph-client -- hosts: iscsigws - gather_facts: false - become: True - roles: - - ceph-iscsi-gw - - hosts: mgrs gather_facts: false become: True
Set random seed to stop flaky test Summary: The function is not very stable in the larger range; will investigate that later.
@@ -5,6 +5,7 @@ import logging import unittest import numpy.testing as npt +import torch from ml.rl.models.actor import FullyConnectedActor, GaussianFullyConnectedActor from ml.rl.test.models.test_utils import check_save_load @@ -97,6 +98,7 @@ class TestGaussianFullyConnectedActor(unittest.TestCase): ) def test_get_log_prob(self): + torch.manual_seed(0) state_dim = 8 action_dim = 4 model = GaussianFullyConnectedActor(
Add description to policies in admin_password.py blueprint policy-docs
@@ -26,9 +26,16 @@ admin_password_policies = [ policy.RuleDefault( name=POLICY_ROOT % 'discoverable', check_str=base.RULE_ANY), - policy.RuleDefault( - name=BASE_POLICY_NAME, - check_str=base.RULE_ADMIN_OR_OWNER), + base.create_rule_default( + BASE_POLICY_NAME, + base.RULE_ADMIN_OR_OWNER, + "Change the administrative password for a server", + [ + { + 'path': '/servers/{server_id}/action (changePassword)', + 'method': 'POST' + } + ]) ]
return empty list when nothing's found [fix] imdb_watchlist no longer crashes with empty lists
@@ -130,7 +130,7 @@ class ImdbWatchlist: total_item_count = len(json_vars['list']['items']) if not total_item_count: logger.verbose('No movies were found in imdb list: {}', config['list']) - return + return [] imdb_ids = [] for item in json_vars['list']['items']: if is_valid_imdb_title_id(item.get('const')):
remove stop_at_first to simplify caching github issue: AdaCore/libadalang#45
@@ -48,7 +48,6 @@ package body Langkit_Support.Lexical_Env is Recursive : Boolean := True; Rebindings : Env_Rebindings := null; Metadata : Element_Metadata := Empty_Metadata; - Stop_At_First : Boolean; Results : in out Entity_Vectors.Vector); ----------------------- @@ -378,7 +377,6 @@ package body Langkit_Support.Lexical_Env is Recursive : Boolean := True; Rebindings : Env_Rebindings := null; Metadata : Element_Metadata := Empty_Metadata; - Stop_At_First : Boolean; Results : in out Entity_Vectors.Vector) is procedure Get_Refd_Elements (Self : Referenced_Env); @@ -469,7 +467,6 @@ package body Langkit_Support.Lexical_Env is then Current_Rebindings else Shed_Rebindings (Env, Current_Rebindings)), Metadata => Current_Metadata, - Stop_At_First => Stop_At_First, Results => Results); Dec_Ref (Env); @@ -528,9 +525,7 @@ package body Langkit_Support.Lexical_Env is Current_Metadata, Current_Rebindings) then - if Stop_At_First then - goto Early_Exit; - end if; + null; end if; end loop; @@ -556,10 +551,9 @@ package body Langkit_Support.Lexical_Env is (Parent_Env, Key, From, True, Parent_Rebindings, Current_Metadata, - Stop_At_First, Results); + Results); end if; - <<Early_Exit>> Dec_Ref (Env); end Get_Internal; @@ -584,7 +578,7 @@ package body Langkit_Support.Lexical_Env is end if; Get_Internal (Self, Key, From, Recursive, null, - Empty_Metadata, False, V); + Empty_Metadata, V); if Has_Trace then Traces.Trace (Me, "Returning vector with length " & V.Length'Image); @@ -619,7 +613,7 @@ package body Langkit_Support.Lexical_Env is end if; Get_Internal (Self, Key, From, Recursive, null, Empty_Metadata, - True, V); + V); if Has_Trace then Traces.Trace (Me, "Returning vector with length " & V.Length'Image);
Fix ChatAction.user_left being considered as user_kicked. Closes
@@ -54,20 +54,9 @@ class ChatAction(EventBuilder): kicked_by=True, users=update.user_id) - elif isinstance(update, types.UpdateChannel): - # We rely on the fact that update._entities is set by _process_update - # This update only has the channel ID, and Telegram *should* have sent - # the entity in the Updates.chats list. If it did, check Channel.left - # to determine what happened. - peer = types.PeerChannel(update.channel_id) - channel = update._entities.get(utils.get_peer_id(peer)) - if channel is not None: - if isinstance(channel, types.ChannelForbidden) or channel.left: - return cls.Event(peer, - kicked_by=True) - else: - return cls.Event(peer, - added_by=True) + # UpdateChannel is sent if we leave a channel, and the update._entities + # set by _process_update would let us make some guesses. However it's + # better not to rely on this. Rely only in MessageActionChatDeleteUser. elif (isinstance(update, ( types.UpdateNewMessage, types.UpdateNewChannelMessage)) @@ -86,7 +75,7 @@ class ChatAction(EventBuilder): users=action.users) elif isinstance(action, types.MessageActionChatDeleteUser): return cls.Event(msg, - kicked_by=msg.from_id or True, + kicked_by=utils.get_peer_id(msg.from_id) if msg.from_id else True, users=action.user_id) elif isinstance(action, types.MessageActionChatCreate): return cls.Event(msg,
Fix virt.init documentation Adjust virt.init documentation to match reality: disks use 'name' property, not 'disk_name'.
@@ -1263,7 +1263,7 @@ def init(name, Disk dictionaries can contain the following properties: - disk_name + name Name of the disk. This is mostly used in the name of the disk image and as a key to merge with the profile data.
Update how_to_instantiate_a_data_context_on_a_databricks_spark_cluster.rst fixed broken links in `Additional resources` section
@@ -163,10 +163,8 @@ Additional notes Additional resources -------------------- -- How to create a Data Source in :ref:`Databricks AWS <_how_to_guides__configuring_datasources__how_to_configure_a_databricks_aws_datasource>` - -- How to create a Data Source in :ref:`Databricks Azure <_how_to_guides__configuring_datasources__how_to_configure_a_databricks_azure_datasource>` - +- How to create a Data Source in :ref:`Databricks AWS <how_to_guides__configuring_datasources__how_to_configure_a_databricks_aws_datasource>` +- How to create a Data Source in :ref:`Databricks Azure <how_to_guides__configuring_datasources__how_to_configure_a_databricks_azure_datasource>` .. discourse:: :topic_identifier: 320
Adds logic to EigenvalueParameterizedGate for larger-than-expected nullspace. Performing germ selection resulted in a case where the computed null space was larger than the minimum size needed. This seems ok, and so we just use the first <number-needed> nullspace vectors.
@@ -1874,12 +1874,16 @@ class EigenvalueParameterizedGate(GateMatrix): for ik,k in enumerate(evecIndsToMakeReal): vecs[:,ik] = self.B[:,k] V = _np.concatenate((vecs.real, vecs.imag), axis=1) - nullsp = _mt.nullspace(V); assert(nullsp.shape[1] == nToReal) - #assert we can find enough real linear combos! + nullsp = _mt.nullspace(V); + if nullsp.shape[1] < nToReal: #DEBUG + raise ValueError("Nullspace only has dimension %d when %d was expected! (i=%d, j=%d, blkSize=%d)\nevals = %s" \ + % (nullsp.shape[1],nToReal, i,j,blkSize,str(self.evals)) ) + assert(nullsp.shape[1] >= nToReal),"Cannot find enough real linear combos!" + nullsp = nullsp[:,0:nToReal] #truncate #cols if there are more than we need Cmx = nullsp[nToReal:,:] + 1j*nullsp[0:nToReal,:] # Cr + i*Ci new_vecs = _np.dot(vecs,Cmx) - assert(_np.linalg.norm(new_vecs.imag) < IMAG_TOL) + assert(_np.linalg.norm(new_vecs.imag) < IMAG_TOL), "Imaginary mag = %g!" % _np.linalg.norm(new_vecs.imag) for ik,k in enumerate(evecIndsToMakeReal): self.B[:,k] = new_vecs[:,ik] self.Bi = _np.linalg.inv(self.B)
Fixed small bug in `GenericCommand.{add,get,del,has}_setting()` that did not replace spaces in the command name when accessing a setting.
@@ -3345,6 +3345,15 @@ class GenericCommand(gdb.Command): def post_load(self): pass + def __get_setting_name(self, name): + def __sanitize_class_name(clsname): + if " " not in clsname: + return clsname + return "-".join(clsname.split()) + + class_name = __sanitize_class_name(self.__class__._cmdline_) + return "{:s}.{:s}".format(class_name, name) + @property def settings(self): """Return the list of settings for this command.""" @@ -3352,21 +3361,21 @@ class GenericCommand(gdb.Command): if x.startswith("{:s}.".format(self._cmdline_)) ] def get_setting(self, name): - key = "{:s}.{:s}".format(self.__class__._cmdline_, name) + key = self.__get_setting_name(name) setting = __config__[key] return setting[1](setting[0]) def has_setting(self, name): - key = "{:s}.{:s}".format(self.__class__._cmdline_, name) + key = self.__get_setting_name(name) return key in __config__ def add_setting(self, name, value, description=""): - key = "{:s}.{:s}".format(self.__class__._cmdline_, name) + key = self.__get_setting_name(name) __config__[key] = [value, type(value), description] return def del_setting(self, name): - key = "{:s}.{:s}".format(self.__class__._cmdline_, name) + key = self.__get_setting_name(name) del __config__[key] return
manage.py: add support for --gargs TN:
@@ -23,8 +23,10 @@ PYTHON_LIB_ROOT = LANGKIT_ROOT / "contrib" / "python" def create_subparser( subparsers: _SubParsersAction, fn: Callable[..., None], + *, with_jobs: bool = False, with_no_lksp: bool = False, + with_gargs: bool = False, accept_unknown_args: bool = False, ) -> ArgumentParser: """ @@ -34,6 +36,7 @@ def create_subparser( :param bool with_jobs: Whether to create the --jobs/-j option. :param bool with_no_lksp: Whether to create the --no-langkit-support option. + :param bool with_gargs: Whether to create the --gargs option. """ subparser = subparsers.add_parser( name=fn.__name__.replace('_', '-'), @@ -59,6 +62,11 @@ def create_subparser( " We rebuild it by default, for the convenience of" " developers." ) + if with_gargs: + subparser.add_argument( + '--gargs', action='append', + help='Options appended to GPRbuild invocations.' + ) def wrapper(args: Namespace, rest: str): if len(rest) > 0: @@ -204,7 +212,8 @@ if __name__ == '__main__': parser = ArgumentParser(description="Global manage script for langkit") subparsers = parser.add_subparsers() - create_subparser(subparsers, build_langkit_support, with_jobs=True) + create_subparser(subparsers, build_langkit_support, with_jobs=True, + with_gargs=True) create_subparser(subparsers, setenv_langkit_support) package_deps_parser = create_subparser(subparsers, package_deps) @@ -216,7 +225,8 @@ if __name__ == '__main__': create_subparser(subparsers, make, with_jobs=True, - with_no_lksp=True) + with_no_lksp=True, + with_gargs=True) create_subparser(subparsers, setenv, with_no_lksp=True)
Allow faking the currency rates. The environment variable KICOST_CURRENCY_RATES can be used to fake the currency rates. It must indicate the name of an XML file containing the desired rates. The format is the one used by the European Central Bank.
""" Simple helper to download the exchange rates. """ +import os import sys from bs4 import BeautifulSoup @@ -25,6 +26,10 @@ url = 'https://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml' def download_rates(): content = '' + if os.environ.get('KICOST_CURRENCY_RATES'): + with open(os.environ['KICOST_CURRENCY_RATES'], 'rt') as f: + content = f.read() + else: try: content = urlopen(url).read().decode('utf8') except URLError:
Style fix in compute popup
<div class="col"> <p>Cluster alias:</p> </div> - <div class="col tooltip-wrap" (mouseover)="isEllipsisActive($event)"> + <div class="col" (mouseover)="isEllipsisActive($event)"> <span>{{resource.computational_name}}</span> <!-- <div class="tooltip" [style.visibility]="tooltip ? 'visible': 'hidden'">{{resource.computational_name}}--> <!-- </div>-->
Fix bug in TDict ordering Found this while working on unsafe stuff.
@@ -632,8 +632,8 @@ case class TDict(keyType: Type, valueType: Type) extends TContainer { extendOrderingToNull(missingGreatest)( Ordering.Iterable( Ordering.Tuple2( - elementType.ordering(missingGreatest), - elementType.ordering(missingGreatest)))) + keyType.ordering(missingGreatest), + valueType.ordering(missingGreatest)))) } }
Fixes typo in neutron_350.py There is a typo in the message that appears when an answerfile is generated using PackStack CLI. I removed an extra "O" from "chosen" & capitalized the project name Neutron. Closes-Bug:
@@ -445,8 +445,8 @@ def initConfig(controller): "USE_DEFAULT": False, "NEED_CONFIRM": False, "CONDITION": False, - "MESSAGE": ("You have choosen OVN neutron backend. Note that this backend does not support the VPNaaS or FWaaS services. " - "Geneve will be used as encapsulation method for tenant networks"), + "MESSAGE": ("You have chosen OVN Neutron backend. Note that this backend does not support the VPNaaS or FWaaS services. " + "Geneve will be used as the encapsulation method for tenant networks"), "MESSAGE_VALUES": ["ovn"]}, {"CMD_OPTION": "os-neutron-ml2-sriov-interface-mappings",
BUG: fixed data acknowledgements referencing Fixed a bug in the way that the instrument acknowledgements and references are accessed.
@@ -45,4 +45,4 @@ Citing the publication: To aid in scientific reproducibility, please include the version number in publications that use this code. This can be found by invoking `pysat.__version__ `. -Information for appropriately acknowledging and citing the different instruments accessed through pysat is sometimes available in the metadata through `inst.meta.info.acknowledgements` and `inst.meta.info.reference`. If this information is missing, please consider improving pysat by either submitting an issue or adding the information yourself. +Information for appropriately acknowledging and citing the different instruments accessed through pysat is sometimes available in the metadata through `inst.meta.info['acknowledgements']` and `inst.meta.info['reference']`. If this information is missing, please consider improving pysat by either submitting an issue or adding the information yourself.
flask_utils: add missing variable It's used with `global` so it must be defined first
@@ -32,7 +32,7 @@ from integration_tests.tests.utils import get_resource logger = setup_logger('Flask Utils', logging.INFO) - +security_config = None SCRIPT_PATH = '/tmp/reset_storage.py' CONFIG_PATH = '/tmp/reset_storage_config.json'
More documentation on caffe2::Operator Summary: Pull Request resolved:
@@ -602,6 +602,12 @@ class Operator : public OperatorBase { } ~Operator() noexcept override {} + /// Retrieve a non-owning reference to the input at position 'idx' for this + /// operator. The returned reference is valid for the duration of the + /// RunOnDevice call. The optional 'type' parameter can be used to assert a + /// required device type for the input (by default, we assert that the tensor + /// is consistent with the device type implied by the Context parameter of an + /// Operator.) inline const Tensor& Input( int idx, DeviceType type = Context::GetDeviceType()) { @@ -617,6 +623,54 @@ class Operator : public OperatorBase { return OperatorBase::XOutputTensor(idx, dims, options); } + /// Retrieve a non-owning pointer to the output at position 'idx', + /// initializing it to have size 'dims' and properties 'options' if + /// there is no pre-existing output or the pre-existing output does + /// not have the correct options. The returned pointer is valid for + /// the duration of the RunOnDevice call. If device is not explicitly + /// specified in options, we default to allocating output on the + /// current device of the device type implied by the Context parameter + /// of this Operator. + /// + /// Note [Operator::Output what?] + /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + /// The contract of Operator::Output is somewhat complex; it is perhaps better + /// understood in terms of what was historically an idiomatic Caffe2 operator + /// implementation: + /// + /// void RunOnDevice() override { + /// auto* output = Output(0, output_size, dtype<float>()); + /// float* output_ptr = output->data<float>(); + /// // write into output_ptr + /// } + /// + /// In the simple case, this code does the following things: + /// + /// 1. Allocates a new tensor with size 'output_size' and dtype 'float' + /// (and device type whatever the Operator's device type is) + /// 2. "Registers" this tensor as the 0th output tensor of this operator + /// (Caffe2 operators don't "return" outputs; instead, outputs + /// are shoved into an output vector which the executor reads out.) + /// 3. Returns the tensor, so the operator implementation can write + /// the actual output data into the tensor. + /// + /// So what's this business with "pre-existing" outputs? Caffe2 + /// commonly applies an optimization whereby it reuses tensors on + /// subsequent runs of operators in a graph. It doesn't know ahead + /// of time what intermediate tensors it will need, so the first + /// time it runs a graph it has all of the operators create the outputs + /// necessary (as described above). However, the second time around, + /// it will reuse all of the tensors created from the first time. + /// If they are lucky, this time the Output() call is a no-op and + /// just returns the old tensor. + /// + /// However, we cannot /guarantee/ that the output size will be the + /// same the next time the Operator is called; for example, output + /// size may be data dependent and vary between runs. In this case, + /// we have to resize it to the correct size. Resizing is still + /// helpful, as we may be able to fit the output in the same + /// space that was previously used. + /// Tensor* Output(int idx, at::IntList dims, at::TensorOptions options) { // We'll default device to the device of the current Operator Context if (options.device_opt() == c10::nullopt) {
refactor: model: Extract stream sorting to new functions. This is useful if model.pinned_streams or model.unpinned_streams needs to be sorted again in the future.
@@ -57,6 +57,13 @@ class ServerConnectionFailure(Exception): pass +def sort_streams(streams: List[List[str]]) -> None: + """ + Used for sorting model.pinned_streams and model.unpinned_streams. + """ + streams.sort(key=lambda s: s[0].lower()) + + class Model: """ A class responsible for storing the data to be displayed. @@ -570,6 +577,13 @@ class Model: for subscription in subscriptions: subscription['color'] = canonicalize_color(subscription['color']) + pinned_streams = [[stream[key] for key in stream_keys] + for stream in subscriptions if stream['pin_to_top']] + unpinned_streams = [[stream[key] for key in stream_keys] + for stream in subscriptions + if not stream['pin_to_top']] + sort_streams(pinned_streams) + sort_streams(unpinned_streams) # Mapping of stream-id to all available stream info # Stream IDs for muted streams # Limited stream info sorted by name (used in display) @@ -577,12 +591,8 @@ class Model: {stream['stream_id']: stream for stream in subscriptions}, {stream['stream_id'] for stream in subscriptions if stream['in_home_view'] is False}, - sorted([[stream[key] for key in stream_keys] - for stream in subscriptions if stream['pin_to_top']], - key=lambda s: s[0].lower()), - sorted([[stream[key] for key in stream_keys] - for stream in subscriptions if not stream['pin_to_top']], - key=lambda s: s[0].lower()) + pinned_streams, + unpinned_streams, ) def _group_info_from_realm_user_groups(self,
Minimalistic changes to allow successful execution of `keylime_ca -c listen` with `openssl`. Basically, the problem stems from the fact that `openssl` produces an **empty** `cacrl.der` (and `cacrl.pem`). Resolves: #261
@@ -319,6 +319,7 @@ def cmd_revoke(workingdir, name=None, serial=None): write_private(priv) # write out the CRL to the disk + if os.stat('cacrl.der').st_size : with open('cacrl.der', 'wb') as f: f.write(crl) convert_crl_to_pem("cacrl.der", "cacrl.pem") @@ -378,7 +379,7 @@ def cmd_listen(workingdir, cert_path): logger.info("checking CRL for expiration every hour") while True: try: - if os.path.exists('cacrl.der'): + if os.path.exists('cacrl.der') and os.stat('cacrl.der').st_size : retout = cmd_exec.run( "openssl crl -inform der -in cacrl.der -text -noout", lock=False)['retout'] for line in retout:
periodic plotting bugfix. Following PR identified plotting issues with periodic boundary conditions. I've now tried to fix these up so that the proper number of prior dimensions is initialized and labels are properly transferred.
@@ -1411,15 +1411,6 @@ def boundplot(results, dims, it=None, idx=None, prior_transform=None, if (it is None and idx is None) or (it is not None and idx is not None): raise ValueError("You must specify either an iteration or an index!") - # TODO: npdim and label are undefined here! - - # Gather non-periodic boundary conditions. - if periodic is not None: - nonperiodic = np.ones(npdim, dtype='bool') - nonperiodic[periodic] = False - else: - nonperiodic = None - # Set defaults. plot_kwargs['marker'] = plot_kwargs.get('marker', 'o') plot_kwargs['linestyle'] = plot_kwargs.get('linestyle', 'None') @@ -1436,6 +1427,13 @@ def boundplot(results, dims, it=None, idx=None, prior_transform=None, raise ValueError("No bounds were saved in the results!") nsamps = len(results['samples']) + # Gather non-periodic boundary conditions. + if periodic is not None: + nonperiodic = np.ones(bounds[0].n, dtype='bool') + nonperiodic[periodic] = False + else: + nonperiodic = None + if it is not None: if it >= nsamps: raise ValueError("The iteration requested goes beyond the " @@ -1606,7 +1604,7 @@ def boundplot(results, dims, it=None, idx=None, prior_transform=None, axes.yaxis.set_major_formatter(sf) if labels is not None: axes.set_xlabel(labels[0], **label_kwargs) - axes.set_ylabel(label[1], **label_kwargs) + axes.set_ylabel(labels[1], **label_kwargs) else: axes.set_xlabel(r"$x_{"+str(dims[0]+1)+"}$", **label_kwargs) axes.set_ylabel(r"$x_{"+str(dims[1]+1)+"}$", **label_kwargs) @@ -1721,13 +1719,6 @@ def cornerbound(results, it=None, idx=None, prior_transform=None, if (it is None and idx is None) or (it is not None and idx is not None): raise ValueError("You must specify either an iteration or an index!") - # Gather non-periodic boundary conditions. - if periodic is not None: - nonperiodic = np.ones(npdim, dtype='bool') - nonperiodic[periodic] = False - else: - nonperiodic = None - # Set defaults. plot_kwargs['marker'] = plot_kwargs.get('marker', 'o') plot_kwargs['linestyle'] = plot_kwargs.get('linestyle', 'None') @@ -1744,6 +1735,13 @@ def cornerbound(results, it=None, idx=None, prior_transform=None, raise ValueError("No bounds were saved in the results!") nsamps = len(results['samples']) + # Gather non-periodic boundary conditions. + if periodic is not None: + nonperiodic = np.ones(bounds[0].n, dtype='bool') + nonperiodic[periodic] = False + else: + nonperiodic = None + if it is not None: if it >= nsamps: raise ValueError("The iteration requested goes beyond the "
Deleted headings, amended definition Deleted account and loan heading. Changed definition from interest accrued to interest accumulated.
@@ -5,12 +5,10 @@ schemas: [account, derivative_cash_flow, derivative, loan, security] --- # accrued_interest -#account -**accrued\_interest** represents the interest accrued *but unpaid* since the [last\_payment\_date][lpd] and due at the [next\_payment\_date][npd]. -Accrued interest is an accounting definition resulting from the [accrual basis][acc] of accounting. Generally speaking, this should be the amount reported on the income statement and the balance sheet (as not yet been paid/received). In other words, it is income earned or expenses incurred but not yet recognised in the revenues. +**accrued\_interest** represents the interest accumulated *but unpaid* since the [last\_payment\_date][lpd] and due at the [next\_payment\_date][npd]. -#loan +Accrued interest is an accounting definition resulting from the [accrual basis][acc] of accounting. Generally speaking, this should be the amount reported on the income statement and the balance sheet (as not yet been paid/received). In other words, it is income earned or expenses incurred but not yet recognised in the revenues. ---
Fixed styling for active and hover states. Prior to this, bootstrap's more specific rules would make active buttons the extra-dark blue "selected" style instead of the "purple" primary style.
margin-right: 5px; margin-bottom: 5px; - &.active { + &.active, &.active:focus, &.active:hover { // Styled similarly to .btn-primary // Don't bold font because .active often toggles on and off, and we don't want the size to change background-color: @call-to-action-mid;
m4: add download mirror * m4: add download mirror Fixes * m4: use new syntax to specify mirrors
sources: "1.4.19": - url: "https://ftp.gnu.org/gnu/m4/m4-1.4.19.tar.gz" + url: + - "https://ftp.gnu.org/gnu/m4/m4-1.4.19.tar.gz" + - "https://ftpmirror.gnu.org/gnu/m4/m4-1.4.19.tar.gz" sha256: "3be4a26d825ffdfda52a56fc43246456989a3630093cced3fbddf4771ee58a70" "1.4.18": - url: "https://ftp.gnu.org/gnu/m4/m4-1.4.18.tar.gz" + url: + - "https://ftp.gnu.org/gnu/m4/m4-1.4.18.tar.gz" + - "https://ftpmirror.gnu.org/gnu/m4/m4-1.4.18.tar.gz" sha256: "ab2633921a5cd38e48797bf5521ad259bdc4b979078034a3b790d7fec5493fab" patches: "1.4.19":
Update data-drift.md fixed broken link
@@ -76,7 +76,7 @@ You can also zoom on distributions to understand what has changed. ![](../.gitbook/assets/data_distr_by_feature.png) {% hint style="info" %} -To change the bins displayed, you can define [custom options](../../step-by-step-guides/report-customization/options-for-data-target-drift.md). +To change the bins displayed, you can define [custom options](../customization/options-for-data-target-drift.md). {% endhint %} ## Report customization
Remove outdated comment Should have been removed as part of commit
@@ -245,11 +245,7 @@ class CouchCaseUpdateStrategy(UpdateStrategy): return self def soft_rebuild_case(self, xforms=None): - """ - Rebuilds the case state in place from its actions. - - If strict is True, this will enforce that the first action must be a create. - """ + """Rebuilds the case state in place from its actions.""" xforms = xforms or {} self.reset_case_state() # try to re-sort actions if necessary
Add asteroids 162173 Ryugu (Q1385178) and 101955 Bennu (Q11558) Both are being used as globes for other items on Wikidata. See:
@@ -58,6 +58,7 @@ class Family(family.WikimediaFamily): """Supported globes for Coordinate datatype.""" return { 'ariel': 'http://www.wikidata.org/entity/Q3343', + 'bennu': 'http://www.wikidata.org/entity/Q11558', 'callisto': 'http://www.wikidata.org/entity/Q3134', 'ceres': 'http://www.wikidata.org/entity/Q596', 'deimos': 'http://www.wikidata.org/entity/Q7548', @@ -83,6 +84,7 @@ class Family(family.WikimediaFamily): 'phoebe': 'http://www.wikidata.org/entity/Q17975', 'pluto': 'http://www.wikidata.org/entity/Q339', 'rhea': 'http://www.wikidata.org/entity/Q15050', + 'ryugu': 'http://www.wikidata.org/entity/Q1385178', 'steins': 'http://www.wikidata.org/entity/Q150249', 'tethys': 'http://www.wikidata.org/entity/Q15047', 'titan': 'http://www.wikidata.org/entity/Q2565',
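A hypothetical usage sketch showing a Coordinate that uses one of the newly added globes; the latitude/longitude values are placeholders and the constructor arguments are assumed from pywikibot's Coordinate API rather than taken from this change.

```python
import pywikibot

site = pywikibot.Site("wikidata", "wikidata")

# A coordinate on asteroid 162173 Ryugu; 'globe' must be one of the keys
# defined in the globes mapping above.
coord = pywikibot.Coordinate(lat=0.0, lon=0.0, globe="ryugu", site=site)
```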
Fix get_dataset_stats: change SampleDocument to DatasetSampleDocument. SampleDocument was wrong here; it was originally ODMDatasetSample, not ODMSample.
@@ -169,7 +169,7 @@ def get_dataset_stats(dataset): } } """ - _sample_doc_cls = type(dataset.name, (foo.SampleDocument,), {}) + _sample_doc_cls = type(dataset.name, (foo.DatasetSampleDocument,), {}) num_default_fields = len(_sample_doc_cls.get_field_schema()) field_names = [field_name for field_name in dataset.get_field_schema()]
macOS: Enhanced output for missing DLLs during dependency scan * Also normalize paths consistently for rpath and loader_path resolutions, for at least readability of output.
@@ -146,9 +146,9 @@ def _resolveBinaryPathDLLsMacOS( break else: # This is only a guess, might be missing package specific directories. - resolved_path = os.path.join(original_dir, path[7:]) + resolved_path = os.path.normpath(os.path.join(original_dir, path[7:])) elif path.startswith("@loader_path/"): - resolved_path = os.path.join(original_dir, path[13:]) + resolved_path = os.path.normpath(os.path.join(original_dir, path[13:])) elif os.path.basename(path) == os.path.basename(binary_filename): # We ignore the references to itself coming from the library id. continue @@ -161,8 +161,8 @@ def _resolveBinaryPathDLLsMacOS( raise NuitkaForbiddenDLLEncounter(binary_filename, "pyside6") inclusion_logger.sysexit( - "Error, failed to resolve DLL path %s (for %s), please report the bug." - % (path, binary_filename) + "Error, failed to find path %s (resolved DLL to %s) for %s, please report the bug." + % (path, resolved_path, binary_filename) ) # Some libraries depend on themselves.
[Datasets] Add test for reading CSV files without reading the first line as the header. This PR adds a test confirming that the user can manually supply column names as an alternative to reading a header line.
@@ -1932,7 +1932,7 @@ def test_csv_roundtrip(ray_start_regular_shared, fs, data_path): ], ) def test_csv_write_block_path_provider( - shutdown_only, + ray_start_regular_shared, fs, data_path, endpoint_url, @@ -1971,6 +1971,24 @@ def test_csv_write_block_path_provider( assert df.equals(ds_df) +# NOTE: The last test using the shared ray_start_regular_shared cluster must use the +# shutdown_only fixture so the shared cluster is shut down, otherwise the below +# test_write_datasource_ray_remote_args test, which uses a cluster_utils cluster, will +# fail with a double-init. +def test_csv_read_no_header(shutdown_only, tmp_path): + from pyarrow import csv + + file_path = os.path.join(tmp_path, "test.csv") + df = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]}) + df.to_csv(file_path, index=False, header=False) + ds = ray.data.read_csv( + file_path, + read_options=csv.ReadOptions(column_names=["one", "two"]), + ) + out_df = ds.to_pandas() + assert df.equals(out_df) + + class NodeLoggerOutputDatasource(Datasource[Union[ArrowRow, int]]): """A writable datasource that logs node IDs of write tasks, for testing."""
Update README with more detailed description The README shows up on PyPI, etc so it's good to make it more detailed.
-=================== -MLflow Beta Release -=================== +============================================= +MLflow: A Machine Learning Lifecycle Platform +============================================= + +MLflow is a platform to streamline machine learning development, including tracking experiments, packaging code +into reproducible runs, and sharing and deploying models. MLflow offers a set of lightweight APIs in that can +used with any existing machine learning application or library (TensorFlow, PyTorch, XGBoost, etc), wherever you +currently run ML code (e.g. in notebooks, standalone applications or the cloud). MLflow's current components are: + +* `MLflow Tracking <https://mlflow.org/docs/latest/tracking.html>`_: An API to log parameters, code, and + results in machine learning experiments and compare them using an interactive UI. +* `MLflow Projects <https://mlflow.org/docs/latest/projects.html>`_: A code packaging format for reproducible + runs using Conda and Docker, so you can share your ML code with others. +* `MLflow Models <https://mlflow.org/docs/latest/models.html>`_: A model packaging format and tools that let + you easily deploy the same model (from any ML library) to batch and real-time scoring on platforms such as + Docker, Apache Spark, Azure ML and AWS SageMaker. **Note:** The current version of MLflow is a beta release. This means that APIs and data formats -are subject to change! +are subject to change! However, the next release, MLflow 1.0, will stabilize these. -**Note 2:** We do not currently support running MLflow on Windows. Despite this, we would appreciate any contributions -to make MLflow work better on Windows. +**Note 2:** The released versions of MLflow currently do not run on Windows, although the master branch can. Installing ---------- @@ -77,9 +89,6 @@ MLflow artifacts and then load them again for serving. There is an example train $ curl -d '{"columns":[0],"index":[0,1],"data":[[1],[-1]]}' -H 'Content-Type: application/json' localhost:5000/invocations - - - Contributing ------------ We happily welcome contributions to MLflow. Please see our `contribution guide <CONTRIBUTING.rst>`_
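To make the Tracking description concrete, here is a minimal logging snippet of the kind the README alludes to; the parameter and metric values are placeholders.

```python
import mlflow

# Log one parameter and one metric to the local ./mlruns store,
# then inspect the run with `mlflow ui`.
with mlflow.start_run():
    mlflow.log_param("alpha", 0.5)
    mlflow.log_metric("rmse", 0.78)
```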
Update running_pets.md. Updated the doc at line 211 as well.
@@ -208,7 +208,7 @@ For running the training Cloud ML job, we'll configure the cluster to use 5 training jobs and three parameters servers. The configuration file can be found at `object_detection/samples/cloud/cloud.yml`. -Note: This sample is supported for use with 1.8 runtime version. +Note: The code sample below is supported for use with 1.9 runtime version. To start training and evaluation, execute the following command from the `tensorflow/models/research/` directory:
fix shadowed variable name Summary: When compiled with -Werror=shadow-compatible-local, cannot reuse a variable name. This passed our tests, but some people use stronger settings to compile.
@@ -50,9 +50,9 @@ void Context::connectFullMesh( allBytes.insert(allBytes.end(), addrBytes.begin(), addrBytes.end()); } - std::ostringstream key; - key << rank; - store.set(key.str(), allBytes); + std::ostringstream storeKey; + storeKey << rank; + store.set(storeKey.str(), allBytes); // Connect every pair for (int i = 0; i < size; i++) {
(Games Cog): Updated task repeating cooldown time. Changed the `hours` argument in `refresh_genres_task` from `1.0` to `24.0` since there is no need for such frequent updating.
@@ -138,7 +138,7 @@ class Games(Cog): self.refresh_genres_task.start() - @tasks.loop(hours=1.0) + @tasks.loop(hours=24.0) async def refresh_genres_task(self) -> None: """Refresh genres in every hour.""" try:
Use tezos/opam-repository fork. Problem: It's inconvenient to manually find suitable versions of the opam packages in hacks.nix :( Solution: Use the tezos/opam-repository fork, which has only suitable versions for all dependencies.
}, "opam-repository": { "branch": "master", - "description": "Main public package repository for OPAM, the source package manager of OCaml.", - "homepage": "https://opam.ocaml.org", - "owner": "ocaml", + "description": "Tezos opam-repository fork.", + "owner": "tezos", "repo": "opam-repository", - "rev": "3e92793804b7f34fca09fb17e2ebafd6d29d90cf", - "sha256": "175fdlydmwn6kb09dyz2pz8z2wdsa2lkfys0yqdgv237m67mk2yh", + "rev": "393349af19bb54e3cb790ac8ef54a72adc71aecf", + "sha256": "1b89v9488qq0nxjbr249h8mcm6fgz6dyz6ph83gkf3yi83mpqcv0", "type": "tarball", - "url": "https://github.com/ocaml/opam-repository/archive/3e92793804b7f34fca09fb17e2ebafd6d29d90cf.tar.gz", - "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz" + "url": "https://gitlab.com/tezos/opam-repository/-/archive/393349af19bb54e3cb790ac8ef54a72adc71aecf.tar.gz", + "url_template": "https://gitlab.com/<owner>/<repo>/-/archive/<rev>.tar.gz" }, "serokell-nix": { "branch": "master",
Add test for short non-local number Test case for issue
@@ -327,3 +327,8 @@ class ExampleNumbersTest(unittest.TestCase): short_metadata = PhoneMetadata.short_metadata_for_region("GB") result = str(short_metadata) self.assertTrue(result.startswith("PhoneMetadata(id='GB', country_code=None, international_prefix=None")) + + def testGBLocalNumberLength(self): + # Python version extra test. Issue #172. + numobj = phonenumberutil.parse("+4408001111", "GB") + self.assertEqual("+44 800 1111", phonenumberutil.format_number(numobj, phonenumberutil.PhoneNumberFormat.INTERNATIONAL))
gae.py: make it work for Milo. Milo's "logs" module uses the property "service" instead of "module" and does not specify "application". Check the "service" property and fall back to "module". Fail with a nice error message if the application id is not specified in the default module and not provided explicitly. Review-Url:
@@ -236,7 +236,7 @@ class Application(object): for yaml_path in find_app_yamls(self._app_dir): with open(yaml_path) as f: data = yaml.load(f) - module_id = data.get('module', 'default') + module_id = data.get('service', data.get('module', 'default')) if module_id in self._modules: raise ValueError( 'Multiple *.yaml files define same module %s: %s and %s' % @@ -249,6 +249,9 @@ class Application(object): if 'default' not in self._modules: raise ValueError('Default module is missing') + if not self.app_id: + raise ValueError('application id is neither specified in default module, ' + 'nor provided explicitly') @property def app_dir(self): @@ -258,7 +261,7 @@ class Application(object): @property def app_id(self): """Application ID as passed to constructor, or as read from app.yaml.""" - return self._app_id or self._modules['default'].data['application'] + return self._app_id or self._modules['default'].data.get('application') @property def modules(self):
Reload doesn't kill operations. Removes elements and orphan opnodes.
@@ -3201,7 +3201,8 @@ class Elemental(Modifier): @self.tree_operation(_("Reload {name}"), node_type="file", help="") def reload_file(node, **kwargs): filepath = node.filepath - self.clear_elements_and_operations() + self.clear_elements() + self.remove_orphaned_opnodes() self.load(filepath) @self.tree_submenu(_("Duplicate"))
add suggestion to use lld to CONTRIBUTING.md Summary: I found this significantly speeds up incremental builds. Pull Request resolved:
@@ -349,6 +349,16 @@ ccache -F 0 # deploy (and add to ~/.bashrc for later) export PATH="/usr/lib/ccache:$PATH" ``` +#### Use a faster linker +If you are editing a single file and rebuilding in a tight loop, the time spent +linking will dominate. The system linker available in most Linux distributions +(GNU `ld`) is quite slow. Use a faster linker, like [lld](https://lld.llvm.org/). + +The easiest way to use `lld` this is download the +[latest LLVM binaries](http://releases.llvm.org/download.html#8.0.0) and run: +``` +ln -s /path/to/downloaded/ld.lld /usr/local/bin/ld +``` ## CUDA Development tips
Index task_hashsum to give cross-run query speedup Practical experience with wstat has shown this index to give great speedup when making queries which match up tasks between runs based on their checkpointing hashsum.
@@ -131,7 +131,7 @@ class Database: task_depends = Column('task_depends', Text, nullable=True) task_func_name = Column('task_func_name', Text, nullable=False) task_memoize = Column('task_memoize', Text, nullable=False) - task_hashsum = Column('task_hashsum', Text, nullable=True) + task_hashsum = Column('task_hashsum', Text, nullable=True, index=True) task_inputs = Column('task_inputs', Text, nullable=True) task_outputs = Column('task_outputs', Text, nullable=True) task_stdin = Column('task_stdin', Text, nullable=True)
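For illustration, the kind of cross-run matching query that benefits from this index is sketched below; the table and column names other than task_hashsum are assumptions about the monitoring schema, and the database path and run ids are placeholders.

```python
import sqlite3

conn = sqlite3.connect("monitoring.db")
# Pair up tasks from two runs that share the same checkpointing hashsum.
rows = conn.execute(
    """
    SELECT a.task_id, b.task_id
    FROM task AS a
    JOIN task AS b ON a.task_hashsum = b.task_hashsum
    WHERE a.run_id = ? AND b.run_id = ?
    """,
    ("run-1", "run-2"),
).fetchall()
conn.close()
```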
Fix email role enum issue. Fix the bug where the email generated after changing a user role shows the enum value. Fix a similar issue with the Team Project Role email.
@@ -2172,7 +2172,7 @@ def change_organization_role( user=role.user, submitter=request.user, organization_name=organization.name, - role=role.role_name, + role=role.role_name.value, ) send_role_changed_as_organization_member_email( @@ -2180,7 +2180,7 @@ def change_organization_role( role.user, submitter=request.user, organization_name=organization.name, - role=role.role_name, + role=role.role_name.value, ) organization.record_event( @@ -4509,7 +4509,7 @@ def change_team_project_role(project, request, _form_class=ChangeTeamProjectRole team=role.team, submitter=request.user, project_name=project.name, - role=role.role_name, + role=role.role_name.value, ) send_role_changed_as_team_collaborator_email( request,
Add two more invalid serialization tests Those tests are about missing "keys" and "roles" attributes in Targets.Delegations.
@@ -319,6 +319,13 @@ class TestSerialization(unittest.TestCase): invalid_delegations: utils.DataSet = { "empty delegations": "{}", + "missing keys": '{ "roles": [ \ + {"keyids": ["keyid"], "name": "a", "terminating": true, "paths": ["fn1"], "threshold": 3}, \ + {"keyids": ["keyid2"], "name": "b", "terminating": true, "paths": ["fn2"], "threshold": 4} ] \ + }', + "missing roles": '{"keys": { \ + "keyid1" : {"keytype": "rsa", "scheme": "rsassa-pss-sha256", "keyval": {"public": "foo"}}, \ + "keyid2" : {"keytype": "ed25519", "scheme": "ed25519", "keyval": {"public": "bar"}}}}', "bad keys": '{"keys": "foo", \ "roles": [{"keyids": ["keyid"], "name": "a", "paths": ["fn1", "fn2"], "terminating": false, "threshold": 3}]}', "bad roles": '{"keys": {"keyid" : {"keytype": "rsa", "scheme": "rsassa-pss-sha256", "keyval": {"public": "foo"}}}, \
Update modular_commands.rst. Minor fix to use cmd2 instead of cmd in the example text.
@@ -44,7 +44,7 @@ functions with ``help_``, and completer functions with ``complete_``. A new decorator ``with_default_category`` is provided to categorize all commands within a CommandSet in the same command category. Individual commands in a CommandSet may be override the default category by specifying a -specific category with ``cmd.with_category``. +specific category with ``cmd2.with_category``. CommandSet command methods will always expect the same parameters as when defined in a ``cmd2.Cmd`` sub-class, except that ``self`` will now refer to the ``CommandSet`` instead of the cmd2 instance. The cmd2 instance can
Apply suggestions from code review Mainly LaTeX refinements
@@ -15,7 +15,7 @@ grand_parent: COVIDcast API 1. TOC {:toc} -## COVID-19 tests +## COVID-19 Tests * **First issued:** * **Number of data revisions since 19 May 2020:** 0 @@ -41,8 +41,8 @@ StorageDate, patient age, and unique identifiers for the device on which the test was performed, the individual test, and the result. Multiple tests are stored on each device. -Let n be the number of total COVID tests taken over a given time period and a -given location (the test result can be negative/positive/invalid). Let x be the +Let $$n$$ be the number of total COVID tests taken over a given time period and a +given location (the test result can be negative, positive, or invalid). Let $$x$$ be the number of tests taken with positive results in this location over the given time period. We are interested in estimating the percentage of positive tests which is defined as: @@ -56,9 +56,9 @@ We estimate p across 3 temporal-spatial aggregation schemes: - daily, at the HRR (hospital referral region) level; - daily, at the state level. -**MSA and HRR levels**: In a given MSA or HRR, suppose N COVID tests are taken -in a certain time period, X is the number of tests taken with positive -results. If $$N >= 50$$, we simply use: +**MSA and HRR levels**: In a given MSA or HRR, suppose $$N$$ COVID tests are taken +in a certain time period, $$X$$ is the number of tests taken with positive +results. If $$N \geq 50$$, we simply use: $$ p = \frac{100 X}{N} @@ -68,13 +68,13 @@ If $$N < 50$$, we lend $$50 - N$$ fake samples from its home state to shrink the estimate to the state's mean, which means: $$ -p = 100 \[ \frac{N}{50} * \frac{X}{N} + \frac{50 - N}{50} * \frac{X_s}{N_s} \] +p = 100 \left( \frac{N}{50} \frac{X}{N} + \frac{50 - N}{50} \frac{X_s}{N_s} \right) $$ where $$N_s, X_s$$ are the number of COVID tests and the number of COVID tests taken with positive results taken in its home state in the same time period. -**State level**: the states with fewer than 50 samples are discarded. For the +**State level**: the states with fewer than 50 tests are discarded. For the rest of the states with sufficient samples, $$ @@ -87,7 +87,7 @@ We assume the estimates for each time point follow a binomial distribution. The estimated standard error then is: $$ -se = \frac{1/100} sqrt{ \frac{p(1-p)}{N} } +\text{se} = \frac{1/100} \sqrt{ \frac{p(1-p)}{N} } $$ #### Smoothing @@ -129,7 +129,7 @@ a reported estimate for, say, June 10th may first be available in the API on June 14th and subsequently revised on June 16th. -## Flu tests +## Flu Tests * **First issued:** 20 April 2020 * **Last issued:** 19 May 2020
insecure_skip_verify works only on telegraf 1.4. skip it for now HG-- branch : feature/microservices
[[inputs.nginx]] ## An array of Nginx stub_status URI to gather stats. urls = ["https://{{ noc_web_host }}/ng_stats"] +{# waits for telegraf 1.4 insecure_skip_verify = {% if nginx_self_signed_cerificate %}true{% else %}false{% endif %} - +#} [[inputs.procstat]] pid_file = "/var/run/nginx.pid"
[bugfix] Remove description parameter passed to changeCommonscat. The description parameter of changeCommonscat was never used and was removed recently. The method must not be called with it.
@@ -305,7 +305,7 @@ class CommonscatBot(ConfigParserBot, ExistingPageBot, NoRedirectPageBot): self.changeCommonscat(page, currentCommonscatTemplate, currentCommonscatTarget, primaryCommonscat, - checkedCommonscatTarget, LinkText, Note) + checkedCommonscatTarget, LinkText) return # Commonscat link is wrong
Add query current minion config with config.items Fixes
@@ -461,3 +461,16 @@ def gather_bootstrap_script(bootstrap=None): ret = salt.utils.cloud.update_bootstrap(__opts__, url=bootstrap) if 'Success' in ret and len(ret['Success']['Files updated']) > 0: return ret['Success']['Files updated'][0] + +def items(): + ''' + Return the complete config from the currently running minion process. + This includes defaults for values not set in the config file. + + CLI Example: + + .. code-block:: bash + + salt '*' config.items + ''' + return __opts__
Viewer : Apply userDefaults to Views This allows config files to be used to configure the Viewer. For instance, the following changes the default display transform for the ImageView : ``` Gaffer.Metadata.registerValue( GafferImageUI.ImageView, "displayTransform", "userDefault", "rec709" ) ```
@@ -195,6 +195,7 @@ class Viewer( GafferUI.NodeSetEditor ) : if self.__currentView is None : self.__currentView = GafferUI.View.create( plug ) if self.__currentView is not None: + Gaffer.NodeAlgo.applyUserDefaults( self.__currentView ) self.__currentView.setContext( self.getContext() ) self.__views.append( self.__currentView ) # if we succeeded in getting a suitable view, then
doc/motors: hide status getters These are not yet implemented
@@ -107,14 +107,6 @@ These are all instances of the ``Control`` class given below. .. autoclass:: pybricks.builtins.Control :no-members: - .. rubric:: Control status - - .. automethod:: pybricks.builtins.Control.stalled - - .. automethod:: pybricks.builtins.Control.active - - .. rubric:: Control settings - .. automethod:: pybricks.builtins.Control.limits .. automethod:: pybricks.builtins.Control.pid
Bugfix: adopt Werkzeug's timestamp parsing. This uses the Response last_modified setter to parse the timestamp, which is consistent with Flask and also fixes a DST bug.
@@ -373,7 +373,7 @@ async def send_file( attachment_filename = file_path.name file_body = current_app.response_class.file_body_class(file_path) if last_modified is None: - last_modified = datetime.fromtimestamp(file_path.stat().st_mtime) + last_modified = file_path.stat().st_mtime # type: ignore if cache_timeout is None: cache_timeout = current_app.get_send_file_max_age(str(file_path)) etag = "{}-{}-{}".format(
Ray Interpolation functions Two interpolating functions to obtain a ray along a ray trace and along the line defined by two rays in z.
@@ -174,6 +174,86 @@ class Ray: return rays + @staticmethod + def along(ray1, ray2, z): + """This function returns a ray at position z using the line defined + by ray1 and ray2. y and theta are linearly interpolated. + + Parameters + ---------- + ray1 : Ray + First ray + ray2 : Ray + Second ray + z : float + Position in z where we want the output ray + + Returns + ------- + ray : an interpolated Ray + A Ray at position z, with y and theta interpolated from ray1 and ray2 + + Notes + ----- + If the first ray is blocked, then None is returned + + See Also + -------- + raytracing.Ray.alongTrace() + + """ + distance = ray2.z-ray1.z + if ray1.isBlocked: + return None + + if distance > 0: + slopeY = (ray2.y-ray1.y)/distance + slopeTheta = (ray2.theta-ray1.theta)/distance + dz = (z-ray1.z) + return Ray(y=ray1.y + dz * slopeY, theta=ray1.theta + dz * slopeTheta, z=z) + else: + return ray1 + + @staticmethod + def alongTrace(rayTrace, z): + """This function returns a ray at position z along the ray trace. + y and theta are linearly interpolated in between the two closest rays. + + Parameters + ---------- + rayTrace : list of Ray + The rayTrace + z : float + Position in z where we want the output ray + + Returns + ------- + ray : an interpolated Ray + A Ray at position z, with y and theta interpolated from the ray trace + + Notes + ----- + If the ray at an earlier z is blocked, then None is returned + + See Also + -------- + raytracing.Ray.along() + + """ + closestRay = rayTrace[0] + for ray in rayTrace: + if closestRay.isBlocked: + return None + + if ray.z == z: + if ray.isNotBlocked: + return ray + + if ray.z > z: + return Ray.along(ray1=closestRay,ray2=ray, z=z) + closestRay = ray + + def __str__(self): """String description that allows the use of print(Ray())."""
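A short usage sketch for the new `Ray.along()` helper; the values and the top-level import are assumptions, while the keyword constructor is the one already used inside the diff above:

```python
from raytracing import Ray  # assumed import path

# Two rays on the same line, 10 units apart in z, with the same slope.
ray1 = Ray(y=0.0, theta=0.1, z=0.0)
ray2 = Ray(y=1.0, theta=0.1, z=10.0)

# Linearly interpolate the ray halfway between them.
mid = Ray.along(ray1, ray2, z=5.0)
print(mid.y, mid.theta, mid.z)  # 0.5 0.1 5.0
```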
Parse gradient, hessian and atommasses from fchk Using the _parse_block method
@@ -171,6 +171,30 @@ class FChk(logfileparser.Logfile): if line[0:11] == 'Shell types': self.parse_aonames(line, inputfile) + if line[0:19] == 'Real atomic weights': + count = int(line.split()[-1]) + assert count == self.natom + + atommasses = numpy.array(self._parse_block(inputfile, count, float, 'Atomic Masses')) + + self.set_attribute('atommasses', atommasses) + + if line[0:18] == 'Cartesian Gradient': + count = int(line.split()[-1]) + assert count == self.natom*3 + + gradient = numpy.array(self._parse_block(inputfile, count, float, 'Gradient')) + + self.set_attribute('grads', gradient) + + if line[0:25] == 'Cartesian Force Constants': + count = int(line.split()[-1]) + assert count == (3*self.natom*(3*self.natom+1))/2 + + hessian = numpy.array(self._parse_block(inputfile, count, float, 'Gradient')) + + self.set_attribute('hessian', hessian) + def parse_aonames(self, line, inputfile): # e.g.: Shell types I N= 28 count = int(line.split()[-1])
Handle unregistered transformations more gracefully Deserialize unregistered transformations as 'dummy transformations', which are instances of the base Transformation or SubgraphTransformation classes with all the necessary information attached to re-serialize them without loss of information.
@@ -153,8 +153,8 @@ class Transformation(TransformationBase): self.sdfg_id = sdfg_id self.state_id = state_id - expr = self.expressions()[expr_index] if not override: + expr = self.expressions()[expr_index] for value in subgraph.values(): if not isinstance(value, int): raise TypeError('All values of ' @@ -372,6 +372,13 @@ class Transformation(TransformationBase): def to_json(self, parent=None) -> Dict[str, Any]: props = serialize.all_properties_to_json(self) + if hasattr(self, 'dummy_for'): + return { + 'type': 'Transformation', + 'transformation': self.dummy_for, + **props + } + else: return { 'type': 'Transformation', 'transformation': type(self).__name__, @@ -385,6 +392,23 @@ class Transformation(TransformationBase): xform = next(ext for ext in Transformation.extensions().keys() if ext.__name__ == json_obj['transformation']) except StopIteration: + # This json object indicates that it is a transformation, but the + # corresponding serializable class cannot be found (i.e. the + # transformation is not known to DaCe). Handle this by creating a + # dummy transformation to keep serialization intact. + if all(attr in json_obj for attr in [ + 'sdfg_id', 'state_id', '_subgraph', 'expr_index', + 'transformation' + ]): + xform = Transformation( + sdfg_id=json_obj['sdfg_id'], + state_id=json_obj['state_id'], + subgraph=json_obj['_subgraph'], + expr_index=json_obj['expr_index'], + override=True + ) + xform.dummy_for = json_obj['transformation'] + return xform return None # Recreate subgraph @@ -697,6 +721,13 @@ class SubgraphTransformation(TransformationBase): def to_json(self, parent=None): props = serialize.all_properties_to_json(self) + if hasattr(self, 'dummy_for'): + return { + 'type': 'SubgraphTransformation', + 'transformation': self.dummy_for, + **props + } + else: return { 'type': 'SubgraphTransformation', 'transformation': type(self).__name__, @@ -710,6 +741,20 @@ class SubgraphTransformation(TransformationBase): xform = next(ext for ext in SubgraphTransformation.extensions().keys() if ext.__name__ == json_obj['transformation']) except StopIteration: + # This json object indicates that it is a transformation, but the + # corresponding serializable class cannot be found (i.e. the + # transformation is not known to DaCe). Handle this by creating a + # dummy transformation to keep serialization intact. + if all(attr in json_obj for attr in [ + 'sdfg_id', 'state_id', '_subgraph', 'transformation' + ]): + xform = SubgraphTransformation( + sdfg_id=json_obj['sdfg_id'], + state_id=json_obj['state_id'], + subgraph=json_obj['_subgraph'] + ) + xform.dummy_for = json_obj['transformation'] + return xform return None # Reconstruct transformation
Reduce code duplication in JpegCompression This patch decreases code duplication in the parameter parsing of augmenters.arithmetic.JpegCompression by using the parameter handling functions in parameters.py. It also enables the compression parameter to convert a list of values to Choice.
@@ -1573,14 +1573,17 @@ class JpegCompression(Augmenter): Parameters ---------- - compression : int or tuple of two ints or StochasticParameter + compression : number or tuple of two number or list of number or StochasticParameter Degree of compression using saving to `jpeg` format in range [0, 100] High values for compression cause more artifacts. Standard value for image processing software is default value set to between 50 and 80. At 100 image is unreadable and at 0 no compression is used and the image occupies much more memory. - * If a single int, then that value will be used for the compression degree. - * If a tuple of two ints (a, b), then the compression will be a + + * If a single number, then that value will be used for the compression degree. + * If a tuple of two number (a, b), then the compression will be a value sampled from the interval [a..b]. + * If a list, then a random value will be sampled and used as the + compression per image. * If a StochasticParameter, then N samples will be drawn from that parameter per N input images, each representing the compression for the nth image. Expected to be discrete. @@ -1602,16 +1605,9 @@ class JpegCompression(Augmenter): """ def __init__(self, compression=50, name=None, deterministic=False, random_state=None): super(JpegCompression, self).__init__(name=name, deterministic=deterministic, random_state=random_state) - if ia.is_single_number(compression): - ia.do_assert(100 >= compression >= 0 and ia.is_single_integer(compression), "Expected compression to have range [0, 100], got value %.4f." % (compression,)) - self.compression = Deterministic(compression) - elif ia.is_iterable(compression): - ia.do_assert(len(compression) == 2, "Expected tuple/list with 2 entries, got %d entries." % (len(compression),)) - self.compression = Uniform(compression[0], compression[1]) - elif isinstance(compression, StochasticParameter): - self.compression = compression - else: - raise Exception("Expected float or int, tuple/list with 2 entries or StochasticParameter. Got %s." % (type(compression),)) + + # will be converted to int during augmentation, which is why we allow floats here + self.compression = iap.handle_continuous_param(compression, "compression", value_range=(0, 100), tuple_to_uniform=True, list_to_choice=True) # The value range 1 to 95 is suggested by PIL's save() documentation # Values above 95 seem to not make sense (no improvement in visual quality, but large file size)
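A hedged usage sketch of the resulting parameter handling; the array shape is arbitrary, and the tuple and list forms follow the docstring in the diff above:

```python
import numpy as np
import imgaug.augmenters as iaa

images = np.random.randint(0, 255, size=(4, 64, 64, 3), dtype=np.uint8)

# Tuple -> uniform sampling per image; a list such as [75, 90] would instead
# pick one of the given values per image (Choice), as described above.
aug = iaa.JpegCompression(compression=(70, 99))
images_aug = aug.augment_images(images)
print(images_aug.shape)  # (4, 64, 64, 3)
```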
Supply a default value to the call to get_device_setting to prevent the DeviceProvisioned error that stops the setup wizard page from loading.
@@ -128,7 +128,9 @@ class FrontEndCoreAppAssetHook(WebpackBundleHook): "languageGlobals": self.language_globals(), "oidcProviderEnabled": OIDCProviderHook.is_enabled(), "kolibriTheme": ThemeHook.get_theme(), - "isSubsetOfUsersDevice": get_device_setting("subset_of_users_device"), + "isSubsetOfUsersDevice": get_device_setting( + "subset_of_users_device", False + ), } def language_globals(self):
Fix "Notebook loading error" on colab

* Fix "Notebook loading error" on colab
  There was a missing comma after the array element on line 46, so I added it. From what I can tell, this was causing the colab to be unable to load.
* Update AIDungeon_2.ipynb
"\n", "## About\n", "* While you wait you can [read adventures others have had](https://aidungeon.io/)\n", - "* [Read more](https://pcc.cs.byu.edu/2019/11/21/ai-dungeon-2-creating-infinitely-generated-text-adventures-with-deep-learning-language-models/) about how AI Dungeon 2 is made." - "- **[Support AI Dungeon 2](https://www.patreon.com/bePatron?u=19115449) on Patreon to help me to continue improving the game with all the awesome ideas I have for its future!**\n", + "* [Read more](https://pcc.cs.byu.edu/2019/11/21/ai-dungeon-2-creating-infinitely-generated-text-adventures-with-deep-learning-language-models/) about how AI Dungeon 2 is made.", + "- **[Support AI Dungeon 2](https://www.patreon.com/bePatron?u=19115449) on Patreon to help me to continue improving the game with all the awesome ideas I have for its future!**\n" ] }, {
filestore-to-bluestore: skip bluestore osd nodes If the OSD node is already using bluestore OSDs then we should skip all the remaining tasks to avoid purging OSD for nothing. Instead we warn the user. Closes:
- import_role: name: ceph-defaults + - name: set_fact current_objectstore + set_fact: + current_objectstore: '{{ osd_objectstore }}' + + - name: warn user about osd already using bluestore + debug: + msg: 'WARNING: {{ inventory_hostname }} is already using bluestore. Skipping all tasks.' + when: current_objectstore == 'bluestore' + + - name: shrink and redeploy filestore osds + when: current_objectstore == 'filestore' + block: - import_role: name: ceph-facts filter: ansible_devices when: osd_auto_discovery | bool + - name: force osd_objectstore to bluestore + set_fact: + osd_objectstore: bluestore + - import_role: name: ceph-defaults - import_role: when: containerized_deployment | bool - import_role: name: ceph-config - vars: - osd_objectstore: bluestore - import_role: name: ceph-osd - vars: - osd_objectstore: bluestore
Patch 7

* Update custom-buttons.md
  Set the 'updatemenu method' link directly to the method instead of the page header.
* Update custom-buttons.md
  Bolded phrases and additional explanation in the "methods" section.
@@ -34,10 +34,10 @@ jupyter: --- #### Methods -The [updatemenu method](https://plot.ly/python/reference/#layout-updatemenus-buttons-method) determines which [plotly.js function](https://plot.ly/javascript/plotlyjs-function-reference/) will be used to modify the chart. There are 4 possible methods: -- `"restyle"`: modify data or data attributes -- `"relayout"`: modify layout attributes -- `"update"`: modify data **and** layout attributes +The [updatemenu method](https://plot.ly/python/reference/#layout-updatemenus-items-updatemenu-buttons-items-button-method) determines which [plotly.js function](https://plot.ly/javascript/plotlyjs-function-reference/) will be used to modify the chart. There are 4 possible methods: +- `"restyle"`: modify **data** or data attributes +- `"relayout"`: modify **layout** attributes +- `"update"`: modify **data and layout** attributes; combination of `"restyle"` and `"relayout"` - `"animate"`: start or pause an [animation](https://plot.ly/python/#animations))
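As a sketch-level illustration of the `"update"` method described above, combining a data change and a layout change in one button; the figure, labels, and values are placeholders:

```python
import plotly.graph_objects as go

fig = go.Figure(data=[go.Scatter(y=[1, 3, 2])])
fig.update_layout(
    updatemenus=[dict(
        type="buttons",
        buttons=[dict(
            label="Hide trace & retitle",
            method="update",
            # "update" takes [data changes, layout changes]
            args=[{"visible": [False]}, {"title": "Trace hidden"}],
        )],
    )]
)
fig.show()
```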
change fields to CustomFields in stixParser
@@ -14,7 +14,7 @@ script: |- i = 0 def create_new_ioc(data, i, time, pkg_id, ind_id): data.append({}) - data[i]['fields'] = {'indicator_id': ind_id, 'stix_package_id':pkg_id} + data[i]['CustomFields'] = {'indicator_id': ind_id, 'stix_package_id':pkg_id} data[i]['source'] = ind_id.split(':')[0] if time is not None: data[i]['timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
[bugfix] Fix wikibase_tests.py after

- use _test_new_empty in DataCollectionTestCase which is called by each subclass
- use self.assertIsEmpty(result) instead of self.assertLength(result, 0)
- self.assert(Not)In(attr, item.__dict__) to verify that an attribute exists. hasattr() would load the attributes.
- use subTests
@@ -55,19 +55,6 @@ class WbRepresentationTestCase(WikidataTestCase): self.assertLength(set(list_of_dupes), 1) -class DataCollectionTestCase(WikidataTestCase): - - """Test case for a Wikibase collection class.""" - - collection_class = None - - def test_new_empty(self): - """Test that new_empty method returns empty collection.""" - cls = self.collection_class - result = cls.new_empty(self.get_repo()) - self.assertLength(result, 0) - - class TestLoadRevisionsCaching(BasePageLoadRevisionsCachingTestBase, WikidataTestCase): @@ -969,11 +956,14 @@ class TestItemLoad(WikidataTestCase): attrs = ['_content', 'labels', 'descriptions', 'aliases', 'claims', 'sitelinks'] for attr in attrs: - self.assertFalse(hasattr(item, attr)) + with self.subTest(attr=attr, note='before loading'): + # hasattr() loads the attributes; use item.__dict__ for tests + self.assertNotIn(attr, item.__dict__) item.labels # trigger loading for attr in attrs: - self.assertTrue(hasattr(item, attr)) + with self.subTest(attr=attr, note='after loading'): + self.assertIn(attr, item.__dict__) def test_load_item_set_id(self): """Test setting item.id attribute on empty item.""" @@ -1789,6 +1779,19 @@ class TestLinks(WikidataTestCase): self.assertLength(wvlinks, 2) +class DataCollectionTestCase(WikidataTestCase): + + """Test case for a Wikibase collection class.""" + + collection_class = None + + def _test_new_empty(self): + """Test that new_empty method returns empty collection.""" + cls = self.collection_class + result = cls.new_empty(self.get_repo()) + self.assertIsEmpty(result) + + class TestLanguageDict(DataCollectionTestCase): """Test cases covering LanguageDict methods.""" @@ -1865,6 +1868,10 @@ class TestLanguageDict(DataCollectionTestCase): LanguageDict.normalizeData(self.lang_out), {'en': {'language': 'en', 'value': 'foo'}}) + def test_new_empty(self): + """Test that new_empty method returns empty collection.""" + self._test_new_empty() + class TestAliasesDict(DataCollectionTestCase): @@ -1962,6 +1969,10 @@ class TestAliasesDict(DataCollectionTestCase): ]} self.assertEqual(AliasesDict.normalizeData(data_in), data_out) + def test_new_empty(self): + """Test that new_empty method returns empty collection.""" + self._test_new_empty() + class TestWriteNormalizeData(TestCase):
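A generic sketch (not pywikibot code) of the point made in the message above: `hasattr()` triggers lazy loading, while checking `__dict__` does not:

```python
class LazyItem:
    """Toy stand-in for an entity whose attributes are fetched on first access."""

    def __getattr__(self, name):
        # __getattr__ only runs for *missing* attributes, so hasattr() triggers it.
        if name == "labels":
            self.labels = {"en": "loaded"}  # pretend this hit the network
            return self.labels
        raise AttributeError(name)

item = LazyItem()
print("labels" in item.__dict__)  # False -- no side effect
print(hasattr(item, "labels"))    # True  -- but this just *loaded* the attribute
print("labels" in item.__dict__)  # True now
```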
docs: fix graphql query to return failed runs Summary: These queries should be under test. Fixing the immediate problem first. Test Plan: run query locally Reviewers: sashank, sidkmenon
@@ -116,7 +116,7 @@ The `pipelineRunsOrError` query also takes in an optional filter argument, of ty For example, the following query will return all failed runs: query FilteredPipelineRuns { - pipelineRunsOrError(filter: { status: FAILURE }) { + pipelineRunsOrError(filter: { statuses: [FAILURE] }) { __typename ... on PipelineRuns { results {
[risksense-835] RiskSense Integration

### Enhancement
- Human readable format change in app detail command
@@ -1840,7 +1840,7 @@ script: description: The state of the ticket associated with the application. type: String description: This command is used to lookup single application details in depth. Command accepts application id as an argument. - dockerimage: demisto/python3:3.8.1.6120 + dockerimage: demisto/python3:3.8.1.6442 isfetch: false longRunning: false longRunningPort: false
Creating convert_to_tfrecord function Changing file to follow examples/how_tos/reading_data/convert_to_records.py
@@ -58,16 +58,9 @@ def read_pickle_from_file(filename): return data_dict -def main(argv): - del argv # Unused. - - file_names = _get_file_names() - for file_name in file_names: - input_file = os.path.join(FLAGS.input_dir, file_name) - output_file = os.path.join(FLAGS.output_dir, file_name + '.tfrecords') - +def convert_to_tfrecord(input_file, name): + """Converts a file to tfrecords.""" print('Generating %s' % output_file) - record_writer = tf.python_io.TFRecordWriter(output_file) data_dict = read_pickle_from_file(input_file) @@ -84,6 +77,16 @@ def main(argv): record_writer.write(example.SerializeToString()) record_writer.close() +def main(argv): + del argv # Unused. + + file_names = _get_file_names() + for file_name in file_names: + input_file = os.path.join(FLAGS.input_dir, file_name) + output_file = os.path.join(FLAGS.output_dir, file_name + '.tfrecords') + # Convert to Examples and write the result to TFRecords. + convert_to_tfrecord(input_file, output_file) + print('Done!')
Ensure we can serialize protobuf messages if present. We need to either support arbitrary pickle format, or make a better way to add plugins for custom serialization.
@@ -23,6 +23,11 @@ import pytz import lz4.frame import logging +try: + from google.protobuf.message import Message as GoogleProtobufMessage +except ImportError: + GoogleProtobufMessage = None + _reconstruct = numpy.array([1, 2, 3]).__reduce__()[0] _ndarray = numpy.ndarray @@ -201,6 +206,8 @@ class SerializationContext(object): @param inst: an instance to be serialized @return a representation object or None ''' + if GoogleProtobufMessage is not None and isinstance(inst, GoogleProtobufMessage): + return (type(inst), (), inst.SerializeToString()) if isinstance(inst, type): isTF = isTypeFunctionType(inst) @@ -260,6 +267,10 @@ class SerializationContext(object): return None def setInstanceStateFromRepresentation(self, instance, representation): + if GoogleProtobufMessage is not None and isinstance(instance, GoogleProtobufMessage): + instance.ParseFromString(representation) + return True + if isinstance(instance, datetime.datetime): return True
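For reference, a minimal round-trip of the `SerializeToString()` / `ParseFromString()` pair the hooks rely on, shown here with protobuf's bundled `Struct` type as a stand-in (any generated message class behaves the same way):

```python
from google.protobuf.struct_pb2 import Struct

msg = Struct()
msg.update({"key": "value"})       # Struct conveniently behaves like a dict

payload = msg.SerializeToString()  # bytes stored as the "representation"

clone = Struct()
clone.ParseFromString(payload)     # how setInstanceStateFromRepresentation restores it
print(clone["key"])                # value
```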
Resolve spamming log on blobstor disconnect. Sanitize blobstor printing.
@@ -32,6 +32,12 @@ logger = logging.getLogger(__name__) CHUNK_SIZE = 16 * s_const.mebibyte +def _path_sanitize(blobstorpath): + ''' + The path might contain username/password, so just return the last part + ''' + return '.../' + blobstorpath.rsplit('/', 1)[-1] + async def to_aiter(it): ''' Take either a sync or async iteratable and yields as an async generator @@ -769,7 +775,7 @@ class _ProxyKeeper(s_coro.Fini): try: proxy = await s_telepath.openurl(path) except Exception: - logger.exception('Failed to connect to telepath %s', path) + logger.exception('Failed to connect to telepath %s', _path_sanitize(path)) raise newbsid = await self._addproxy(proxy, path) @@ -944,7 +950,7 @@ class Axon(s_cell.Cell): try: await self._start_watching_blobstor(path) except Exception: - logger.error('At axon startup, failed to connect to stored blobstor path %s', path) + logger.error('At axon startup, failed to connect to stored blobstor path %s', _path_sanitize(path)) s_glob.plex.addLoopCoro(_connect_to_blobstors()) @@ -1023,7 +1029,7 @@ class Axon(s_cell.Cell): ''' As Axon, Monitor a blobstor, by repeatedly asking long-poll-style for its new data ''' - logger.info('Watching BlobStor %s', blobstorpath) + logger.info('Watching BlobStor %s', _path_sanitize(blobstorpath)) CLONE_TIMEOUT = 60.0 cur_offset = self._getSyncProgress(bsid) @@ -1031,9 +1037,6 @@ class Axon(s_cell.Cell): try: if blobstor.isfini: blobstor = await s_telepath.openurl(blobstorpath) - if blobstor is None: - logger.warning('No longer monitoring %s for new data', blobstorpath) - break async def clone_and_next(): ''' Get the async generator and the first item of that generator ''' @@ -1063,13 +1066,18 @@ class Axon(s_cell.Cell): if genr is None: continue - logger.debug('Got clone data for %s', blobstorpath) + logger.debug('Got clone data for %s', _path_sanitize(blobstorpath)) cur_offset = 1 + await self._consume_clone_data(first_item, genr, bsid) await self._executor_nowait(self._updateSyncProgress, bsid, cur_offset) + except ConnectionRefusedError: + logger.warning('Trouble connecting to blobstor %s. Will retry in %ss.', _path_sanitize(blobstorpath), + CLONE_TIMEOUT) + await s_glob.plex.sleep(CLONE_TIMEOUT) except Exception: if not self.isfini: - logger.exception('BlobStor.blobstorLoop error') + logger.exception('BlobStor._watch_blobstor error on %s', _path_sanitize(blobstorpath)) + await s_glob.plex.sleep(CLONE_TIMEOUT) def _updateSyncProgress(self, bsid, new_offset): ''' @@ -1095,7 +1103,6 @@ class Axon(s_cell.Cell): rv = 0 else: rv = struct.unpack('>Q', lval)[0] - logger.debug('Axon._getSyncProgress: %r: %r', bsid, rv) return rv @s_common.firethread
Update mysql_toolkits.py im2recipe
@@ -11,7 +11,7 @@ def connect_mysql(): conn = pymysql.connect(host="127.0.0.1",user="root",port=3306,password="123456",database="mysql", local_infile=True) return conn except Exception as e: - print("CINNECT MYSQL ERROR:", e) + print("CONNECT MYSQL ERROR:", e) # return "connect mysql faild" @@ -35,7 +35,7 @@ def load_data_to_mysql(conn, cursor): cursor.execute(sql) conn.commit() except Exception as e: - print("CREATE MYSQL TABLE ERROR:", e) + print("LOAD DATA TO MYSQL ERROR:", e) # conn.rollback() # print("load data faild")
Fixed E131 flake8 errors (continuation line unaligned for hanging indent)
@@ -196,6 +196,6 @@ filterwarnings = ignore:.*inspect.getargspec.*deprecated, use inspect.signature.*:DeprecationWarning [flake8] -ignore = E131,E201,E202,E203,E221,E222,E225,E226,E231,E241,E251,E261,E262,E265,E271,E272,E293,E301,E302,E303,E401,E402,E501,E701,E702,E704,E712,E731 +ignore = E201,E202,E203,E221,E222,E225,E226,E231,E241,E251,E261,E262,E265,E271,E272,E293,E301,E302,E303,E401,E402,E501,E701,E702,E704,E712,E731 max-line-length = 120 exclude = _pytest/vendored_packages/pluggy.py
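For context, a small illustration (assumed, not taken from the repository) of the layout the E131 check is about: a continuation line whose indentation does not match the hanging indent established above it:

```python
# Would be flagged as E131 ("continuation line unaligned for hanging indent"):
bad = [
    1, 2, 3,
      4, 5, 6,  # indented differently from the first continuation line
]

# Passes:
good = [
    1, 2, 3,
    4, 5, 6,
]
print(bad == good)  # True -- only the layout differs
```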
Editor: highlight current line Highlighting the current line makes it easier to spot the cursor, especially when jumping to a specific line in the editor coming from the Journal.
@@ -27,6 +27,9 @@ import 'codemirror/addon/comment/comment'; // placeholder import 'codemirror/addon/display/placeholder'; +// highlight line +import 'codemirror/addon/selection/active-line'; + import './codemirror/fold-beancount'; import './codemirror/hint-beancount'; import './codemirror/mode-beancount'; @@ -203,6 +206,7 @@ const sourceEditorOptions = { lineNumbers: true, foldGutter: true, showTrailingSpace: true, + styleActiveLine: true, gutters: ['CodeMirror-linenumbers', 'CodeMirror-foldgutter'], extraKeys: { 'Ctrl-Space': 'autocomplete',
Update django.po Author info added
# SOME DESCRIPTIVE TITLE. # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER # This file is distributed under the same license as the PACKAGE package. -# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR. +# FIRST AUTHOR <[email protected]>, 2020. # msgid "" msgstr ""
Bugfix for wait_for_ssh: waiting for connectivity causes issues when using an SSH gateway, yet this is simply doing a basic check for port 22 connectivity.
- role: "lxc_container_create" post_tasks: - name: Wait for ssh to be available - local_action: - module: wait_for + wait_for: port: "22" host: "{{ ansible_host | default(inventory_hostname) }}" search_regex: OpenSSH delay: 1 + delegate_to: "{{ physical_host }}" vars: is_metal: "{{ properties.is_metal|default(false) }}" tags:
Update docs for conditional Key callbacks Closes 2571
@@ -45,6 +45,26 @@ The :class:`EzKey` modifier keys (i.e. ``MASC``) can be overwritten through the 'C': 'control', } +Callbacks can also be configured to work only under certain conditions by using +the ``when()`` method. Currently, two conditions are supported: + +:: + + from libqtile.config import Key + + keys = [ + # Only trigger callback for a specific layout + Key( + [mod, 'shift'], + "j", + lazy.layout.grow().when(layout='verticaltile'), + lazy.layout.grow_down().when(layout='columns') + ), + + # Limit action to when the current window is not floating (default True) + Key([mod], "f", lazy.window.toggle_fullscreen().when(when_floating=False)) + ] + KeyChords =========
Update kikBplistmeta.py Made artifact media_to_html compliant
#!/usr/bin/env python3 - +from pathlib import Path import os import biplist from scripts.artifact_report import ArtifactHtmlReport -from scripts.ilapfuncs import logfunc, tsv, is_platform_windows +from scripts.ilapfuncs import logfunc, tsv, is_platform_windows, media_to_html def get_kikBplistmeta(files_found, report_folder, seeker, wrap_text): @@ -42,11 +42,21 @@ def get_kikBplistmeta(files_found, report_folder, seeker, wrap_text): if x['name'] == 'file-name': filename = x.get('value', '') elif key == 'image': - thumbfilename = id+'.jpg' - file = open(f'{report_folder}{thumbfilename}', "wb") + thumbfilename = id + + complete = Path(report_folder).joinpath('Kik') + if not complete.exists(): + Path(f'{complete}').mkdir(parents=True, exist_ok=True) + + file = open(f'{complete}{thumbfilename}', "wb") file.write(val[0]['value']) file.close() - thumb = f'<img src="{report_folder}{thumbfilename}" width="300"></img>' + + imagetofind = [] + imagetofind.append(f'{complete}{thumbfilename}') + thumb = media_to_html(id, imagetofind, report_folder) + + elif key == 'app-id': appid = val
Update apt_oilrig.txt Other stuff was added in previous PRs.
@@ -64,3 +64,9 @@ prosalar.com # Reference: https://misterch0c.blogspot.com/2019/04/apt34-oilrig-leak.html myleftheart.com + +# Reference: https://unit42.paloaltonetworks.com/behind-the-scenes-with-oilrig/ +# Reference: https://otx.alienvault.com/pulse/5cc8494e1a6c9c572567ba7f + +msoffice-cdn.com +office365-management.com
Fixed: duplicate parameter and missing parameter. Changed the duplicate parameter `data_files` in `DatasetBuilder.__init__` to the missing parameter `data_dir`.
@@ -231,7 +231,7 @@ class DatasetBuilder: For example to separate "squad" from "lhoestq/squad" (the builder name would be "lhoestq___squad"). data_files: for builders like "csv" or "json" that need the user to specify data files. They can be either local or remote files. For convenience you can use a DataFilesDict. - data_files: `str`, for builders that require manual download. It must be the path to the local directory containing + data_dir: `str`, for builders that require manual download. It must be the path to the local directory containing the manually downloaded data. config_kwargs: will override the defaults kwargs in config
Move TPIU configuring to set_swo_clock() method. TPIU should not have been configuring for SWO in the init() method. Improved some doc comments.
@@ -48,26 +48,40 @@ class TPIU(CoreSightComponent): return self._has_swo_uart def init(self): - """! @brief Configures the TPIU for SWO UART mode.""" + """! @brief Reads TPIU capabilities. + + Currently this method simply checks whether the TPIU supports SWO in asynchronous + UART mode. The result of this check is available via the has_swo_uart property. + """ devid = self.ap.read32(self.address + TPIU.DEVID) self._has_swo_uart = (devid & TPIU.DEVID_NRZ_MASK) != 0 - # Go ahead and configure for SWO. - self.ap.write32(self.address + TPIU.SPPR, TPIU.SPPR_TXMODE_NRZ) # Select SWO UART mode. - self.ap.write32(self.address + TPIU.FFCR, 0) # Disable formatter. - def set_swo_clock(self, swo_clock, system_clock): - """! @brief Sets the SWO clock frequency based on the system clock. + """! @brief Prepare TPIU for transmitting SWO at a given baud rate. + + Configures the TPIU for SWO UART mode, then sets the SWO clock frequency based on + the provided system clock. @param self - @param Desired SWO baud rate in Hertz. + @param swo_clock Desired SWO baud rate in Hertz. @param system_clock The frequency of the SWO clock source in Hertz. This is almost always the system clock, also called the HCLK or fast clock. - @return Boolean indicating if the requested frequency could be set within 3%. + @return Boolean indicating if SWO UART mode could be configured with the requested + baud rate set within 3%. """ + # First check whether SWO UART is supported. + if not self.has_swo_uart: + return False + + # Go ahead and configure for SWO. + self.ap.write32(self.address + TPIU.SPPR, TPIU.SPPR_TXMODE_NRZ) # Select SWO UART mode. + self.ap.write32(self.address + TPIU.FFCR, 0) # Disable formatter. + + # Compute the divider. div = (system_clock // swo_clock) - 1 actual = system_clock // (div + 1) deltaPercent = abs(swo_clock - actual) / swo_clock + # Make sure the target baud rate was met with 3%. if deltaPercent > 0.03: return False self.ap.write32(self.address + TPIU.ACPR, div & TPIU.ACPR_PRESCALER_MASK)
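A standalone sketch of the divider arithmetic described above; the example clock values are assumptions:

```python
# Reproduce the prescaler math from set_swo_clock() with example clocks.
system_clock = 72_000_000  # HCLK in Hz (assumed)
swo_clock = 2_000_000      # requested SWO baud rate in Hz (assumed)

div = (system_clock // swo_clock) - 1  # value written to TPIU.ACPR
actual = system_clock // (div + 1)     # baud rate actually produced
delta_percent = abs(swo_clock - actual) / swo_clock

print(div, actual, delta_percent)  # 35 2000000 0.0 -> well within the 3% tolerance
```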
Fix line endings for gaphor/conftest We do unix line endings!
Everything is about services so the Case can define it's required services and start off. """ +from __future__ import annotations import logging from io import StringIO @@ -25,6 +26,7 @@ from gaphor.diagram.painter import ItemPainter from gaphor.diagram.selection import Selection T = TypeVar("T") +S = TypeVar("S") log = logging.getLogger("Gaphor") log.setLevel(logging.WARNING) @@ -60,7 +62,12 @@ class Case: def get_service(self, name): return self.session.get_service(name) - def create(self, item_cls: Type[T], subject_cls=None, subject=None) -> T: + def create( + self, + item_cls: type[T], + subject_cls: type[S] | None = None, + subject: S | None = None, + ) -> T: """Create an item with specified subject.""" if subject_cls is not None: subject = self.element_factory.create(subject_cls)
remove getting internal build number during deployment As ODF 4.9 is GA'ed, we don't need to decide whether to check mcg-operator or noobaa-operator based on the build number. From ODF 4.9, it's mcg-operator.
@@ -92,7 +92,6 @@ from ocs_ci.utility.utils import ( exec_cmd, get_cluster_name, get_latest_ds_olm_tag, - get_ocs_build_number, is_cluster_running, run_cmd, run_cmd_multicluster, @@ -653,12 +652,8 @@ class Deployment(object): ocs_operator_names = [ defaults.ODF_OPERATOR_NAME, defaults.OCS_OPERATOR_NAME, + defaults.MCG_OPERATOR, ] - build_number = version.get_semantic_version(get_ocs_build_number()) - if build_number >= version.get_semantic_version("4.9.0-231"): - ocs_operator_names.append(defaults.MCG_OPERATOR) - else: - ocs_operator_names.append(defaults.NOOBAA_OPERATOR) # workaround for https://bugzilla.redhat.com/show_bug.cgi?id=2075422 if live_deployment and ocs_version == version.VERSION_4_9: ocs_operator_names.remove(defaults.MCG_OPERATOR)
2.7.0 Automatically generated by python-semantic-release
@@ -9,7 +9,7 @@ https://community.home-assistant.io/t/echo-devices-alexa-as-media-player-testers """ from datetime import timedelta -__version__ = "2.6.1" +__version__ = "2.7.0" PROJECT_URL = "https://github.com/custom-components/alexa_media_player/" ISSUE_URL = "{}issues".format(PROJECT_URL)
Rename Struct into StructType and create an alias for the DSL TN:
@@ -1852,7 +1852,7 @@ class TypeDeclaration(object): ) -class Struct(CompiledType): +class StructType(CompiledType): """ Base class for all user struct-like composite types, such as POD structs and AST nodes. @@ -2266,7 +2266,7 @@ class Struct(CompiledType): )) -class ASTNode(Struct): +class ASTNode(StructType): """ Base class for all user AST nodes. @@ -3059,3 +3059,7 @@ T = TypeRepo() Default type repository instance, to be used to refer to a type before its declaration """ + + +# Aliases for the user DSL +Struct = StructType
Properly emit belongs-to annotations for fields' doc libadalang#923
<% type_name = field.struct.entity.api_name ret_type = field.type.entity if field.type.is_ast_node else field.type + doc = ada_doc(field, 3) %> function ${field.api_name} (Node : ${type_name}'Class) return ${ret_type.api_name}; - ${ada_doc(field, 3)} + % if doc: + ${doc} + --% belongs-to: ${field.struct.entity.api_name} + % else: + --% belongs-to: ${field.struct.entity.api_name} + % endif ## If this field return an enum node, generate a shortcut to get the ## symbolic value. % if field.type.is_bool_node: function ${field.api_name} (Node : ${type_name}'Class) return Boolean; + --% belongs-to: ${field.struct.entity.api_name} % elif field.type.is_enum_node: function ${field.api_name} (Node : ${type_name}'Class) return ${field.type.ada_kind_name}; + --% belongs-to: ${field.struct.entity.api_name} % endif </%def>
Disable more xpack features, add some documentation. Fixes
@@ -50,7 +50,14 @@ services: elasticsearch: image: docker.elastic.co/elasticsearch/elasticsearch:5.4.1 environment: + # Disable all xpack related features to avoid unrelated logging + # in docker logs. https://github.com/mozilla/addons-server/issues/8887 + # This also avoids us to require authentication for local development + # which simplifies the setup. - xpack.security.enabled=false + - xpack.monitoring.enabled=false + - xpack.graph.enabled=false + - xpack.watcher.enabled=false - "discovery.type=single-node" - "ES_JAVA_OPTS=-Xms512m -Xmx512m" mem_limit: 2g
Update __init__.py Update go hub binary to fix es sync test.
__hub_url__ = ( - "https://github.com/lbryio/hub/releases/download/v0.2021.12.18.1/hub" + "https://github.com/lbryio/hub/releases/download/v0.2022.01.21.1/hub" ) from .node import Conductor from .service import ConductorService
Update README.md Links to new repo location
-![](https://raw.githubusercontent.com/isislab/CTFd/master/CTFd/static/original/img/logo.png) +![](https://raw.githubusercontent.com/CTFd/CTFd/master/CTFd/static/original/img/logo.png) ==== -[![Build Status](https://travis-ci.org/isislab/CTFd.svg?branch=master)](https://travis-ci.org/isislab/CTFd) +[![Build Status](https://travis-ci.org/CTFd/CTFd.svg?branch=master)](https://travis-ci.org/CTFd/CTFd) [![CTFd Slack](https://slack.ctfd.io/badge.svg)](https://slack.ctfd.io/) CTFd is a CTF in a can. Easily modifiable and has everything you need to run a jeopardy style CTF. Install: 1. `./prepare.sh` to install dependencies using apt. - 2. Modify [CTFd/config.py](https://github.com/isislab/CTFd/blob/master/CTFd/config.py) to your liking. + 2. Modify [CTFd/config.py](https://github.com/CTFd/CTFd/blob/master/CTFd/config.py) to your liking. 3. Use `python serve.py` in a terminal to drop into debug mode. - 4. [Here](https://github.com/isislab/CTFd/wiki/Deployment) are some deployment options + 4. [Here](https://github.com/CTFd/CTFd/wiki/Deployment) are some deployment options Live Demo: https://demo.ctfd.io/
Use numpy.repeat() to accelerate offsets2parents About a factor of 6 speedup
@@ -41,12 +41,10 @@ class JaggedArray(awkward.array.base.AwkwardArrayWithContent): @classmethod def offsets2parents(cls, offsets): - out = cls.numpy.zeros(offsets[-1], dtype=cls.JaggedArray.fget(None).INDEXTYPE) - cls.numpy.add.at(out, offsets[offsets != offsets[-1]][1:], 1) - cls.numpy.cumsum(out, out=out) - if offsets[0] > 0: - out[:offsets[0]] = -1 - return out + dtype = cls.JaggedArray.fget(None).INDEXTYPE + counts = cls.numpy.diff(offsets, prepend=dtype.type(0)) + indices = cls.numpy.arange(-1, len(offsets) - 1, dtype=dtype) + return cls.numpy.repeat(indices, counts) @classmethod def startsstops2parents(cls, starts, stops): @@ -68,7 +66,7 @@ class JaggedArray(awkward.array.base.AwkwardArrayWithContent): changes[-1] = len(parents) changes[1:-1] = tmp - length = parents.max() + 1 if parents.size > 0 else 0 + length = parents.max() + 1 if len(parents) > 0 else 0 starts = cls.numpy.zeros(length, dtype=cls.JaggedArray.fget(None).INDEXTYPE) counts = cls.numpy.zeros(length, dtype=cls.JaggedArray.fget(None).INDEXTYPE)
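A self-contained sketch of what the `numpy.repeat()` formulation computes, using a small made-up offsets array:

```python
import numpy as np

# offsets [1, 4, 4, 6]: one leading element that belongs to no sublist,
# then sublists of lengths 3, 0 and 2 over a flat array of length 6.
offsets = np.array([1, 4, 4, 6], dtype=np.int64)

counts = np.diff(offsets, prepend=np.int64(0))  # [1 3 0 2]
indices = np.arange(-1, len(offsets) - 1)       # [-1 0 1 2]
parents = np.repeat(indices, counts)

print(parents)  # [-1  0  0  0  2  2]
```

Element 0 gets parent -1 (it precedes the first sublist), elements 1-3 get parent 0, and elements 4-5 get parent 2, matching what the earlier `add.at`/`cumsum` version produced.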
tests: Remove test_change_email_address_visibility from test_realm.py. This commit removes test_change_email_address_visibility, which is used to test changing email_address_visibility using the 'PATCH /realm' endpoint, as we already do this in do_test_realm_update_api and the invalid value is also tested in test_invalid_integer_attribute_values.
@@ -466,24 +466,6 @@ class RealmTest(ZulipTestCase): result = self.client_patch("/json/realm", req) self.assert_json_error(result, "Invalid bot_creation_policy") - def test_change_email_address_visibility(self) -> None: - # We need an admin user. - user_profile = self.example_user("iago") - - self.login_user(user_profile) - invalid_value = 12 - req = dict(email_address_visibility=orjson.dumps(invalid_value).decode()) - result = self.client_patch("/json/realm", req) - self.assert_json_error(result, "Invalid email_address_visibility") - - req = dict( - email_address_visibility=orjson.dumps(Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS).decode() - ) - result = self.client_patch("/json/realm", req) - self.assert_json_success(result) - realm = get_realm("zulip") - self.assertEqual(realm.email_address_visibility, Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS) - def test_invalid_integer_attribute_values(self) -> None: integer_values = [key for key, value in Realm.property_types.items() if value is int]
[oauth] catch keyring.errors.InitError when saving / loading password fixes
@@ -17,7 +17,7 @@ import keyring.backends.SecretService # type: ignore import keyring.backends.kwallet # type: ignore from keyring.backend import KeyringBackend # type: ignore from keyring.core import load_keyring # type: ignore -from keyring.errors import KeyringLocked, PasswordDeleteError # type: ignore +from keyring.errors import KeyringLocked, PasswordDeleteError, InitError # type: ignore import keyrings.alt.file # type: ignore import requests from dropbox.oauth import DropboxOAuth2FlowNoRedirect # type: ignore @@ -287,7 +287,7 @@ class OAuth2Session: self._token_access_type = access_type - except KeyringLocked: + except (KeyringLocked, InitError): title = f"Could not load auth token, {self.keyring.name} is locked" msg = "Please unlock the keyring and try again." exc = KeyringAccessError(title, msg) @@ -353,7 +353,7 @@ class OAuth2Session: " > Warning: No supported keyring found, " "Dropbox credentials stored in plain text." ) - except KeyringLocked: + except (KeyringLocked, InitError): # switch to plain text keyring if user won't unlock self.keyring = keyrings.alt.file.PlaintextKeyring() self._conf.set("app", "keyring", "keyrings.alt.file.PlaintextKeyring") @@ -372,7 +372,7 @@ class OAuth2Session: try: self.keyring.delete_password("Maestral", self._account_id) click.echo(" > Credentials removed.") - except KeyringLocked: + except (KeyringLocked, InitError): title = f"Could not delete auth token, {self.keyring.name} is locked" msg = "Please unlock the keyring and try again." exc = KeyringAccessError(title, msg)
Update README.md Found a typo.
@@ -112,7 +112,7 @@ Please be aware that the environment.xml and requirements.txt each use a differe $Env:Path ``` Copy the resulting output. Example: `"PATH": "/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin"` -Then open the applicable settings.json in your VS Code. (See how to open command palette [here](https://code.visualstudio.com/docs/getstarted/tips-and-tricks). Seach "settings" and click Open Workspace Settings (JSON)). Paste: +Then open the applicable settings.json in your VS Code. (See how to open command palette [here](https://code.visualstudio.com/docs/getstarted/tips-and-tricks). Search "settings" and click Open Workspace Settings (JSON)). Paste: ``` "terminal.integrated.env.windows": { "PATH": "/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin" @@ -125,7 +125,7 @@ Then open the applicable settings.json in your VS Code. (See how to open command echo $PATH ``` Copy the resulting output. Example: `"PATH": "/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin"` -Then open the applicable settings.json in your VS Code. (See how to open command palette [here](https://code.visualstudio.com/docs/getstarted/tips-and-tricks). Seach "settings" and click Open Workspace Settings (JSON)). Paste: +Then open the applicable settings.json in your VS Code. (See how to open command palette [here](https://code.visualstudio.com/docs/getstarted/tips-and-tricks). Search "settings" and click Open Workspace Settings (JSON)). Paste: ``` "terminal.integrated.env.osx": { "PATH": "PATH": "/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin"
Fix sporadic cluster_t test failure The test was failing because a GC sweep wasn't happening. This fix ensures that the GC does a pass before running the assertion. Tested-by: Mark Nunberg
@@ -19,6 +19,7 @@ from couchbase.tests.base import CouchbaseTestCase from couchbase.connstr import ConnectionString from couchbase.cluster import Cluster, ClassicAuthenticator,\ PasswordAuthenticator, NoBucketError, MixedAuthError +import gc class ClusterTest(CouchbaseTestCase): @@ -55,6 +56,8 @@ class ClusterTest(CouchbaseTestCase): # Should fail again once the bucket has been GC'd del cb + gc.collect() + self.assertRaises(NoBucketError, cluster.n1ql_query, 'select mockrow') def test_no_mixed_auth(self):
[fix] anilist: fix crash when yuna.moe is unreachable Make sure `ids` is declared even when the call to relations.yuna.moe fails. Closes
@@ -144,6 +144,7 @@ class AniList(object): ) or 'all' in selected_formats if has_selected_type and has_selected_release_status: + ids = {} try: ids = task.requests.post( 'https://relations.yuna.moe/api/ids', @@ -152,7 +153,7 @@ class AniList(object): logger.debug(f'Additional IDs: {ids}') except RequestException as e: logger.verbose(f'Couldn\'t fetch additional IDs: {e}') - if not ids or not isinstance(ids, dict): + if not isinstance(ids, dict): ids = {} logger.debug(f'Anime Entry: {anime}')