Dataset schema: "Unnamed: 0" (int64, 0 to 389k), "code" (string, lengths 26 to 79.6k), "docstring" (string, lengths 1 to 46.9k). Rows below are listed as: row index, code, docstring.
6,000
def logs(self, follow=False):
    follow = ["--follow"] if follow else []
    cmdline = ["podman", "logs"] + follow + [self._id or self.get_id()]
    output = run_cmd(cmdline, return_output=True)
    return output
Get logs from this container. Iterator has one log line followed by a
newline in next item. The logs are NOT encoded (they are str, not bytes).

Let's look at an example::

    image = conu.PodmanImage("fedora", tag="27")
    command = ["bash", "-c", "for x in `seq 1 5`; do echo $x; sleep 1; done"]
    container = image.run_via_binary(command=command)
    for line in container.logs(follow=True):
        print(line)

This will output

.. code-block:: none

    '1'
    '\n'
    '2'
    '\n'
    '3'
    '\n'
    '4'
    '\n'
    '5'
    '\n'

:param follow: bool, provide new logs as they come
:return: iterator (of str)
6,001
def get_genericpage(cls, kb_app):
    # the query action name is assumed; the string literal was stripped
    # from the source
    q = dectate.Query('genericpage')
    klasses = sorted(q(kb_app), key=lambda args: args[0].order)
    if not klasses:
        return Genericpage
    else:
        return klasses[0][1]
Return the one class if configured, otherwise default
6,002
def _check_dtype(self, dtype):
    try:
        return _ffi_types[dtype]
    except KeyError:
        raise ValueError("dtype must be one of {0!r} and not {1!r}".format(
            sorted(_ffi_types.keys()), dtype))
Check if dtype string is valid and return ctype string.
6,003
def saveData(self, dataOutputFile, categoriesOutputFile):
    if self.records is None:
        return False

    if not dataOutputFile.endswith("csv"):
        raise TypeError("data output file must be csv.")
    if not categoriesOutputFile.endswith("json"):
        raise TypeError("category output file must be json")

    # Ensure the output directories exist
    dataOutputDirectory = os.path.dirname(dataOutputFile)
    if not os.path.exists(dataOutputDirectory):
        os.makedirs(dataOutputDirectory)
    categoriesOutputDirectory = os.path.dirname(categoriesOutputFile)
    if not os.path.exists(categoriesOutputDirectory):
        os.makedirs(categoriesOutputDirectory)

    with open(dataOutputFile, "w") as f:
        writer = csv.DictWriter(f, fieldnames=self.fieldNames)
        writer.writeheader()
        writer.writerow(self.types)
        writer.writerow(self.specials)
        for data in self.records:
            for record in data:
                writer.writerow(record)

    with open(categoriesOutputFile, "w") as f:
        f.write(json.dumps(self.categoryToId,
                           sort_keys=True,
                           indent=4,
                           separators=(",", ": ")))

    return dataOutputFile
Save the processed data and the associated category mapping. @param dataOutputFile (str) Location to save data @param categoriesOutputFile (str) Location to save category map @return (str) Path to the saved data file iff saveData() is successful.
6,004
def cli(env, sortby, columns, datacenter, username, storage_type):
    block_manager = SoftLayer.BlockStorageManager(env.client)
    block_volumes = block_manager.list_block_volumes(datacenter=datacenter,
                                                     username=username,
                                                     storage_type=storage_type,
                                                     mask=columns.mask())

    table = formatting.Table(columns.columns)
    table.sortby = sortby

    for block_volume in block_volumes:
        table.add_row([value or formatting.blank()
                       for value in columns.row(block_volume)])

    env.fout(table)
List block storage.
6,005
def processing_blocks(self):
    pb_list = ProcessingBlockList()
    return json.dumps(dict(active=pb_list.active,
                           completed=pb_list.completed,
                           aborted=pb_list.aborted))
Return a JSON dict encoding the PBs known to SDP.
6,006
def to_pandas_df(self, column_names=None, selection=None, strings=True,
                 virtual=False, index_name=None):
    import pandas as pd
    data = self.to_dict(column_names=column_names, selection=selection,
                        strings=strings, virtual=virtual)
    if index_name is not None:
        if index_name in data:
            index = data.pop(index_name)
        else:
            index = self.evaluate(index_name, selection=selection)
    else:
        index = None
    df = pd.DataFrame(data=data, index=index)
    if index is not None:
        df.index.name = index_name
    return df
Return a pandas DataFrame containing the ndarray corresponding to the evaluated data.

If index_name is given, that column is used for the index of the dataframe.

Example

>>> df_pandas = df.to_pandas_df(["x", "y", "z"])
>>> df_copy = vaex.from_pandas(df_pandas)

:param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used
:param selection: {selection}
:param strings: argument passed to DataFrame.get_column_names when column_names is None
:param virtual: argument passed to DataFrame.get_column_names when column_names is None
:param index_name: if this column is given it is used for the index of the DataFrame
:return: pandas.DataFrame object
6,007
def get_fc2(supercell, symmetry, dataset, atom_list=None, decimals=None):
    if atom_list is None:
        fc_dim0 = supercell.get_number_of_atoms()
    else:
        fc_dim0 = len(atom_list)
    # 'double' dtype, C ordering and the 'rotations' key are assumed; the
    # string literals were stripped from the source
    force_constants = np.zeros((fc_dim0, supercell.get_number_of_atoms(), 3, 3),
                               dtype='double', order='C')
    atom_list_done = _get_force_constants_disps(
        force_constants, supercell, dataset, symmetry, atom_list=atom_list)
    rotations = symmetry.get_symmetry_operations()['rotations']
    lattice = np.array(supercell.get_cell().T, dtype='double', order='C')
    permutations = symmetry.get_atomic_permutations()
    distribute_force_constants(force_constants, atom_list_done, lattice,
                               rotations, permutations, atom_list=atom_list)
    if decimals:
        force_constants = force_constants.round(decimals=decimals)
    return force_constants
Force constants are computed.

Force constants, Phi, are calculated from sets of forces, F, and
atomic displacements, d:

    Phi = -F / d

This is solved by matrix pseudo-inversion. Crystal symmetry is included
when creating F and d matrices.

Returns
-------
ndarray
    Force constants[ i, j, a, b ]
    i: Atom index of finitely displaced atom.
    j: Atom index at which force on the atom is measured.
    a, b: Cartesian direction indices = (0, 1, 2) for i and j, respectively
    dtype=double
    shape=(len(atom_list), n_satom, 3, 3)
6,008
def down_by_time(*filters, remote_dir=DEFAULT_REMOTE_DIR, local_dir=".", count=1):
    files = command.list_files(*filters, remote_dir=remote_dir)
    most_recent = sorted(files, key=lambda f: f.datetime)
    to_sync = most_recent[-count:]
    _notify_sync(Direction.down, to_sync)
    down_by_files(to_sync[::-1], local_dir=local_dir)
Sync most recent files by date and time attributes.
6,009
def merge(cls, *args, **kwargs):
    # the kwarg names follow the docstring; the error messages are assumed
    # since the string literals were stripped from the source
    newkeys = bool(kwargs.get('newkeys', False))
    ignore = kwargs.get('ignore', list())

    if len(args) < 1:
        raise ValueError('at least one Ent is required')
    elif not all(isinstance(s, Ent) for s in args):
        raise ValueError('all positional arguments must be Ent instances')

    ent = args[0]
    data = cls.load(ent)

    for ent in args[1:]:
        for key, value in ent.__dict__.items():
            if key in ignore:
                continue
            if key in data.__dict__:
                v1 = data.__dict__[key]
                if type(value) == type(v1):
                    if isinstance(v1, Ent):
                        data.__dict__[key] = cls.merge(v1, value, **kwargs)
                    else:
                        data.__dict__[key] = cls.load(value)
            elif newkeys:
                data.__dict__[key] = value

    return data
Create a new Ent from one or more existing Ents.

Keys in the later Ent objects will overwrite the keys of the previous
Ents. Later keys of different type than in earlier Ents will be bravely
ignored.

The following keyword arguments are recognized:

newkeys: boolean value to determine whether keys from later Ents should
be included if they do not exist in earlier Ents.

ignore: list of strings of key names that should not be overridden by
later Ent keys.
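A short usage sketch of the merge semantics above (Ent.load accepting a plain dict is an assumption for illustration):

>>> a = Ent.load({'host': 'localhost', 'port': 80})
>>> b = Ent.load({'port': 8080, 'debug': True})
>>> merged = Ent.merge(a, b, newkeys=True, ignore=['host'])
>>> merged.port, merged.debug
(8080, True)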
6,010
def apply_policy(self, policy):
    # the dictionary keys and log format keys below are assumed; the
    # string literals were stripped from the source
    tenant_name = policy['tenant_name']
    fw_id = policy['fw_id']
    fw_name = policy['fw_name']
    LOG.info("asa_apply_policy: tenant=%(tenant)s fw_id=%(fw_id)s "
             "fw_name=%(fw_name)s",
             {'tenant': tenant_name, 'fw_id': fw_id, 'fw_name': fw_name})
    cmds = ["conf t", "changeto context " + tenant_name]

    for rule_id, rule in policy['rules'].items():
        acl = self.build_acl(tenant_name, rule)
        LOG.info("rule[%(rule_id)s]: name=%(name)s enabled=%(enabled)s"
                 " protocol=%(protocol)s dport=%(dport)s "
                 "sport=%(sport)s dip=%(dport)s "
                 "sip=%(sip)s action=%(dip)s",
                 {'rule_id': rule_id, 'name': rule.get('name'),
                  'enabled': rule.get('enabled'),
                  'protocol': rule.get('protocol'),
                  'dport': rule.get('destination_port'),
                  'sport': rule.get('source_port'),
                  'dip': rule.get('destination_ip_address'),
                  'sip': rule.get('source_ip_address'),
                  'action': rule.get('action')})

        # remove the old ACL if this rule was already installed
        if rule_id in self.rule_tbl:
            cmds.append('no ' + self.rule_tbl[rule_id])
        self.rule_tbl[rule_id] = acl
        if tenant_name in self.tenant_rule:
            if rule_id not in self.tenant_rule[tenant_name]['rule_lst']:
                self.tenant_rule[tenant_name]['rule_lst'].append(rule_id)
        cmds.append(acl)
    cmds.append("access-group " + tenant_name + " global")
    cmds.append("write memory")

    LOG.info("cmds sent is %s", cmds)
    data = {"commands": cmds}
    return self.rest_send_cli(data)
Apply a firewall policy.
6,011
def run(file, access_key, secret_key, **kwargs):
    # string literals in this function were stripped from the source;
    # attribute names, host and messages are restored on a best-effort basis
    if file:
        import sys
        file_path, file_name = os.path.split(file)
        sys.path.append(file_path)
        strategy_module = importlib.import_module(os.path.splitext(file_name)[0])
        init = getattr(strategy_module, 'init', None)
        handle_func = getattr(strategy_module, 'handle_func', None)
        schedule = getattr(strategy_module, 'schedule', None)
    else:
        init, handle_func, schedule = [None] * 3
    setKey(access_key, secret_key)
    url = kwargs.get('url')
    hostname = 'api.huobi.br.com'  # assumed default host
    if url:
        hostname = urlparse(url).hostname
    setUrl('https://' + hostname, 'https://' + hostname)
    reconn = kwargs.get('reconn', -1)
    from huobitrade import HBWebsocket, HBRestAPI
    from huobitrade.datatype import HBMarket, HBAccount, HBMargin
    restapi = HBRestAPI(get_acc=True)
    ws = HBWebsocket(host=hostname, reconn=reconn)
    auth_ws = HBWebsocket(host=hostname, auth=True, reconn=reconn)
    data = HBMarket()
    account = HBAccount()
    margin = HBMargin()
    ws_open = False
    ws_auth = False

    @ws.after_open
    def _open():
        nonlocal ws_open
        click.echo('ws connected')
        ws_open = True

    @auth_ws.after_auth
    def _auth():
        nonlocal ws_auth
        click.echo('auth ws authenticated')
        ws_auth = True

    ws.run()
    auth_ws.run()

    for i in range(10):
        time.sleep(3)
        click.echo(f'connecting... {i}')
        if ws_open & ws_auth:
            break
    else:
        ws.stop()
        auth_ws.stop()
        raise Exception('websocket connection timed out')
    if init:
        init(restapi, ws, auth_ws)
    if handle_func:
        for k, v in handle_func.items():
            if k.split('.')[0].lower() == 'market':
                ws.register_handle_func(k)(v)
            else:
                auth_ws.register_handle_func(k)(v)
    if schedule:
        print('schedule registered')
        from huobitrade.handler import TimeHandler
        interval = schedule.__kwdefaults__['interval']
        timerhandler = TimeHandler('scheduler', interval)
        timerhandler.handle = lambda msg: schedule(restapi, ws, auth_ws)
        timerhandler.start()

    while True:
        try:
            code = click.prompt('>')
            if code == 'exit':
                if click.confirm('exit?'):
                    break
                else:
                    continue
            else:
                result = eval(code)
                click.echo(result)
        except Exception as e:
            click.echo(traceback.format_exc())
    ws.stop()
    auth_ws.stop()
Run huobitrade from the command line.
6,012
def set_owner(obj_name, principal, obj_type='file'):
    # string literals are restored on a best-effort basis; the originals
    # were stripped from the source
    sid = get_sid(principal)

    obj_flags = flags()

    if obj_type.lower() not in obj_flags.obj_type:
        raise SaltInvocationError(
            'Invalid object type "{0}"'.format(obj_type))

    if 'registry' in obj_type.lower():
        obj_name = dacl().get_reg_name(obj_name)

    # Setting the owner to another account requires the SeTakeOwnership and
    # SeRestore privileges
    new_privs = set()
    luid = win32security.LookupPrivilegeValue('', 'SeTakeOwnershipPrivilege')
    new_privs.add((luid, win32con.SE_PRIVILEGE_ENABLED))
    luid = win32security.LookupPrivilegeValue('', 'SeRestorePrivilege')
    new_privs.add((luid, win32con.SE_PRIVILEGE_ENABLED))

    p_handle = win32api.GetCurrentProcess()
    t_handle = win32security.OpenProcessToken(
        p_handle,
        win32security.TOKEN_ALL_ACCESS | win32con.TOKEN_ADJUST_PRIVILEGES)
    win32security.AdjustTokenPrivileges(t_handle, 0, new_privs)

    try:
        win32security.SetNamedSecurityInfo(
            obj_name,
            obj_flags.obj_type[obj_type.lower()],
            obj_flags.element['owner'],
            sid,
            None, None, None)
    except pywintypes.error as exc:
        log.exception('Failed to make %s the owner: %s', principal, exc)
        raise CommandExecutionError(
            'Failed to set owner: {0}'.format(obj_name), exc.strerror)

    return True
Set the owner of an object. This can be a file, folder, registry key,
printer, service, etc...

Args:

    obj_name (str):
        The object for which to set owner. This can be the path to a file
        or folder, a registry key, printer, etc. For more information
        about how to format the name see:

        https://msdn.microsoft.com/en-us/library/windows/desktop/aa379593(v=vs.85).aspx

    principal (str):
        The name of the user or group to make owner of the object. Can
        also pass a SID.

    obj_type (Optional[str]):
        The type of object for which to set the owner. Default is ``file``

Returns:
    bool: True if successful, raises an error otherwise

Usage:

.. code-block:: python

    salt.utils.win_dacl.set_owner('C:\\MyDirectory', 'jsnuffy', 'file')
6,013
def _update_service_profile(self, handle, service_profile, vlan_id, ucsm_ip):
    # the debug messages and the exception tag are assumed; the string
    # literals were stripped from the source
    virtio_port_list = (
        CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)
    eth_port_paths = ["%s%s" % (service_profile, ep)
                      for ep in virtio_port_list]
    vlan_name = self.make_vlan_name(vlan_id)

    try:
        handle.StartTransaction()
        obj = handle.GetManagedObject(
            None,
            self.ucsmsdk.LsServer.ClassId(),
            {self.ucsmsdk.LsServer.DN: service_profile})
        if not obj:
            LOG.debug('Could not find Service Profile %s at %s',
                      service_profile, ucsm_ip)
            return False

        for eth_port_path in eth_port_paths:
            eth = handle.GetManagedObject(
                obj, self.ucsmsdk.VnicEther.ClassId(),
                {self.ucsmsdk.VnicEther.DN: eth_port_path}, True)
            if eth:
                vlan_path = (eth_port_path + const.VLAN_PATH_PREFIX +
                             vlan_name)
                eth_if = handle.AddManagedObject(
                    eth,
                    self.ucsmsdk.VnicEtherIf.ClassId(),
                    {self.ucsmsdk.VnicEtherIf.DN: vlan_path,
                     self.ucsmsdk.VnicEtherIf.NAME: vlan_name,
                     self.ucsmsdk.VnicEtherIf.DEFAULT_NET: "no"}, True)
                if not eth_if:
                    LOG.debug('Could not update Service Profile %s with '
                              'VLAN %s', service_profile, vlan_id)
                    return False
            else:
                LOG.debug('Did not find ethernet port at %s', eth_port_path)
        handle.CompleteTransaction()
        return True
    except Exception as e:
        return self._handle_ucsm_exception(e, 'Service Profile',
                                           vlan_name, ucsm_ip)
Updates Service Profile on the UCS Manager. Each of the ethernet ports on the Service Profile representing the UCS Server, is updated with the VLAN profile corresponding to the vlan_id passed in.
6,014
def get_max_value(self):
    value = self.get_default_value()
    if self.attribute_type is str:
        # the pad character and error message are assumed; the string
        # literals were stripped from the source
        max_value = value.ljust(self.max_length + 1, 'z')
    elif self.attribute_type is int:
        max_value = self.max_length + 1
    else:
        raise TypeError(
            'cannot determine a maximum value for %s' % self.local_name)
    return max_value
Get the maximum value
6,015
def readLocationElement(self, locationElement):
    if self._strictAxisNames and not self.documentObject.axes:
        raise DesignSpaceDocumentError("No axes defined")

    loc = {}
    for dimensionElement in locationElement.findall(".dimension"):
        dimName = dimensionElement.attrib.get("name")
        if self._strictAxisNames and dimName not in self.axisDefaults:
            self.log.warning("Location with undefined axis: \"%s\".", dimName)
            continue
        xValue = yValue = None
        # the "xvalue"/"yvalue" attribute names are assumed; the string
        # literals were stripped from the source
        try:
            xValue = dimensionElement.attrib.get("xvalue")
            xValue = float(xValue)
        except ValueError:
            self.log.warning("KeyError in readLocation xValue %3.3f", xValue)
        try:
            yValue = dimensionElement.attrib.get("yvalue")
            if yValue is not None:
                yValue = float(yValue)
        except ValueError:
            pass
        if yValue is not None:
            loc[dimName] = (xValue, yValue)
        else:
            loc[dimName] = xValue
    return loc
Format 0 location reader
6,016
def get_security_attributes_for_user(user=None):
    if user is None:
        user = get_current_user()

    assert isinstance(user, security.TOKEN_USER), (
        "user must be TOKEN_USER instance")

    SD = security.SECURITY_DESCRIPTOR()
    SA = security.SECURITY_ATTRIBUTES()
    SA.descriptor = SD
    SA.bInheritHandle = 1

    ctypes.windll.advapi32.InitializeSecurityDescriptor(
        ctypes.byref(SD), security.SECURITY_DESCRIPTOR.REVISION)
    ctypes.windll.advapi32.SetSecurityDescriptorOwner(
        ctypes.byref(SD), user.SID, 0)
    return SA
Return a SECURITY_ATTRIBUTES structure with the SID set to the specified user (uses current user if none is specified).
6,017
def recorddiff(a, b, buffersize=None, tempdir=None, cache=True, strict=False):
    added = recordcomplement(b, a, buffersize=buffersize, tempdir=tempdir,
                             cache=cache, strict=strict)
    subtracted = recordcomplement(a, b, buffersize=buffersize, tempdir=tempdir,
                                  cache=cache, strict=strict)
    return added, subtracted
Find the difference between records in two tables. E.g.::

    >>> import petl as etl
    >>> a = [['foo', 'bar', 'baz'],
    ...      ['A', 1, True],
    ...      ['C', 7, False],
    ...      ['B', 2, False],
    ...      ['C', 9, True]]
    >>> b = [['bar', 'foo', 'baz'],
    ...      [2, 'B', False],
    ...      [9, 'A', False],
    ...      [3, 'B', True],
    ...      [9, 'C', True]]
    >>> added, subtracted = etl.recorddiff(a, b)
    >>> added
    +-----+-----+-------+
    | bar | foo | baz   |
    +=====+=====+=======+
    |   3 | 'B' | True  |
    +-----+-----+-------+
    |   9 | 'A' | False |
    +-----+-----+-------+
    >>> subtracted
    +-----+-----+-------+
    | foo | bar | baz   |
    +=====+=====+=======+
    | 'A' |   1 | True  |
    +-----+-----+-------+
    | 'C' |   7 | False |
    +-----+-----+-------+

Convenient shorthand for ``(recordcomplement(b, a), recordcomplement(a, b))``.
See also :func:`petl.transform.setops.recordcomplement`.

See also the discussion of the `buffersize`, `tempdir` and `cache` arguments
under the :func:`petl.transform.sorts.sort` function.

.. versionchanged:: 1.1.0
    If `strict` is `True` then strict set-like behaviour is used.
6,018
def run_interrupted(self):
    # the 'job_n' run-log key is assumed; the string literals were stripped
    # from the source
    start = datetime.datetime.now()
    try:
        cwd = os.getcwd()
        v = sys.version.replace("\n", " ")
        logger.info("Custodian started in singleshot mode at {} in {}."
                    .format(start, cwd))
        logger.info("Custodian running on Python version {}".format(v))

        # load run log
        if os.path.exists(Custodian.LOG_FILE):
            self.run_log = loadfn(Custodian.LOG_FILE, cls=MontyDecoder)

        if len(self.run_log) == 0:
            # starting up an initial job: set up input and quit
            job_n = 0
            job = self.jobs[job_n]
            logger.info("Setting up job no. 1 ({}) ".format(job.name))
            job.setup()
            self.run_log.append({"job": job.as_dict(), "corrections": [],
                                 "job_n": job_n})
            return len(self.jobs)
        else:
            # continuing after a run
            job_n = self.run_log[-1]["job_n"]
            job = self.jobs[job_n]

            if len(self.run_log[-1]["corrections"]) > 0:
                logger.info("Reran {}.run due to fixable errors".format(
                    job.name))

            # check error handlers
            logger.info("Checking error handlers for {}.run".format(
                job.name))
            if self._do_check(self.handlers):
                logger.info("Failed validation based on error handlers")
                # raise an error for an unrecoverable error
                for x in self.run_log[-1]["corrections"]:
                    if not x["actions"] and x["handler"].raises_runtime_error:
                        self.run_log[-1]["handler"] = x["handler"]
                        s = "Unrecoverable error for handler: {}. " \
                            "Raising RuntimeError".format(x["handler"])
                        raise NonRecoverableError(s, True, x["handler"])
                logger.info("Corrected input based on error handlers")
                return len(self.jobs) - job_n

            # check validators
            logger.info("Checking validator for {}.run".format(job.name))
            for v in self.validators:
                if v.check():
                    self.run_log[-1]["validator"] = v
                    logger.info("Failed validation based on validator")
                    s = "Validation failed: {}".format(v)
                    raise ValidationError(s, True, v)

            logger.info("Postprocessing for {}.run".format(job.name))
            job.postprocess()

            # if done with all jobs, return
            if len(self.jobs) == (job_n + 1):
                self.finished = True
                return 0

            # set up the next job
            job_n += 1
            job = self.jobs[job_n]
            self.run_log.append({"job": job.as_dict(), "corrections": [],
                                 "job_n": job_n})
            job.setup()
            return len(self.jobs) - job_n

    except CustodianError as ex:
        logger.error(ex.message)
        if ex.raises:
            raise

    finally:
        logger.info("Logging to {}...".format(Custodian.LOG_FILE))
        dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder, indent=4)
        end = datetime.datetime.now()
        logger.info("Run ended at {}.".format(end))
        run_time = end - start
        logger.info("Run completed. Total time taken = {}."
                    .format(run_time))
        if self.finished and self.gzipped_output:
            gzip_dir(".")
Runs custodian in an interrupted mode, which sets up and validates jobs
but doesn't run the executable.

Returns:
    number of remaining jobs

Raises:
    ValidationError: if a job fails validation
    ReturnCodeError: if the process has a return code different from 0
    NonRecoverableError: if an unrecoverable error occurs
    MaxCorrectionsPerJobError: if max_errors_per_job is reached
    MaxCorrectionsError: if max_errors is reached
    MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
6,019
def get_date_822():
    # '/bin/date' and the '-R' (RFC 2822 format) flag are assumed; the
    # string literals were stripped from the source
    cmd = '/bin/date'
    if not os.path.exists(cmd):
        raise ValueError('%s command does not exist.' % cmd)
    args = [cmd, '-R']
    result = get_cmd_stdout(args).strip()
    result = normstr(result)
    return result
return output of 822-date command
6,020
def qn_to_qubo(expr):
    try:
        import sympy
    except ImportError:
        raise ImportError("This function requires sympy. Please install it.")
    assert type(expr) == sympy.Add
    to_i = lambda s: int(str(s)[1:])
    max_i = max(map(to_i, expr.free_symbols)) + 1
    qubo = [[0.] * max_i for _ in range(max_i)]
    for arg in expr.args:
        syms = arg.free_symbols
        assert len(syms) <= 2
        if len(syms) == 2:
            assert type(arg) == sympy.Mul
            i, j = list(map(to_i, syms))
            if i > j:
                i, j = j, i
            if i == j:
                if len(arg.args) == 2:
                    qubo[i][i] = float(arg.args[0])
                elif len(arg.args) == 1:
                    qubo[i][i] = 1.0
                else:
                    raise ValueError(f"Too many args! arg.args = {arg.args}")
                continue
            if len(arg.args) == 3:
                qubo[i][j] = float(arg.args[0])
            elif len(arg.args) == 2:
                # a bare q_i*q_j term has an implicit coefficient of 1;
                # the assignment was missing in the source
                qubo[i][j] = 1.0
        if len(syms) == 1:
            if len(arg.args) == 2:
                assert type(arg) == sympy.Mul
                i = to_i(next(iter(syms)))
                qubo[i][i] = float(arg.args[0])
            elif len(arg.args) == 1:
                qubo[i][i] = 1.0
            else:
                raise ValueError(f"Too many args! arg.args = {arg.args}")
    return qubo
Convert Sympy's expr to QUBO. Args: expr: Sympy's quadratic expression with variable `q0`, `q1`, ... Returns: [[float]]: Returns QUBO matrix.
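A quick doctest-style sketch of the conversion above (variable naming q0, q1 follows the docstring):

>>> import sympy
>>> q0, q1 = sympy.symbols('q0 q1')
>>> qn_to_qubo(2*q0*q1 + 3*q0 - q1)
[[3.0, 2.0], [0.0, -1.0]]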
6,021
def parse_env(self, env=None, namespace=None):
    env = env or os.environ
    results = {}
    if not namespace:
        namespace = self.prog
    namespace = namespace.upper()

    for option in self._options:
        # the 'env' kwarg name is assumed; the string literal was stripped
        # from the source
        env_var = option.kwargs.get('env')
        default_env = "%s_%s" % (namespace, option.name.upper())
        if env_var and env_var in env:
            value = env[env_var]
            results[option.dest] = option.type(value)
        elif default_env in env:
            value = env[default_env]
            results[option.dest] = option.type(value)
    return results
Parse environment variables.
6,022
def _get_fields(self):
    if self.data is not None:
        return self.begin, self.end, self.data
    else:
        return self.begin, self.end
Used by str, unicode, repr and __reduce__. Returns only the fields necessary to reconstruct the Interval. :return: reconstruction info :rtype: tuple
6,023
def reorient(self, up, look):
    from blmath.geometry.transform import rotation_from_up_and_look
    from blmath.numerics import as_numeric_array

    up = as_numeric_array(up, (3,))
    look = as_numeric_array(look, (3,))

    if self.v is not None:
        self.v = np.dot(rotation_from_up_and_look(up, look), self.v.T).T
Reorient the mesh by specifying two vectors. up: The foot-to-head direction. look: The direction the body is facing. In the result, the up will end up along +y, and look along +z (i.e. facing towards a default OpenGL camera).
6,024
def _process_callbacks(self):
    # Flush the pipe content.
    os.read(self._schedule_pipe[0], 1024)

    # Process calls from executor.
    calls_from_executor, self._calls_from_executor = self._calls_from_executor, []
    for c in calls_from_executor:
        c()
Process callbacks from `call_from_executor` in eventloop.
6,025
def get_line_value(self, context_type):
    if context_type.upper() == "ENV":
        return self.line_envs
    elif context_type.upper() == "LABEL":
        return self.line_labels
Get the values defined on this line. :param context_type: "ENV" or "LABEL" :return: values of given type defined on this line
6,026
def peek_assoc(store, container, _stack=None):
    assoc = []
    try:
        # the record attribute name and expected value are assumed; the
        # string literals were stripped from the source
        if store.getRecordAttr('key', container) == 'escaped':
            for i in container:
                assoc.append(store.peek(i, container, _stack=_stack))
        else:
            for i in container:
                assoc.append((store.strRecord(i, container),
                              store.peek(i, container, _stack=_stack)))
    except TypeError as e:
        try:
            for i in container:
                pass
            raise e
        except TypeError:
            raise TypeError("container is not iterable; peek is not "
                            "compatible\n\t{}".format(e.args[0]))
    return assoc
Deserialize association lists.
6,027
async def _post(self, zone_id: int = None, json: dict = None) -> dict:
    # the HTTP method string and endpoint template are assumed; the string
    # literals were stripped from the source
    return await self._request(
        'post', 'zone/{0}'.format(zone_id), json=json)
Post data to a (non)existing zone.
6,028
def thumbUrl(self):
    # the attribute names are assumed; the string literals were stripped
    # from the source
    thumb = self.firstAttr('thumb', 'parentThumb', 'grandparentThumb')
    return self._server.url(thumb, includeToken=True) if thumb else None
Return the first thumbnail url, starting with the most specific thumbnail for that item.
6,029
def retry(ex=RETRIABLE, tries=4, delay=5, backoff=2, logger=None):
    def deco_retry(func):
        @wraps(func)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            while mtries > 1:
                try:
                    return func(*args, **kwargs)
                except ex as error:
                    sleeping = mdelay + randint(0, 5)
                    msg = "%s, Retrying in %d seconds..." % (str(error), sleeping)
                    if logger:
                        logger.warning(msg)
                    sleep(sleeping)
                    mtries -= 1
                    mdelay *= backoff
            # final attempt; any exception propagates to the caller
            return func(*args, **kwargs)
        return f_retry
    return deco_retry
Retry calling the decorated function using an exponential backoff.

http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry

:param ex: the exception to check. may be a tuple of exceptions to check
:param tries: number of times to try (not retry) before giving up
:param delay: initial delay between retries in seconds. A random 0-5s will
    be added to this number to stagger calls.
:param backoff: backoff multiplier e.g. value of 2 will double the delay
    each retry
:param logger: logger to use. If None, print
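A usage sketch of the decorator above (the wrapped function is hypothetical):

>>> @retry(ex=ConnectionError, tries=3, delay=1, backoff=2)
... def flaky_call():
...     ...

Each failed attempt sleeps delay * backoff**n seconds plus 0-5s of random jitter, and the final attempt simply propagates whatever the function raises.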
6,030
def set_job(self, key, func, args):
    res, pk = key
    jobs, lock = self._jobs
    task = _tasks.UpdateTask(func(*args), key)
    with lock:
        job = jobs[res].get(pk)
        had = bool(job)
        if not job:
            job = task
            jobs[res][pk] = job
        else:
            task.cancel()
    # the log message and its conditional strings are assumed; the string
    # literals were stripped from the source
    self._log.debug('%s: update %s (%s)', res.tag, pk,
                    'scheduled' if not had else 'already scheduled')
    return job
Get a scheduled task or set if none exists. Returns: - task coroutine/continuation
6,031
def generate_strip_subparser(subparsers):
    # the sub-command and argument names are assumed; the string literals
    # were stripped from the source
    parser = subparsers.add_parser(
        'strip', description=constants.STRIP_DESCRIPTION,
        epilog=constants.STRIP_EPILOG, formatter_class=ParagraphFormatter,
        help=constants.STRIP_HELP)
    parser.set_defaults(func=strip_files)
    utils.add_common_arguments(parser)
    parser.add_argument('input', help=constants.STRIP_INPUT_HELP,
                        metavar='INPUT')
    parser.add_argument('output', help=constants.STRIP_OUTPUT_HELP,
                        metavar='OUTPUT')
Adds a sub-command parser to `subparsers` to process prepared files for use with the tacl ngrams command.
6,032
def get_attached_pipettes(self):
    # the attribute and dictionary key names are assumed; the string
    # literals were stripped from the source
    api = object.__getattribute__(self, '_api')
    instrs = {}
    for mount, data in api.attached_instruments.items():
        instrs[mount.name.lower()] = {
            'model': data.get('model', None),
            'name': data.get('name', None),
            'mount_axis': Axis.by_mount(mount),
            'plunger_axis': Axis.of_plunger(mount)
        }
        if data.get('model'):
            instrs[mount.name.lower()]['tip_length'] \
                = data.get('tip_length', None)
    return instrs
Mimic the behavior of robot.get_attached_pipettes
6,033
def run(self, evloop=None):
    self.sendEvent("peng3d:peng.run", {"peng": self, "window": self.window, "evloop": evloop})
    self.window.run(evloop)
    self.sendEvent("peng3d:peng.exit", {"peng": self})
Runs the application main loop. This method is blocking and needs to be called from the main thread to avoid OpenGL bugs that can occur. ``evloop`` may optionally be a subclass of :py:class:`pyglet.app.base.EventLoop` to replace the default event loop.
6,034
def _read_opt_pad(self, code, *, desc):
    _type = self._read_opt_type(code)

    if code == 0:    # Pad1 option
        opt = dict(
            desc=desc,
            type=_type,
            length=1,
        )
    elif code == 1:  # PadN option
        _size = self._read_unpack(1)
        _padn = self._read_fileng(_size)

        opt = dict(
            desc=desc,
            type=_type,
            length=_size + 2,
            padding=_padn,
        )
    else:
        # the error message is assumed; the f-string literal was stripped
        # from the source
        raise ProtocolError(f'{self.alias}: [Opt {code}] invalid format')

    return opt
Read HOPOPT padding options.

Structure of HOPOPT padding options [RFC 8200]:

* Pad1 Option:

    +-+-+-+-+-+-+-+-+
    |       0       |
    +-+-+-+-+-+-+-+-+

    Octets      Bits        Name                    Description
      0           0     hopopt.pad.type             Option Type
      0           0     hopopt.pad.type.value       Option Number
      0           0     hopopt.pad.type.action      Action (00)
      0           2     hopopt.pad.type.change      Change Flag (0)

* PadN Option:

    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- - - - - - - - -
    |       1       |  Opt Data Len |  Option Data
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- - - - - - - - -

    Octets      Bits        Name                    Description
      0           0     hopopt.pad.type             Option Type
      0           0     hopopt.pad.type.value       Option Number
      0           0     hopopt.pad.type.action      Action (00)
      0           2     hopopt.pad.type.change      Change Flag (0)
      1           8     hopopt.opt.length           Length of Option Data
      2          16     hopopt.pad.padding          Padding
6,035
def store(self, store_item):
    required_keys = {"type": str, "timestamp": float}

    if not isinstance(store_item, dict):
        raise TypeError("The stored item should be a dict")

    for k, v in required_keys.items():
        if k not in store_item:
            raise AttributeError("{} is not available. Please add it.".format(k))
        if not isinstance(store_item[k], v):
            raise TypeError("{} is not a {}. Please change it. ".format(k, v))

    self._arctic_lib.check_quota()
    self._collection.update(store_item, store_item, upsert=True)
Store for tweets and user information. Must have all required information and types
6,036
def lock(self, key, client):
    self.key = key
    self.client = client
Set the key that will be used to ensure messages come from one party Args: key (string): The key used to validate future messages client (string): A string that will be returned to indicate who locked this device.
6,037
def PSLLDQ(cpu, dest, src):
    count = Operators.ZEXTEND(src.read(), dest.size * 2)
    byte_count = Operators.ITEBV(src.size * 2, count > 15, 16, count)
    bit_count = byte_count * 8
    val = Operators.ZEXTEND(dest.read(), dest.size * 2)
    val = val << (Operators.ZEXTEND(bit_count, dest.size * 2))
    dest.write(Operators.EXTRACT(val, 0, dest.size))
Packed Shift Left Logical Double Quadword

Shifts the destination operand (first operand) to the left by the number
of bytes specified in the count operand (second operand). The empty
low-order bytes are cleared (set to all 0s). If the value specified by the
count operand is greater than 15, the destination operand is set to all
0s. The destination operand is an XMM register. The count operand is an
8-bit immediate.

    TEMP = COUNT;
    if (TEMP > 15) TEMP = 16;
    DEST = DEST << (TEMP * 8);
6,038
def calculate_overlap(self):
    overs = []
    if not self.tx_obj1.range.overlaps(self.tx_obj2.range):
        return []  # transcript ranges don't even overlap
    for i in range(0, len(self.j1)):
        for j in range(0, len(self.j2)):
            if self.j1[i].overlaps(self.j2[j], tolerance=self.tolerance):
                overs.append([i, j])
    return overs
Create the array that describes how junctions overlap
6,039
def compute(self, bottomUpInput, enableLearn, enableInference=None):
    # the dtype names and the 't-1' time-step keys are assumed; the string
    # literals were stripped from the source
    assert (bottomUpInput.dtype == numpy.dtype('float32')) or \
           (bottomUpInput.dtype == numpy.dtype('uint32')) or \
           (bottomUpInput.dtype == numpy.dtype('int32'))

    self.iterationIdx = self.iterationIdx + 1

    if enableInference is None:
        if enableLearn:
            enableInference = False
        else:
            enableInference = True

    self._setStatePointers()
    y = self.cells4.compute(bottomUpInput, enableInference, enableLearn)
    self.currentOutput = y.reshape((self.numberOfCols, self.cellsPerColumn))
    self.avgLearnedSeqLength = self.cells4.getAvgLearnedSeqLength()
    self._copyAllocatedStates()

    if self.collectStats:
        activeColumns = bottomUpInput.nonzero()[0]
        if enableInference:
            predictedState = self.infPredictedState['t-1']
        else:
            predictedState = self.lrnPredictedState['t-1']
        self._updateStatsInferEnd(self._internalStats,
                                  activeColumns,
                                  predictedState,
                                  self.colConfidence['t-1'])

    output = self._computeOutput()

    self.printComputeEnd(output, learn=enableLearn)
    self.resetCalled = False
    return output
Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.compute`.
6,040
def authenticate(self, provider=None, identifier=None):
    "Fetch user for a given provider by id."
    provider_q = Q(provider__name=provider)
    if isinstance(provider, Provider):
        provider_q = Q(provider=provider)
    try:
        access = AccountAccess.objects.filter(
            provider_q, identifier=identifier
        ).select_related()[0]
    except IndexError:
        return None
    else:
        return access.user
Fetch user for a given provider by id.
6,041
def _generate_throw_error(self, name, reason):
    # the template literal was stripped from the source and its exact form
    # is assumed; the original referenced throw_exc before assignment
    throw_exc = 'throw new {}("{}");'
    self.emit(throw_exc.format(name, reason))
Emits a generic error throwing line.
6,042
def FinalizeTaskStorage(self, task):
    if task.identifier not in self._task_storage_writers:
        # the message is assumed; the string literal was stripped from the source
        raise IOError('Storage writer for task: {0:s} does not exist.'.format(
            task.identifier))
Finalizes a processed task storage. Args: task (Task): task. Raises: IOError: if the task storage does not exist. OSError: if the task storage does not exist.
6,043
def aggregation_not_used_text_element(feature, parent):
    _ = feature, parent  # NOQA
    # the 'string_format' key is assumed; the literal was stripped from the source
    header = aggregation_not_used_text['string_format']
    return header.capitalize()
Retrieve reference title header string from definitions.
6,044
def safe_cast(invar, totype):
    # Make the typecast
    outvar = totype(invar)

    # Check that the cast type matches; the format placeholders were
    # missing in the source
    if not isinstance(outvar, totype):
        raise TypeError("Result of cast to {0} is {1}"
                        .format(totype, type(outvar)))

    # Return the cast value
    return outvar
Performs a "safe" typecast.

Ensures that `invar` properly casts to `totype`. Checks after casting
that the result is actually of type `totype`. Any exceptions raised by
the typecast itself are unhandled.

Parameters
----------
invar
    (arbitrary) -- Value to be typecast.

totype
    |type| -- Type to which `invar` is to be cast.

Returns
-------
outvar
    `type 'totype'` -- Typecast version of `invar`

Raises
------
~exceptions.TypeError
    If result of typecast is not of type `totype`
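A doctest-style illustration with plain builtins:

>>> safe_cast('42', int)
42
>>> safe_cast(3, float)
3.0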
6,045
def interpret_element(element_type: str, text: str, span: str) -> Element:
    return Element(element_type, interpret_span(span), text)
Construct an Element instance from regexp groups.
6,046
def clause_indices(self):
    if not self.is_tagged(CLAUSE_ANNOTATION):
        self.tag_clause_annotations()
    return [word.get(CLAUSE_IDX, None) for word in self[WORDS]]
The list of clause indices in ``words`` layer. The indices are unique only in the boundary of a single sentence.
6,047
def scrape_links(self, text, context=False):
    return self.iter_processed_links(io.StringIO(text), context=context)
Convenience function for scraping from a text string.
6,048
def mpim_open(self, *, users: List[str], **kwargs) -> SlackResponse:
    kwargs.update({"users": users})
    return self.api_call("mpim.open", json=kwargs)
This method opens a multiparty direct message. Args: users (list): A lists of user ids. The ordering of the users is preserved whenever a MPIM group is returned. e.g. ['W1234567890', 'U2345678901', 'U3456789012']
6,049
def intervals(graph):
    interval_graph = Graph()  # graph of the intervals
    heads = [graph.entry]     # list of header nodes
    interv_heads = {}         # interv_heads[i] = interval of header i
    processed = {i: False for i in graph}
    edges = defaultdict(list)

    while heads:
        head = heads.pop(0)

        if not processed[head]:
            processed[head] = True
            interv_heads[head] = Interval(head)

            # Grow the interval: add any node whose predecessors are all
            # already inside it, until a fixed point is reached.
            change = True
            while change:
                change = False
                for node in graph.rpo[1:]:
                    if all(p in interv_heads[head]
                           for p in graph.all_preds(node)):
                        change |= interv_heads[head].add_node(node)

            # A node outside the interval with at least one predecessor
            # inside it is the header of another interval.
            for node in graph:
                if node not in interv_heads[head] and node not in heads:
                    if any(p in interv_heads[head]
                           for p in graph.all_preds(node)):
                        edges[interv_heads[head]].append(node)
                        assert (node not in heads)
                        heads.append(node)

            interval_graph.add_node(interv_heads[head])
            interv_heads[head].compute_end(graph)

    # edges maps an interval to the header nodes of its successor intervals
    for interval, heads in edges.items():
        for head in heads:
            interval_graph.add_edge(interval, interv_heads[head])

    interval_graph.entry = graph.entry.interval
    if graph.exit:
        interval_graph.exit = graph.exit.interval

    return interval_graph, interv_heads
Compute the intervals of the graph.

Returns:
    interval_graph: a graph of the intervals of G
    interv_heads: a dict of (header node, interval)
6,050
def _indexed_ifilter(self, recursive=True, matches=None, flags=FLAGS,
                     forcetype=None):
    match = self._build_matcher(matches, flags)
    if recursive:
        restrict = forcetype if recursive == self.RECURSE_OTHERS else None

        def getter(i, node):
            for ch in self._get_children(node, restrict=restrict):
                yield (i, ch)

        inodes = chain(*(getter(i, n) for i, n in enumerate(self.nodes)))
    else:
        inodes = enumerate(self.nodes)
    for i, node in inodes:
        if (not forcetype or isinstance(node, forcetype)) and match(node):
            yield (i, node)
Iterate over nodes and their corresponding indices in the node list. The arguments are interpreted as for :meth:`ifilter`. For each tuple ``(i, node)`` yielded by this method, ``self.index(node) == i``. Note that if *recursive* is ``True``, ``self.nodes[i]`` might not be the node itself, but will still contain it.
6,051
def p_continue_statement_2(self, p):
    p[0] = self.asttypes.Continue(p[2])
    p[0].setpos(p)
continue_statement : CONTINUE identifier SEMI | CONTINUE identifier AUTOSEMI
6,052
def print_number_str(self, value, justify_right=True):
    # Calculate the length of the value without decimal points; the '.'
    # and overflow string literals were stripped from the source and are
    # assumed here
    length = len(value.translate(None, '.'))
    if length > 4:
        self.print_str('----')
        return
    # Calculate the starting position of the digits
    pos = (4 - length) if justify_right else 0
    # Print each character; periods set the decimal point on the previous digit
    for i, ch in enumerate(value):
        if ch == '.':
            self.set_decimal(pos - 1, True)
        else:
            self.set_digit(pos, ch)
            pos += 1
Print a 4 character long string of numeric values to the display. This function is similar to print_str but will interpret periods not as characters but as decimal points associated with the previous character.
6,053
def _set_mct_l2ys_state(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # the string literals below follow the usual pyangbind template;
        # the originals were stripped from the source and are assumed
        t = YANGDynClass(
            v,
            base=mct_l2ys_state.mct_l2ys_state,
            is_container='container',
            presence=False,
            yang_name="mct-l2ys-state",
            rest_name="mct-l2ys-state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {
                u'info': u'MCT L2sys Operational Information',
                u'cli-suppress-show-path': None}},
            namespace='urn:brocade.com:mgmt:brocade-mct-operational',
            defining_module='brocade-mct-operational',
            yang_type='container',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': 'mct_l2ys_state must be of a type compatible '
                            'with container',
            'defined-type': "container",
            'generated-type': 'YANGDynClass(base=mct_l2ys_state.mct_l2ys_state)',
        })

    self.__mct_l2ys_state = t
    if hasattr(self, '_set'):
        self._set()
Setter method for mct_l2ys_state, mapped from YANG variable /mct_l2ys_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_mct_l2ys_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mct_l2ys_state() directly. YANG Description: MCT L2sys Operational Information
6,054
def parse_fields_http(self, response, extra_org_map=None):
    # the dictionary keys and debug messages below are assumed; the string
    # literals were stripped from the source
    org_map = self.org_map.copy()
    try:
        org_map.update(extra_org_map)
    except (TypeError, ValueError, IndexError, KeyError):
        pass

    try:
        asn_data = {
            'asn': None,
            'asn_date': None,
            'asn_registry': None,
            'asn_cidr': None,
            'asn_country_code': None,
            'asn_description': None
        }

        try:
            net_list = response['nets']['net']
            if not isinstance(net_list, list):
                net_list = [net_list]
        except (KeyError, TypeError):
            log.debug('No networks found in response')
            net_list = []

        for n in reversed(net_list):
            try:
                asn_data['asn_registry'] = (
                    org_map[n['orgRef']['@handle'].upper()]
                )
            except KeyError as e:
                log.debug('Could not parse ASN registry via HTTP: '
                          '{0}'.format(str(e)))
                continue
            break

        if not asn_data['asn_registry']:
            log.debug('Could not parse ASN registry via HTTP')
            raise ASNRegistryError('ASN registry lookup failed.')

    except ASNRegistryError:
        raise

    except Exception as e:
        raise ASNParseError(
            'Parsing failed for "{0}" with exception: {1}'
            .format(response, e)[:100])

    return asn_data
The function for parsing ASN fields from a http response.

Args:
    response (:obj:`str`): The response from the ASN http server.
    extra_org_map (:obj:`dict`): Dictionary mapping org handles to RIRs.
        This is for limited cases where ARIN REST (ASN fallback HTTP
        lookup) does not show an RIR as the org handle e.g., DNIC (which
        is now the built in ORG_MAP) e.g., {'DNIC': 'arin'}. Valid RIR
        values are (note the case-sensitive - this is meant to match the
        REST result): 'ARIN', 'RIPE', 'apnic', 'lacnic', 'afrinic'.
        Defaults to None.

Returns:
    dict: The ASN lookup results

    ::

        {
            'asn' (None) - Cannot retrieve with this method.
            'asn_date' (None) - Cannot retrieve with this method.
            'asn_registry' (str) - The assigned ASN registry
            'asn_cidr' (None) - Cannot retrieve with this method.
            'asn_country_code' (None) - Cannot retrieve with this method.
            'asn_description' (None) - Cannot retrieve with this method.
        }

Raises:
    ASNRegistryError: The ASN registry is not known.
    ASNParseError: ASN parsing failed.
6,055
def setup_client(self, client_id=None, user_data=None, scan=True, broadcast=False):
    if client_id is None:
        client_id = str(uuid.uuid4())

    if client_id in self._clients:
        raise ArgumentError("Duplicate client_id: {}".format(client_id))

    async def _client_callback(conn_string, _, event_name, event):
        event_tuple = (conn_string, event_name, event)
        await self._forward_client_event(client_id, event_tuple)

    client_monitor = self.adapter.register_monitor([], [], _client_callback)

    self._clients[client_id] = dict(user_data=user_data, connections={},
                                    monitor=client_monitor)

    self._adjust_global_events(client_id, scan, broadcast)

    return client_id
Setup a newly connected client. ``client_id`` must be unique among all connected clients. If it is passed as None, a random client_id will be generated as a string and returned. This method reserves internal resources for tracking what devices this client has connected to and installs a monitor into the adapter on behalf of the client. It should be called whenever a new client connects to the device server before any other activities by that client are allowed. By default, all clients start receiving ``device_seen`` events but if you want your client to also receive broadcast events, you can pass broadcast=True. Args: client_id (str): A unique identifier for this client that will be used to refer to it in all future interactions. If this is None, then a random string will be generated for the client_id. user_data (object): An arbitrary object that you would like to store with this client and will be passed to your event handler when events are forwarded to this client. scan (bool): Whether to install a monitor to listen for device_found events. broadcast (bool): Whether to install a monitor to list for broadcast events. Returns: str: The client_id. If a client id was passed in, it will be the same as what was passed in. If no client id was passed in then it will be a random unique string.
6,056
def _compare_rows(from_recs, to_recs, keys):
    "Return the set of keys which have changed."
    return set(
        k for k in keys
        if sorted(from_recs[k].items()) != sorted(to_recs[k].items())
    )
Return the set of keys which have changed.
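A doctest-style illustration (the record dicts are made up):

>>> old = {'a': {'x': 1}, 'b': {'x': 2}}
>>> new = {'a': {'x': 1}, 'b': {'x': 3}}
>>> _compare_rows(old, new, ['a', 'b'])
{'b'}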
6,057
def build_url_request(self):
    params = {}
    headers = {}

    self._authenticator(params, headers)
    self._grant(params)

    return Request(self._endpoint, urlencode(params), headers)
Consults the authenticator and grant for HTTP request parameters and headers to send with the access token request, builds the request using the stored endpoint and returns it.
6,058
def from_iterable(cls, frames, sort=False):
    return FrameSet(sorted(frames) if sort else frames)
Build a :class:`FrameSet` from an iterable of frames. Args: frames (collections.Iterable): an iterable object containing frames as integers sort (bool): True to sort frames before creation, default is False Returns: :class:`FrameSet`:
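A small usage sketch (iterating a FrameSet yielding its frames is assumed):

>>> fs = FrameSet.from_iterable([5, 1, 3], sort=True)
>>> list(fs)
[1, 3, 5]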
6,059
def trace_dispatch(self, frame, event, arg):
    if hasattr(self, 'pdb'):
        return self.pdb.trace_dispatch(frame, event, arg)
    else:
        return Pdb.trace_dispatch(self, frame, event, arg)
allow to switch to Pdb instance
6,060
def add(self, f_ipaddr, f_macaddr, f_hostname, f_netbios_name, f_engineer,
        f_asset_group, f_confirmed):
    return self.send.host_add(f_ipaddr, f_macaddr, f_hostname,
                              f_netbios_name, f_engineer, f_asset_group,
                              f_confirmed)
Add a t_hosts record :param f_ipaddr: IP address :param f_macaddr: MAC Address :param f_hostname: Hostname :param f_netbios_name: NetBIOS Name :param f_engineer: Engineer username :param f_asset_group: Asset group :param f_confirmed: Confirmed boolean :return: (True/False, t_hosts.id or response message)
6,061
def save(self, filename, ftype='HDF5'):
    # string literals are restored on a best-effort basis; the originals
    # were stripped from the source
    from ..param import Param

    def gather_params(self, plist):
        if isinstance(self, Param):
            plist.append(self)

    plist = []
    self.traverse(gather_params, plist)
    names = self.parameter_names(adjust_for_printing=True)
    if ftype == 'HDF5':
        try:
            import h5py
            f = h5py.File(filename, 'w')
            for p, n in zip(plist, names):
                n = n.replace('.', '_')
                p = p.values
                d = f.create_dataset(n, p.shape, dtype=p.dtype)
                d[:] = p
            if hasattr(self, 'param_array'):
                d = f.create_dataset('param_array', self.param_array.shape,
                                     dtype=self.param_array.dtype)
                d[:] = self.param_array
            f.close()
        except:
            raise
Save all the model parameters into a file (HDF5 by default).

This is not supported yet. We are working on having a consistent, human
readable way of saving and loading GPy models. This only saves the
parameter array to a hdf5 file. In order to load the model again, use
the same script for building the model you used to build this model.
Then load the param array from this hdf5 file and set the parameters of
the created model:

>>> m[:] = h5_file['param_array']

This is less than optimal, we are working on a better solution to that.
6,062
def create_mbed_detector(**kwargs):
    host_os = platform.system()
    if host_os == "Windows":
        from .windows import StlinkDetectWindows
        return StlinkDetectWindows(**kwargs)
    elif host_os == "Linux":
        from .linux import StlinkDetectLinuxGeneric
        return StlinkDetectLinuxGeneric(**kwargs)
    elif host_os == "Darwin":
        from .darwin import StlinkDetectDarwin
        return StlinkDetectDarwin(**kwargs)
    else:
        return None
! Factory used to create host OS specific mbed-lstools object :param kwargs: keyword arguments to pass along to the constructors @return Returns MbedLsTools object or None if host OS is not supported
6,063
def get_sorted_source_files(
        self,
        source_filenames_or_globs: Union[str, List[str]],
        recursive: bool = True) -> List[str]:
    if isinstance(source_filenames_or_globs, str):
        source_filenames_or_globs = [source_filenames_or_globs]
    final_filenames = []
    for sfg in source_filenames_or_globs:
        sfg_expanded = expanduser(sfg)
        log.debug("Looking for: {!r}", sfg_expanded)
        for filename in glob.glob(sfg_expanded, recursive=recursive):
            log.debug("Trying: {!r}", filename)
            if self.should_exclude(filename):
                log.info("Skipping file {!r}", filename)
                continue
            final_filenames.append(filename)
    final_filenames.sort()
    return final_filenames
Returns a sorted list of filenames to process, from a filename, a glob string, or a list of filenames/globs. Args: source_filenames_or_globs: filename/glob, or list of them recursive: use :func:`glob.glob` in recursive mode? Returns: sorted list of files to process
6,064
def create_host(self, host_id, name, ipaddr, rack_id=None):
    return hosts.create_host(self, host_id, name, ipaddr, rack_id)
Create a host. @param host_id: The host id. @param name: Host name @param ipaddr: IP address @param rack_id: Rack id. Default None. @return: An ApiHost object
6,065
def _encode_dict_as_string(value):
    if value.startswith("{\n"):
        value = "{" + value[2:]
    if value.endswith("\n}"):
        value = value[:-2] + "}"
    # escape double quotes and newlines; the first replace pair is assumed,
    # its literals were stripped from the source
    return value.replace('"', '\\"').replace("\\n", "\\\\n").replace("\n", "\\n")
Takes the PLIST string of a dict, and returns the same string encoded such that it can be included in the string representation of a GSNode.
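A doctest-style illustration of the re-encoding (the input string is made up):

>>> _encode_dict_as_string('{\na = 1;\nb = "x";\n}')
'{a = 1;\\nb = \\"x\\";}'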
6,066
def get_extended_summaryf(self, *args, **kwargs):
    def func(f):
        doc = f.__doc__
        self.get_extended_summary(doc or '', *args, **kwargs)
        return f
    return func
Extract the extended summary from a function docstring This function can be used as a decorator to extract the extended summary of a function docstring (similar to :meth:`get_sectionsf`). Parameters ---------- ``*args`` and ``**kwargs`` See the :meth:`get_extended_summary` method. Note, that the first argument will be the docstring of the specified function Returns ------- function Wrapper that takes a function as input and registers its summary via the :meth:`get_extended_summary` method
6,067
def _get_pieces(tiles, ports, players_opts, pieces_opts):
    # the string option aliases and the warning message are assumed; the
    # literals were stripped from the source
    if pieces_opts == Opt.empty:
        return dict()
    elif pieces_opts == Opt.debug:
        players = catan.game.Game.get_debug_players()
        return {
            (hexgrid.NODE, 0x23): catan.pieces.Piece(catan.pieces.PieceType.settlement, players[0]),
            (hexgrid.EDGE, 0x22): catan.pieces.Piece(catan.pieces.PieceType.road, players[0]),
            (hexgrid.NODE, 0x67): catan.pieces.Piece(catan.pieces.PieceType.settlement, players[1]),
            (hexgrid.EDGE, 0x98): catan.pieces.Piece(catan.pieces.PieceType.road, players[1]),
            (hexgrid.NODE, 0x87): catan.pieces.Piece(catan.pieces.PieceType.settlement, players[2]),
            (hexgrid.EDGE, 0x89): catan.pieces.Piece(catan.pieces.PieceType.road, players[2]),
            (hexgrid.EDGE, 0xA9): catan.pieces.Piece(catan.pieces.PieceType.road, players[3]),
            (hexgrid.TILE, 0x77): catan.pieces.Piece(catan.pieces.PieceType.robber, None),
        }
    elif pieces_opts in (Opt.preset, 'preset'):
        deserts = filter(lambda tile: tile.terrain == catan.board.Terrain.desert, tiles)
        coord = hexgrid.tile_id_to_coord(list(deserts)[0].tile_id)
        return {
            (hexgrid.TILE, coord): catan.pieces.Piece(catan.pieces.PieceType.robber, None)
        }
    elif pieces_opts in (Opt.random, 'random'):
        logging.warning('{} option not yet implemented'.format(pieces_opts))
Generate a dictionary of pieces using the given options. pieces options supported: - Opt.empty -> no locations have pieces - Opt.random -> - Opt.preset -> robber is placed on the first desert found - Opt.debug -> a variety of pieces are placed around the board :param tiles: list of tiles from _generate_tiles :param ports: list of ports from _generate_ports :param players_opts: Opt :param pieces_opts: Opt :return: dictionary mapping (hexgrid.TYPE, coord:int) -> Piece
6,068
def pull_alignments_from(self, reads_to_use, shallow=False):
    new_alnmat = self.copy(shallow=shallow)
    for hid in xrange(self.num_haplotypes):
        hdata = new_alnmat.data[hid]
        hdata.data *= reads_to_use[hdata.indices]
        hdata.eliminate_zeros()
    if new_alnmat.count is not None:
        new_alnmat.count[np.logical_not(reads_to_use)] = 0
    return new_alnmat
Pull out alignments of certain reads

:param reads_to_use: numpy array of dtype=bool specifying which reads to use
:param shallow: whether to copy sparse 3D matrix only or not
:return: a new AlignmentPropertyMatrix object that keeps only the alignments of the specified reads
6,069
def crop_to_seg_extents(img, seg, padding):
    beg_coords, end_coords = crop_coords(seg, padding)

    img = crop_3dimage(img, beg_coords, end_coords)
    seg = crop_3dimage(seg, beg_coords, end_coords)

    return img, seg
Crop the image (usually MRI) to fit within the bounding box of a segmentation (or set of seg)
6,070
def update_item(self, item, expected_value=None, return_values=None):
    expected_value = self.dynamize_expected_value(expected_value)
    key = self.build_key_from_values(item.table.schema,
                                     item.hash_key, item.range_key)
    attr_updates = self.dynamize_attribute_updates(item._updates)

    response = self.layer1.update_item(item.table.name, key,
                                       attr_updates,
                                       expected_value, return_values,
                                       object_hook=item_object_hook)
    item._updates.clear()
    if 'ConsumedCapacityUnits' in response:
        item.consumed_units = response['ConsumedCapacityUnits']
    return response
Commit pending item updates to Amazon DynamoDB. :type item: :class:`boto.dynamodb.item.Item` :param item: The Item to update in Amazon DynamoDB. It is expected that you would have called the add_attribute, put_attribute and/or delete_attribute methods on this Item prior to calling this method. Those queued changes are what will be updated. :type expected_value: dict :param expected_value: A dictionary of name/value pairs that you expect. This dictionary should have name/value pairs where the name is the name of the attribute and the value is either the value you are expecting or False if you expect the attribute not to exist. :type return_values: str :param return_values: Controls the return of attribute name/value pairs before they were updated. Possible values are: None, 'ALL_OLD', 'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is specified and the item is overwritten, the content of the old item is returned. If 'ALL_NEW' is specified, then all the attributes of the new version of the item are returned. If 'UPDATED_NEW' is specified, the new versions of only the updated attributes are returned.
6,071
def cmp(self, range2, overlap_size=0):
    if self.overlaps(range2, padding=overlap_size):
        return 0
    if self.chr < range2.chr:
        return -1
    elif self.chr > range2.chr:
        return 1
    elif self.end < range2.start:
        return -1
    elif self.start > range2.end:
        return 1
    sys.stderr.write("ERROR: cmp function unexpected state\n")
    sys.exit()
    return 0
The comparator for ranges.

* return 1 if greater than range2
* return -1 if less than range2
* return 0 if overlapped

:param range2:
:param overlap_size: allow some padding for an 'equal' comparison (default 0)
:type range2: GenomicRange
:type overlap_size: int
6,072
def version(self):
    try:
        # the awk program literal was stripped from the source; selecting
        # the second line of the latest.txt listing is assumed
        _version = (curl[Gentoo._LATEST_TXT] |
                    awk['NR==2'] |
                    cut["-f2", "-d="])().strip()
        _version = datetime.utcfromtimestamp(int(_version))\
            .strftime("%Y-%m-%d")
    except ProcessExecutionError as proc_ex:
        _version = "unknown"
        LOG.error("Could not determine timestamp: %s", str(proc_ex))
    return _version
Return the build date of the gentoo container.
6,073
def create(self):
    if not self._sync and not hasattr(self, '_buffer'):
        self._buffer = {}
    if not os.path.exists(self.cache_dir):
        os.makedirs(self.cache_dir)
Create the write buffer and cache directory.
6,074
def add(self, interval, offset):
    start, stop = self.get_start_stop(interval)
    if len(self.starts) > 0:
        if start < self.starts[-1] or offset <= self.offsets[-1][1]:
            # the message is assumed; the string literal was stripped
            raise ValueError('intervals and offsets must be added in sorted order')
        self.offsets[-1][1] = offset
        self.offsets[-1][2] += 1
    else:
        self.starts.append(start)
        self.stops.append(stop)
        self.offsets.append([offset, offset, 1])
The added interval must be overlapping or beyond the last stored interval ie. added in sorted order. :param interval: interval to add :param offset: full virtual offset to add :return:
6,075
def make_request(self, method, path, params={}, body="", username=None,
                 password=None, base_uri=None, content_type=None):
    # header names, auth_details keys and error messages are restored on a
    # best-effort basis; the string literals were stripped from the source
    headers = {
        'User-Agent': CreateSend.user_agent,
        'Content-Type': 'application/json; charset=utf-8',
        'Accept-Encoding': 'gzip, deflate'}
    if content_type:
        headers['Content-Type'] = content_type
    parsed_base_uri = urlparse(
        CreateSend.base_uri if not base_uri else base_uri)
    if username and password:
        headers['Authorization'] = "Basic %s" % base64.b64encode(
            ("%s:%s" % (username, password)).encode()).decode()
    elif self.auth_details:
        if 'api_key' in self.auth_details and self.auth_details['api_key']:
            headers['Authorization'] = "Basic %s" % base64.b64encode(
                ("%s:x" % self.auth_details['api_key']).encode()).decode()
        elif 'access_token' in self.auth_details and \
                self.auth_details['access_token']:
            headers['Authorization'] = "Bearer %s" % self.auth_details[
                'access_token']

    self.headers = headers

    if self.fake_web:
        actual_url = "https://%s%s" % (
            parsed_base_uri.netloc,
            self.build_url(parsed_base_uri, path, params))
        self.faker.actual_url = actual_url

        def same_urls(url_a, url_b):
            a = urlparse(url_a)
            b = urlparse(url_b)
            return (a.scheme == b.scheme and
                    a.netloc == b.netloc and
                    a.path == b.path and
                    a.params == b.params and
                    parse_qs(a.query) == parse_qs(b.query) and
                    a.fragment == b.fragment)

        if not same_urls(self.faker.url, actual_url):
            raise Exception("Fake URL (%s) doesn't match actual URL (%s)" % (
                self.faker.url, actual_url))

        self.faker.actual_body = body

        def same_bodies(body_a, body_b):
            return json.loads(body_a) == json.loads(body_b)

        if self.faker.body is not None:
            if not same_bodies(self.faker.body, body):
                raise Exception(
                    "Fake body (%s) doesn't match actual body (%s)" % (
                        self.faker.body, body))

        data = self.faker.open() if self.faker else ''
        status = self.faker.status if (
            self.faker and self.faker.status) else 200
        return self.handle_response(status, data)

    c = VerifiedHTTPSConnection(parsed_base_uri.netloc, timeout=self.timeout)
    c.request(method, self.build_url(
        parsed_base_uri, path, params), body, headers)
    response = c.getresponse()
    if response.getheader('Content-Encoding', '') == 'gzip':
        data = gzip.GzipFile(fileobj=BytesIO(response.read())).read()
    else:
        data = response.read()
    c.close()
    return self.handle_response(response.status, data)
username and password should only be set when it is intended that the default basic authentication mechanism using the API key be overridden (e.g. when using the apikey route with username and password).
6,076
def derivative(self, point=None):
    # the 'constant' literal follows the docstring's description of pad_mode
    if self.pad_mode == 'constant' and self.pad_const != 0:
        return PartialDerivative(self.domain, self.axis, self.range,
                                 self.method, self.pad_mode, 0)
    else:
        return self
Return the derivative operator. The partial derivative is usually linear, but in case the 'constant' ``pad_mode`` is used with nonzero ``pad_const``, the derivative is given by the derivative with 0 ``pad_const``. Parameters ---------- point : `domain` `element-like`, optional The point to take the derivative in. Does not change the result since the operator is affine.
6,077
def cache(opts, serial):
    # the loader tag and pack keys follow the usual salt loader conventions;
    # the string literals were stripped from the source
    return LazyLoader(
        _module_dirs(opts, 'cache', 'cache'),
        opts,
        tag='cache',
        pack={'__opts__': opts, '__context__': {'serial': serial}},
    )
Returns the cache modules
6,078
def generate(self, api):
    for namespace in api.namespaces.values():
        # the module filename template is assumed; the literal was stripped
        # from the source
        with self.output_to_relative_path('{}.py'.format(fmt_namespace(namespace.name))):
            self._generate_base_namespace_module(namespace)
Generates a module for each namespace. Each namespace will have Python classes to represent data types and routes in the Stone spec.
6,079
def clr(args):
    p = OptionParser(clr.__doc__)
    p.set_bedpe()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    bedpe, ref = args
    if bedpe.endswith(".bam"):
        bedpefile = bedpe.replace(".bam", ".bedpe")
        if need_update(bedpe, bedpefile):
            cmd = "bamToBed -bedpe -i {0}".format(bedpe)
            sh(cmd, outfile=bedpefile)
        bedpe = bedpefile

    filtered = bedpe + ".filtered"
    if need_update(bedpe, filtered):
        filter_bedpe(bedpe, filtered, ref, rc=opts.rc,
                     minlen=opts.minlen, maxlen=opts.maxlen)

    rmdup = filtered + ".sorted.rmdup"
    if need_update(filtered, rmdup):
        rmdup_bedpe(filtered, rmdup, dupwiggle=opts.dup)

    converted = rmdup + ".converted"
    if need_update(rmdup, converted):
        fp = open(rmdup)
        fw = open(converted, "w")
        for row in fp:
            r = BedpeLine(row)
            print(r.bedline, file=fw)
        fw.close()

    merged = converted + ".merge.bed"
    if need_update(converted, merged):
        mergeBed(converted)
%prog clr [bamfile|bedpefile] ref.fasta Use mates from BEDPE to extract ranges where the ref is covered by mates. This is useful in detection of chimeric contigs.
6,080
def get_all_resources(datasets):
    resources = []
    for dataset in datasets:
        for resource in dataset.get_resources():
            resources.append(resource)
    return resources
Get all resources from a list of datasets (such as returned by search) Args: datasets (List[Dataset]): list of datasets Returns: List[hdx.data.resource.Resource]: list of resources within those datasets
6,081
def DeleteResource(self, path, type, id, initial_headers, options=None):
    if options is None:
        options = {}

    initial_headers = initial_headers or self.default_headers
    headers = base.GetHeaders(self, initial_headers, 'delete', path, id,
                              type, options)
    request = request_object._RequestObject(type,
                                            documents._OperationType.Delete)
    result, self.last_response_headers = self.__Delete(path, request, headers)

    # update session if the request mutates data on the server side
    self._UpdateSessionIfRequired(headers, result, self.last_response_headers)
    return result
Deletes a Azure Cosmos resource and returns it. :param str path: :param str type: :param str id: :param dict initial_headers: :param dict options: The request options for the request. :return: The deleted Azure Cosmos resource. :rtype: dict
6,082
def update_existing_peers(self, num_to_remove, peer_table=None, con=None, path=None):
    if path is None:
        path = self.atlasdb_path

    # remove stale peers
    if self.last_clean_time + atlas_peer_clean_interval() < time_now():
        log.debug("%s: revalidate old peers" % self.my_hostport)
        atlas_revalidate_peers(con=con, path=path, peer_table=peer_table)
        self.last_clean_time = time_now()

    removed = self.remove_unhealthy_peers(num_to_remove, con=con, path=path,
                                          peer_table=peer_table)

    # if a removed peer was also queued as new, drop it from that set too
    for peer in removed:
        if peer in self.new_peers:
            self.new_peers.remove(peer)

    return len(removed)
Update the set of existing peers:
* revalidate the existing but old peers
* remove at most $num_to_remove unhealthy peers

Return the number of peers removed.
6,083
def _does_not_contain_replica_sections(sysmeta_pyxb):
    if len(getattr(sysmeta_pyxb, 'replica', [])):
        raise d1_common.types.exceptions.InvalidSystemMetadata(
            0,
            # The original message literal was lost in extraction; this is a
            # reconstruction of its intent.
            'A replica section was included. pid="{}"'.format(
                d1_common.xml.get_req_val(sysmeta_pyxb.identifier)
            ),
            identifier=d1_common.xml.get_req_val(sysmeta_pyxb.identifier),
        )
Assert that ``sysmeta_pyxb`` does not contain any replica information.
6,084
def generate_key(filepath):
    fs = path.abspath(path.expanduser(filepath))
    # Fernet keys are bytes, so the file must be opened in binary mode.
    with open(fs, 'wb') as outfile:
        outfile.write(Fernet.generate_key())
    chmod(fs, 0o400)
    return fs
Generates a new, random secret key at the given location on the filesystem and returns its path.
6,085
def build_absolute_uri(request, url):
    # The original setting keys were lost in extraction; 'BASE_URL' is a
    # placeholder name for the configured override URL.
    if app_settings.get('BASE_URL'):
        return urljoin(app_settings.get('BASE_URL'), url)
    return request.build_absolute_uri(url)
Allow overriding the printed URL, which is not necessarily on the same server instance.
6,086
def valid_address(address):
    if not address:
        return False

    # An address is either "host" or "host:port".
    components = str(address).split(':')
    if len(components) > 2 or not valid_hostname(components[0]):
        return False

    if len(components) == 2 and not valid_port(components[1]):
        return False

    return True
Determines whether the specified address string is valid.
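Given the valid_hostname and valid_port helpers referenced above, the behaviour can be illustrated with a few checks (assuming those helpers behave as their names suggest):

assert valid_address("example.com")          # bare hostname
assert valid_address("example.com:8080")     # hostname plus port
assert not valid_address("a:b:c")            # too many colon-separated parts
assert not valid_address("")                 # empty input is rejected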
6,087
def _plot_ts_cols(ts):
    logger_dataframes.info("enter get_ts_cols()")
    d = {}

    try:
        units = " (" + ts["paleoData_units"] + ")"
    except KeyError as e:
        units = ""
        logger_dataframes.warn("get_ts_cols: KeyError: paleoData_units not found, {}".format(e))

    try:
        d[ts["paleoData_variableName"] + units] = ts["paleoData_values"]
    except KeyError as e:
        logger_dataframes.warn("get_ts_cols: KeyError: variableName or values not found, {}".format(e))

    for k, v in ts.items():
        if re_pandas_x_num.match(k):
            try:
                units = " (" + ts[k + "Units"] + ")"
                d[k + units] = v
            except KeyError as e:
                logger_dataframes.warn("get_ts_cols: KeyError: Special column units, {}, {}".format(k, e))

    logger_dataframes.info("exit get_ts_cols: found {}".format(len(d)))
    return d
Get variable + values vs year, age, depth (whichever are available).

:param dict ts: TimeSeries dictionary
:return dict: Key: variableName, Value: Pandas Series object
6,088
def video_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
    del vocab_size
    logits = top_out
    logits = tf.reshape(logits, [-1] + common_layers.shape_list(logits)[2:])
    targets = tf.reshape(targets, [-1] + common_layers.shape_list(targets)[2:])
    cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.01)
    return common_layers.padded_cross_entropy(
        logits,
        targets,
        model_hparams.label_smoothing,
        cutoff=cutoff,
        weights_fn=weights_fn)
Compute loss numerator and denominator for one shard of output.
6,089
def _collect_args(args) -> ISeq:
    if isinstance(args, tuple):
        return llist.list(args)
    raise TypeError("Python variadic arguments should always be a tuple")
Collect Python starred arguments into a Basilisp list.
6,090
def diff_medians(array_one, array_two):
    array_one = check_array(array_one)
    array_two = check_array(array_two)
    diff_medians = np.ma.median(array_one) - np.ma.median(array_two)
    return diff_medians
Computes the difference in medians between two arrays of values.

Given arrays will be flattened (to 1D array) regardless of dimension,
and any non-finite/NaN values will be ignored.

Parameters
----------
array_one, array_two : iterable
    Two arrays of values, possibly of different length.

Returns
-------
diff_medians : float
    Scalar measuring the difference in medians, ignoring NaNs/non-finite values.

Raises
------
ValueError
    If one or more of the arrays are empty.
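A minimal sketch of the behaviour, assuming check_array masks non-finite values as the docstring states:

import numpy as np

a = np.array([1.0, 2.0, 3.0, np.nan])  # NaN is ignored via masking
b = np.array([4.0, 5.0, 6.0])
print(diff_medians(a, b))               # median 2.0 - median 5.0 = -3.0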
6,091
def get_equalisers(self):
    if not self.__equalisers:
        # 'equalisers' key assumed; the original API lookup key was lost
        # in extraction.
        self.__equalisers = yield from self.handle_list(
            self.API.get('equalisers'))
    return self.__equalisers
Get the equaliser modes supported by this device.
6,092
def getDataFromFIFO(self, bytesToRead):
    return self.i2c_io.readBlock(self.MPU6050_ADDRESS, self.MPU6050_RA_FIFO_R_W, bytesToRead)
Reads the specified number of bytes from the FIFO. Should be called after a
call to getFifoCount to ensure there is new data available (to avoid reading
duplicate data).

:param bytesToRead: the number of bytes to read.
:return: the bytes read.
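A short usage sketch, assuming the getFifoCount method mentioned in the docstring exists on the same class and sensor is an instance of it:

# Read only as many bytes as the FIFO currently holds, to avoid stale reads.
count = sensor.getFifoCount()
if count > 0:
    data = sensor.getDataFromFIFO(count)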
6,093
def delete(self):
    removed = False
    try:
        with db.session.begin_nested():
            if self.is_new():
                # Not yet registered: remove the row entirely.
                db.session.delete(self)
                removed = True
            else:
                # Already registered: keep the row but mark it deleted.
                self.status = PIDStatus.DELETED
                db.session.add(self)
    except SQLAlchemyError:
        logger.exception("Failed to delete PID.", extra=dict(pid=self))
        raise
    if removed:
        logger.info("Deleted PID (removed).", extra=dict(pid=self))
    else:
        logger.info("Deleted PID.", extra=dict(pid=self))
    return True
Delete the persistent identifier.

If the persistent identifier hasn't been registered yet, it is removed
from the database. Otherwise, it's marked as
:attr:`invenio_pidstore.models.PIDStatus.DELETED`.

:returns: `True` if the PID is successfully removed.
6,094
def dim(self):
    if len(self._orb.vxvv) == 2:
        return 1
    elif len(self._orb.vxvv) == 3 or len(self._orb.vxvv) == 4:
        return 2
    elif len(self._orb.vxvv) == 5 or len(self._orb.vxvv) == 6:
        return 3
NAME:

    dim

PURPOSE:

    return the dimension of the Orbit

INPUT:

    (none)

OUTPUT:

    dimension

HISTORY:

    2011-02-03 - Written - Bovy (NYU)
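For illustration, a hedged example using galpy's Orbit, which this method belongs to; a three-element vxvv of [R, vR, vT] describes a planar orbit:

from galpy.orbit import Orbit

o = Orbit(vxvv=[1.0, 0.1, 1.1])  # [R, vR, vT] -> planar orbit
print(o.dim())                   # 2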
6,095
def QA_SU_save_stock_terminated(client=DATABASE):
    # This tushare endpoint is known to be defunct.
    print("!!! This tushare function no longer works !!!")
    df = QATs.get_terminated()
    print(
        " Get stock terminated from tushare, stock count is %d (delisted-stock list)"
        % len(df)
    )
    coll = client.stock_terminated
    client.drop_collection(coll)
    json_data = json.loads(df.reset_index().to_json(orient='records'))
    coll.insert(json_data)
    print(" Saved the delisted-stock list to the stock_terminated collection, OK")
Fetch the list of stocks whose listing has been terminated (delisted).
The data comes from the Shanghai Stock Exchange, so currently only
stocks delisted from the SSE are included.

collection:
    code: stock code
    name: stock name
    oDate: listing date
    tDate: delisting date

:param client:
:return: None
6,096
def analyses(self):
    response = self._request("tasks/list")
    # 'tasks' key assumed; the original JSON key literal was lost in
    # extraction (the endpoint follows a Cuckoo-style "tasks/list" API).
    return json.loads(response.content.decode())['tasks']
Retrieve a list of analyzed samples.

:rtype: list
:return: List of objects referencing each analyzed file.
6,097
def set_level(self, position, channel=None):
    try:
        position = float(position)
    except Exception as err:
        LOG.debug("HelperLevel.set_level: Exception %s" % (err,))
        return False
    self.writeNodeData("LEVEL", position, channel)
Set a specific level by specifying a float from 0.0 to 1.0.
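A trivial hedged usage sketch, with device standing in for any entity exposing this helper:

# Dim to 50%; note the helper above does not clamp out-of-range values.
device.set_level(0.5)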
6,098
def set_breaks_and_labels(self, ranges, layout_info, pidx):
    ax = self.axs[pidx]
    facet.set_breaks_and_labels(self, ranges, layout_info, pidx)
    # Only show ticks on the bottom and left spines.
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
Add breaks and labels to the axes.

Parameters
----------
ranges : dict-like
    range information for the axes
layout_info : dict-like
    facet layout information
pidx : int
    Panel index
6,099
def read_input_registers(slave_id, starting_address, quantity):
    function = ReadInputRegisters()
    function.starting_address = starting_address
    function.quantity = quantity

    return _create_request_adu(slave_id, function.request_pdu)
Return ADU for Modbus function code 04: Read Input Registers.

:param slave_id: Number of slave.
:param starting_address: Address of the first input register to read.
:param quantity: Number of input registers to read.
:return: Byte array with ADU.
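A hedged end-to-end sketch in the umodbus style this function suggests; the host, port, and raw socket transport are assumptions, not part of the API above:

import socket

# Build the ADU for reading 2 input registers starting at address 0 on slave 1.
adu = read_input_registers(slave_id=1, starting_address=0, quantity=2)

# Assumed transport: a plain Modbus/TCP connection on the default port 502.
with socket.create_connection(('192.168.1.10', 502)) as sock:
    sock.sendall(adu)
    response = sock.recv(1024)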