Columns: "Unnamed: 0" (int64, row index, 0 to ~389k); "code" (string, lengths 26 to 79.6k characters); "docstring" (string, lengths 1 to 46.9k characters). Each record below is listed as: row index, code, docstring.
388,000
def server_info(self):
    # the request-body key is assumed to be "apikey"
    response = self._post(self.apiurl + "/v2/server/info", data={"apikey": self.apikey})
    return self._raise_or_extract(response)
Query information about the server.
388,001
def get_by_index(self, index):
    try:
        return self[index]
    except KeyError:
        for v in self.get_volumes():
            if v.index == str(index):
                return v
        raise KeyError(index)
Returns a Volume or Disk by its index.
388,002
def run_subprocess(executable_command, command_arguments=[], timeout=None, print_process_output=True,
                   stdout_file=None, stderr_file=None, poll_seconds=.100, buffer_size=-1,
                   daemon=False, return_std=False):
    assert_variable_type(command_arguments, list)
    assert_variable_type(executable_command, str)
    _string_vars = [stdout_file, stderr_file]
    [assert_variable_type(x, [str, NoneType, unicode]) for x in _string_vars + command_arguments]
    assert_variable_type(print_process_output, bool)
    assert_variable_type(return_std, bool)
    _float_vars = [timeout, poll_seconds]
    [assert_variable_type(x, [int, float, NoneType]) for x in _float_vars]
    global process, _nbsr_stdout, _nbsr_stderr
    process = None
    _nbsr_stdout = None
    _nbsr_stderr = None

    def _exec_subprocess():
        global process, _nbsr_stdout, _nbsr_stderr
        process = subprocess.Popen([executable_command] + command_arguments,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   bufsize=buffer_size,
                                   preexec_fn=os.setsid)
        _nbsr_stdout = NBSRW(process.stdout, print_process_output, stdout_file)
        _nbsr_stderr = NBSRW(process.stderr, print_process_output, stderr_file)
        if daemon:
            return
        _deadline = None
        if timeout is not None:
            _deadline = timeit.default_timer() + timeout
        while process.poll() is None:
            if _deadline is not None and timeit.default_timer() > _deadline and process.poll() is None:
                os.killpg(process.pid, signal.SIGTERM)
                raise TimeoutError("Sub-process did not complete before %.4f seconds elapsed" % (timeout))
            time.sleep(poll_seconds)

    execution_time = timeit.timeit(_exec_subprocess, number=1)
    if return_std:
        return process, execution_time, _nbsr_stdout, _nbsr_stderr
    return process, execution_time
Create and run a subprocess and return the process and execution time after it has completed. The execution time does not include the time taken for file i/o when logging the output if stdout_file and stderr_file arguments are given. Positional arguments: executable_command (str) -- executable command to run command_arguments (list) -- command line arguments timeout (int/float) -- how many seconds to allow for process completion print_process_output (bool) -- whether to print the process' live output stdout_file (str) -- file to log stdout to stderr_file (str) -- file to log stderr to poll_seconds(int/float) -- how often in seconds to poll the subprocess to check for completion daemon(bool) -- whether the process is a daemon. If True, returns process immediately after creation along with start time rather than execution time. return_std (bool) -- whether to return a reference to the processes' NBSRW stdout and stderr
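A minimal usage sketch for run_subprocess above (the command and arguments shown are illustrative, not from the source):
process, execution_time = run_subprocess("ls", command_arguments=["-la"], timeout=30)
print(process.returncode, "finished in", execution_time, "seconds")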
388,003
def main():
    args = _parse_arg(CountryConverter().valid_class)
    coco = CountryConverter(additional_data=args.additional_data)
    converted_names = coco.convert(
        names=args.names,
        src=args.src,
        to=args.to,
        enforce_list=False,
        not_found=args.not_found)
    print(args.output_sep.join(
        [str(etr) for etr in converted_names]
        if isinstance(converted_names, list) else [str(converted_names)]))
Main entry point - used for command line call
388,004
def mkdir(dir_path):
    if not os.path.isdir(dir_path) or not os.path.exists(dir_path):
        os.makedirs(dir_path)
Make the directory if it does not already exist.
388,005
async def genSchema(self, name, version, attrNames) -> Schema:
    schema = Schema(name, version, attrNames, self.issuerId)
    return await self.wallet.submitSchema(schema)
Generates and submits Schema. :param name: schema name :param version: schema version :param attrNames: a list of attributes the schema contains :return: submitted Schema
388,006
def _clean_rule(self, rule):
    if rule.at_keyword is not None:
        return rule
    cleaned_token_list = []
    for token_list in split_on_comma(rule.selector):
        if self._token_list_matches_tree(token_list):
            if len(cleaned_token_list) > 0:
                # assumed: a comma delimiter token re-joins the kept selectors
                cleaned_token_list.append(
                    cssselect.parser.Token('DELIM', ',', len(cleaned_token_list) + 1))
            cleaned_token_list += token_list
    if not cleaned_token_list:
        return None
    rule.selector = cleaned_token_list
    return rule
Cleans a css Rule by removing Selectors without matches on the tree Returns None if the whole rule do not match :param rule: CSS Rule to check :type rule: A tinycss Rule object :returns: A cleaned tinycss Rule with only Selectors matching the tree or None :rtype: tinycss Rule or None
388,007
def GeneratePassphrase(length=20):
    valid_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    valid_chars += "0123456789 ,-_&$"
    return "".join(random.choice(valid_chars) for i in range(length))
Create a 20 char passphrase with easily typeable chars.
388,008
def element_to_objects(payload: Dict) -> List: entities = [] cls = MAPPINGS.get(payload.get()) if not cls: return [] transformed = transform_attributes(payload, cls) entity = cls(**transformed) if hasattr(entity, "post_receive"): entity.post_receive() entities.append(entity) return entities
Transform an Element to a list of entities recursively.
388,009
def last_modified(self):
    if self.entries:
        latest = max(self.entries, key=lambda x: x.last_modified)
        return arrow.get(latest.last_modified)
    return arrow.get()
Gets the most recent modification time for all entries in the view
388,010
def natsorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
    key = natsort_keygen(key, alg)
    return sorted(seq, reverse=reverse, key=key)
Sorts an iterable naturally. Parameters ---------- seq : iterable The input to sort. key : callable, optional A key used to determine how to sort each element of the iterable. It is **not** applied recursively. It should accept a single argument and return a single value. reverse : {{True, False}}, optional Return the list in reversed sorted order. The default is `False`. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.INT`. Returns ------- out: list The sorted input. See Also -------- natsort_keygen : Generates the key that makes natural sorting possible. realsorted : A wrapper for ``natsorted(seq, alg=ns.REAL)``. humansorted : A wrapper for ``natsorted(seq, alg=ns.LOCALE)``. index_natsorted : Returns the sorted indexes from `natsorted`. Examples -------- Use `natsorted` just like the builtin `sorted`:: >>> a = ['num3', 'num5', 'num2'] >>> natsorted(a) [{u}'num2', {u}'num3', {u}'num5']
388,011
def triggerid_get(hostid=None, trigger_desc=None, priority=4, **kwargs): s docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see modules docstring) :return: Trigger ID and description. False if no trigger found or on failure. CLI Example: .. code-block:: bash salt zabbix.triggerid_get 1111 5 trigger.getresultcommenthostid and trigger_desc params are requiredoutputtriggeriddescriptionfilterpriorityhostidsurlauthresultresultdescriptionresult'] = r return ret return False else: return False else: raise KeyError except KeyError: return ret
.. versionadded:: Fluorine Retrieve trigger ID and description based in host ID and trigger description. .. note:: https://www.zabbix.com/documentation/3.4/manual/api/reference/trigger/get :param hostid: ID of the host whose trigger we want to find :param trigger_desc: Description of trigger (trigger name) whose we want to find :param priority: Priority of trigger (useful if we have same name for more triggers with different priorities) :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :return: Trigger ID and description. False if no trigger found or on failure. CLI Example: .. code-block:: bash salt '*' zabbix.triggerid_get 1111 'trigger name to find' 5
388,012
def remove_hyperedge(self, hyperedge_id):
    if not self.has_hyperedge_id(hyperedge_id):
        raise ValueError("No such hyperedge exists.")
    frozen_tail = self._hyperedge_attributes[hyperedge_id]["__frozen_tail"]
    frozen_head = self._hyperedge_attributes[hyperedge_id]["__frozen_head"]
    for node in frozen_tail:
        self._forward_star[node].remove(hyperedge_id)
    for node in frozen_head:
        self._backward_star[node].remove(hyperedge_id)
    del self._successors[frozen_tail][frozen_head]
    if self._successors[frozen_tail] == {}:
        del self._successors[frozen_tail]
    del self._predecessors[frozen_head][frozen_tail]
    if self._predecessors[frozen_head] == {}:
        del self._predecessors[frozen_head]
    del self._hyperedge_attributes[hyperedge_id]
Removes a hyperedge and its attributes from the hypergraph. :param hyperedge_id: ID of the hyperedge to be removed. :raises: ValueError -- No such hyperedge exists. Examples: :: >>> H = DirectedHypergraph() >>> xyz = hyperedge_list = ((["A"], ["B", "C"]), (("A", "B"), ("C"), {'weight': 2}), (set(["B"]), set(["A", "C"]))) >>> H.add_hyperedges(hyperedge_list) >>> H.remove_hyperedge(xyz[0])
388,013
def pipeline(self, config, request): path_chain = request[] if not path_chain or path_chain[0] != : raise HTTPError(400) authers = config.get() if authers is None: raise HTTPError(403) valid_once = False for auth in authers: valid = authers[auth].handle(request) if valid is False: raise HTTPError(403) elif valid is True: valid_once = True if valid_once is not True: self.server.auditlog.svc_access(self.__class__.__name__, log.AUDIT_SVC_AUTH_FAIL, request[], ) raise HTTPError(403) authzers = config.get() if authzers is None: raise HTTPError(403) authz_ok = None for authz in authzers: valid = authzers[authz].handle(request) if valid is True: authz_ok = True elif valid is False: authz_ok = False break if authz_ok is not True: self.server.auditlog.svc_access(self.__class__.__name__, log.AUDIT_SVC_AUTHZ_FAIL, request[], path_chain) raise HTTPError(403) trail = [] while path_chain: if path_chain in config[]: con = config[][path_chain] if len(trail) != 0: request[] = trail return con.handle(request) trail.insert(0, path_chain[-1]) path_chain = path_chain[:-1] raise HTTPError(404)
The pipeline() function handles authentication and invocation of the correct consumer based on the server configuration that is provided at initialization time. When authentication is performed, all the authenticators are executed. If any returns False, authentication fails and a 403 error is raised. If none of them positively succeeds and they all return None, authentication also fails and a 403 error is raised. Authentication plugins can add attributes to the request object for use by authorization or other plugins. When authorization is performed, any positive result will cause the operation to be accepted and any negative result will cause it to fail. If no authorization plugin returns a positive result, a 403 error is returned. Once authentication and authorization are successful, the pipeline will parse the path component and find the consumer plugin that handles the provided path, walking up the path component by component until a consumer is found. Paths are walked up from the leaf to the root, so if two consumers hang on the same tree, the one closer to the leaf will be used. If there is a trailing path when the consumer is selected, it will be stored in the request dictionary under the key 'trail'. The 'trail' is an ordered list of the path components below the consumer entry point.
388,014
def execute(self, context): hook = SQSHook(aws_conn_id=self.aws_conn_id) result = hook.send_message(queue_url=self.sqs_queue, message_body=self.message_content, delay_seconds=self.delay_seconds, message_attributes=self.message_attributes) self.log.info(, result) return result
Publish the message to SQS queue :param context: the context object :type context: dict :return: dict with information about the message sent For details of the returned dict see :py:meth:`botocore.client.SQS.send_message` :rtype: dict
388,015
def is_me(self):
    logger.info("And arbiter is launched with the hostname:%s "
                "from an arbiter point of view of addr:%s",
                self.host_name, socket.getfqdn())
    return self.host_name == socket.getfqdn() or self.host_name == socket.gethostname()
Check if the parameter name is the same as the name of this object TODO: is it useful? :return: true if the parameter name is the same as this object's name :rtype: bool
388,016
def step(self, closure=None): loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group[]: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError() state = self.state[p] if len(state) == 0: state[] = 0 state[] = torch.zeros_like(p.data) state[] = torch.zeros_like(p.data) next_m, next_v = state[], state[] beta1, beta2 = group[], group[] if group[] > 0: clip_grad_norm_(p, group[]) next_m.mul_(beta1).add_(1 - beta1, grad) next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad) update = next_m / (next_v.sqrt() + group[]) return loss
Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss.
388,017
def write_hdf5_segmentlist(seglist, output, path=None, **kwargs):
    if path is None:
        raise ValueError("Please specify the HDF5 path via the "
                         "``path=`` keyword argument")
    data = numpy.zeros((len(seglist), 4), dtype=int)
    for i, seg in enumerate(seglist):
        start, end = map(LIGOTimeGPS, seg)
        data[i, :] = (start.gpsSeconds, start.gpsNanoSeconds,
                      end.gpsSeconds, end.gpsNanoSeconds)
    # column names and the 'hdf5' format string are assumed from the GPS fields above
    segtable = Table(data, names=['start_time', 'start_time_ns',
                                  'end_time', 'end_time_ns'])
    return segtable.write(output, path=path, format='hdf5', **kwargs)
Write a `SegmentList` to an HDF5 file/group Parameters ---------- seglist : :class:`~ligo.segments.segmentlist` data to write output : `str`, `h5py.File`, `h5py.Group` filename or HDF5 object to write to path : `str` path to which to write inside the HDF5 file, relative to ``output`` **kwargs other keyword arguments are passed to :meth:`~astropy.table.Table.write`
388,018
def unary_from_softmax(sm, scale=None, clip=1e-5):
    num_cls = sm.shape[0]
    if scale is not None:
        assert 0 < scale <= 1, "`scale` needs to be in (0,1]"
        uniform = np.ones(sm.shape) / num_cls
        sm = scale * sm + (1 - scale) * uniform
    if clip is not None:
        sm = np.clip(sm, clip, 1.0)
    return -np.log(sm).reshape([num_cls, -1]).astype(np.float32)
Converts softmax class-probabilities to unary potentials (NLL per node). Parameters ---------- sm: numpy.array Output of a softmax where the first dimension is the classes, all others will be flattend. This means `sm.shape[0] == n_classes`. scale: float The certainty of the softmax output (default is None). If not None, the softmax outputs are scaled to range from uniform probability for 0 outputs to `scale` probability for 1 outputs. clip: float Minimum value to which probability should be clipped. This is because the unary is the negative log of the probability, and log(0) = inf, so we need to clip 0 probabilities to a positive value.
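A small usage sketch for unary_from_softmax above (array shapes are illustrative; the DenseCRF call is left as a comment because it assumes the pydensecrf package):
import numpy as np
sm = np.full((3, 4, 5), 1.0 / 3)            # softmax output: (n_classes, height, width)
unary = unary_from_softmax(sm, scale=0.9)   # shape (3, 20), negative log-probabilities
# d = dcrf.DenseCRF2D(5, 4, 3); d.setUnaryEnergy(unary)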
388,019
def init_app(self, app): self.init_config(app) blueprint = rest.create_blueprint( app.config[] ) post_action.connect(index_deposit_after_publish, sender=app, weak=False)
Flask application initialization. Initialize the REST endpoints. Connect all signals if `DEPOSIT_REGISTER_SIGNALS` is True. :param app: An instance of :class:`flask.Flask`.
388,020
def range(self, channels=None):
    if channels is None:
        channels = self._channels
    channels = self._name_to_index(channels)
    # list-like input returns a list of ranges; a single channel returns one range
    if hasattr(channels, '__iter__') \
            and not isinstance(channels, six.string_types):
        return [self._range[ch] for ch in channels]
    else:
        return self._range[channels]
Get the range of the specified channel(s). The range is a two-element list specifying the smallest and largest values that an event in a channel should have. Note that with floating point data, some events could have values outside the range in either direction due to instrument compensation. The range should be transformed along with the data when passed through a transformation function. The range of channel "n" is extracted from the $PnR parameter as ``[0, $PnR - 1]``. Parameters ---------- channels : int, str, list of int, list of str Channel(s) for which to get the range. If None, return a list with the range of all channels, in the order of ``FCSData.channels``. Return ------ array or list of arrays The range of the specified channel(s).
388,021
def feature_match(template, image, options=None):
    op = _DEF_TM_OPT.copy()
    if options is not None:
        op.update(options)
    feat = fe.factory(op['feature'])  # option key taken from the docstring's "feature" entry
    tmpl_f = feat(template, op)
    img_f = feat(image, op)
    scale = image.shape[0] / img_f.shape[0]
    heatmap = match_template(tmpl_f, img_f, op)
    return heatmap, scale
Match template and image by extracting specified feature :param template: Template image :param image: Search image :param options: Options include - feature: Feature extractor to use. Default is 'rgb'. Available options are: 'hog', 'lab', 'rgb', 'gray' :return: Heatmap
388,022
def redirects(self):
    if self._redirects is None:
        self._redirects = list()
        self.__pull_combined_properties()
    return self._redirects
list: List of all redirects to this page; **i.e.,** the titles \ listed here will redirect to this page title Note: Not settable
388,023
def _configure_device(commands, **kwargs): if salt.utils.platform.is_proxy(): return __proxy__[](commands, **kwargs) else: return _nxapi_config(commands, **kwargs)
Helper function to send configuration commands to the device over a proxy minion or native minion using NX-API or SSH.
388,024
def fanout(self, hosts=None, timeout=None, max_concurrency=64, auto_batch=None): return MapManager(self.get_fanout_client(hosts, max_concurrency, auto_batch), timeout=timeout)
Returns a context manager for a map operation that fans out to manually specified hosts instead of using the routing system. This can for instance be used to empty the database on all hosts. The context manager returns a :class:`FanoutClient`. Example usage:: with cluster.fanout(hosts=[0, 1, 2, 3]) as client: results = client.info() for host_id, info in results.value.iteritems(): print '%s -> %s' % (host_id, info['is']) The promise returned accumulates all results in a dictionary keyed by the `host_id`. The `hosts` parameter is a list of `host_id`\s or alternatively the string ``'all'`` to send the commands to all hosts. The fanout API needs to be used with a lot of care as it can cause a lot of damage when keys are written to hosts that do not expect them.
388,025
def meta_changed_notify_after(self, state_machine_m, _, info): meta_signal_message = info[] if meta_signal_message.origin == "graphical_editor_gaphas": return if meta_signal_message.origin == "load_meta_data": "MAX_VISIBLE_LIBRARY_HIERARCHY is 1.") if library_state_m.show_content(): if not library_state_m.state_copy_initialized: logger.warning("Show library content without initialized state copy does not work {0}" "".format(library_state_m)) logger.debug("Show content of {}".format(library_state_m.state)) gui_helper_meta_data.scale_library_content(library_state_m) self.add_state_view_for_model(library_state_m.state_copy, view, hierarchy_level=library_state_v.hierarchy_level + 1) else: logger.debug("Hide content of {}".format(library_state_m.state)) state_copy_v = self.canvas.get_view_for_model(library_state_m.state_copy) if state_copy_v: state_copy_v.remove() else: if isinstance(view, StateView): view.apply_meta_data(recursive=meta_signal_message.affects_children) else: view.apply_meta_data() self.canvas.request_update(view, matrix=True) self.canvas.wait_for_update()
Handle notification about the change of a state's meta data The meta data of the affected state(s) are read and the view updated accordingly. :param StateMachineModel state_machine_m: Always the state machine model belonging to this editor :param str _: Always "state_meta_signal" :param dict info: Information about the change, contains the MetaSignalMessage in the 'arg' key value
388,026
def _handle_auth(self, dtype, data, ts): if dtype == : raise NotImplementedError channel_id = data.pop() user_id = data.pop() identifier = (, user_id) self.channel_handlers[identifier] = channel_id self.channel_directory[identifier] = channel_id self.channel_directory[channel_id] = identifier
Handles authentication responses. :param dtype: :param data: :param ts: :return:
388,027
def addItem(self, item):
    try:
        self.tree.addItem(item)
    except AttributeError, e:
        raise VersionError()
Adds an item if the tree is mutable
388,028
def _iter_rawterms(cls, tree):
    for elem in tree.iterfind(OWL_CLASS):
        if RDF_ABOUT not in elem.keys():
            continue
        rawterm = cls._extract_resources(elem)
        rawterm['id'] = cls._get_id_from_url(elem.get(RDF_ABOUT))  # key name assumed
        yield rawterm
Iterate through the raw terms (Classes) in the ontology.
388,029
def _split_line_with_offsets(line): for delimiter in re.finditer(r"[\.,:\;](?![^\s])", line): span = delimiter.span() line = line[:span[0]] + " " + line[span[1]:] for delimiter in re.finditer(r"[\"\)\]\}>\s])", line): span = delimiter.span() line = line[:span[0]] + " " + line[span[1]:] for delimiter in re.finditer(r"(?<![^\.,\;:\"\(\[\{<]", line): span = delimiter.span() line = line[:span[0]] + " " + line[span[1]:] line = line.replace("-", " ") line = line.replace("`", " ") for match in re.finditer(r"[^\s]+", line): content = match.group(0) if content.strip() != "": yield (match.span()[0], content)
Split a line by delimiter, but yield tuples of word and offset. This function works by dropping all the english-like punctuation from a line (so parenthesis preceded or succeeded by spaces, periods, etc) and then splitting on spaces.
388,030
def find_id_in_folder(self, name, parent_folder_id=0):
    if name is None or len(name) == 0:
        return parent_folder_id
    offset = 0
    # assumed Box folder-items fields: 'name', 'total_count', 'entries'
    resp = self.get_folder_items(parent_folder_id, limit=1000, offset=offset,
                                 fields_list=['name'])
    total = int(resp['total_count'])
    while offset < total:
        found = self.__find_name(resp, name)
        if found is not None:
            return found
        offset += int(len(resp['entries']))
        resp = self.get_folder_items(parent_folder_id, limit=1000, offset=offset,
                                     fields_list=['name'])
    return None
Find a folder or a file ID from its name, inside a given folder. Args: name (str): Name of the folder or the file to find. parent_folder_id (int): ID of the folder where to search. Returns: int. ID of the file or folder found. None if not found. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
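A hypothetical call to the method above (the client instance name is ours):
folder_id = client.find_id_in_folder("Invoices", parent_folder_id=0)  # 0 is the Box root folder
if folder_id is None:
    print("not found")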
388,031
def get_section(self, section_id, params={}):
    url = SECTIONS_API.format(section_id)
    return CanvasSection(data=self._get_resource(url, params=params))
Return section resource for given canvas section id. https://canvas.instructure.com/doc/api/sections.html#method.sections.show
388,032
def dump(self): from rez.utils.formatting import columnise rows = [] for i, phase in enumerate(self.phase_stack): rows.append((self._depth_label(i), phase.status, str(phase))) print "status: %s (%s)" % (self.status.name, self.status.description) print "initial request: %s" % str(self.request_list) print print "solve stack:" print .join(columnise(rows)) if self.failed_phase_list: rows = [] for i, phase in enumerate(self.failed_phase_list): rows.append((" print print "previous failures:" print .join(columnise(rows))
Print a formatted summary of the current solve state.
388,033
def load(self, config): web_list = [] if config is None: logger.debug("No configuration file available. Cannot load ports list.") elif not config.has_section(self._section): logger.debug("No [%s] section in the configuration file. Cannot load ports list." % self._section) else: logger.debug("Start reading the [%s] section in the configuration file" % self._section) refresh = int(config.get_value(self._section, , default=self._default_refresh)) timeout = int(config.get_value(self._section, , default=self._default_timeout)) for i in range(1, 256): new_web = {} postfix = % str(i) new_web[] = config.get_value(self._section, % (postfix, )) if new_web[] is None: continue url_parse = urlparse(new_web[]) if not bool(url_parse.scheme) or not bool(url_parse.netloc): logger.error( % (new_web[], self._section)) continue new_web[] = config.get_value(self._section, % postfix, default="%s" % url_parse.netloc) new_web[] = None new_web[] = 0 new_web[] = refresh new_web[] = int(config.get_value(self._section, % postfix, default=timeout)) new_web[] = config.get_value(self._section, % postfix, default=None) if new_web[] is not None: new_web[] = int(new_web[]) / 1000.0 new_web[] = + str(i) new_web[] = config.get_value(self._section, % postfix, default=True) http_proxy = config.get_value(self._section, % postfix, default=None) https_proxy = config.get_value(self._section, % postfix, default=None) if https_proxy is None and http_proxy is None: new_web[] = None else: new_web[] = { : http_proxy, : https_proxy } logger.debug("Add Web URL %s to the static list" % new_web[]) web_list.append(new_web) logger.debug("Web list loaded: %s" % web_list) return web_list
Load the web list from the configuration file.
388,034
def absent(name, **connection_args): ret = {: name, : {}, : True, : } if __salt__[](name, **connection_args): if __opts__[]: ret[] = None ret[] = \ .format(name) return ret if __salt__[](name, **connection_args): ret[] = .format(name) ret[][name] = return ret else: err = _get_mysql_error() if err is not None: ret[] = \ .format(name, err) ret[] = False return ret else: err = _get_mysql_error() if err is not None: ret[] = err ret[] = False return ret ret[] = ( ).format(name) return ret
Ensure that the named database is absent name The name of the database to remove
388,035
def main(): colorama.init(wrap=six.PY3) doc = usage.get_primary_command_usage() allow_subcommands = in doc args = docopt(doc, version=settings.version, options_first=allow_subcommands) if sys.excepthook is sys.__excepthook__: sys.excepthook = log.excepthook try: log.enable_logging(log.get_log_level(args)) default_args = sys.argv[2 if args.get() else 1:] if (args.get() == and None not in settings.subcommands): subcommand = next(iter(args.get(, default_args)), None) return usage.get_help_usage(subcommand) argv = [args.get()] + args.get(, default_args) return _run_command(argv) except exc.InvalidCliValueError as e: return str(e)
Parse the command line options and launch the requested command. If the command is 'help' then print the help message for the subcommand; if no subcommand is given, print the standard help message.
388,036
def handleMatch(self, m):
    userStr = m.group(3)
    imgURL = processString(userStr)
    el = etree.Element('img')   # element/attribute names assumed from the docstring
    el.set('src', imgURL)
    el.set('title', userStr)
    el.set('alt', userStr)
    return el
Handles user input into [magic] tag, processes it, and inserts the returned URL into an <img> tag through a Python ElementTree <img> Element.
388,037
def run(data): name = dd.get_sample_name(data) in_bam = dd.get_transcriptome_bam(data) config = data[] if not in_bam: logger.info("Transcriptome-mapped BAM file not found, skipping eXpress.") return data out_dir = os.path.join(dd.get_work_dir(data), "express", name) out_file = os.path.join(out_dir, name + ".xprs") express = config_utils.get_program("express", data[]) strand = _set_stranded_flag(in_bam, data) if not file_exists(out_file): gtf_fasta = gtf.gtf_to_fasta(dd.get_gtf_file(data), dd.get_ref_file(data)) with tx_tmpdir(data) as tmp_dir: with file_transaction(data, out_dir) as tx_out_dir: bam_file = _prepare_bam_file(in_bam, tmp_dir, config) cmd = ("{express} --no-update-check -o {tx_out_dir} {strand} {gtf_fasta} {bam_file}") do.run(cmd.format(**locals()), "Run express on %s." % in_bam, {}) shutil.move(os.path.join(out_dir, "results.xprs"), out_file) eff_count_file = _get_column(out_file, out_file.replace(".xprs", "_eff.counts"), 7, data=data) tpm_file = _get_column(out_file, out_file.replace("xprs", "tpm"), 14, data=data) fpkm_file = _get_column(out_file, out_file.replace("xprs", "fpkm"), 10, data=data) data = dd.set_express_counts(data, eff_count_file) data = dd.set_express_tpm(data, tpm_file) data = dd.set_express_fpkm(data, fpkm_file) return data
Quantitaive isoforms expression by eXpress
388,038
def idle_task(self):
    for r in self.repeats:
        if r.event.trigger():
            self.mpstate.functions.process_stdin(r.cmd, immediate=True)
called on idle
388,039
def task(name, deps = None, fn = None): if callable(deps): fn = deps deps = None if not deps and not fn: logger.log(logger.red("The task is empty" % name)) else: tasks[name] = [fn, deps]
Define a new task.
388,040
def paginate(self): project_dir = self.project_dir raw_dir = self.raw_dir batch_dir = self.batch_dir if project_dir is None: raise UnderDefined("no project directory defined") if raw_dir is None: raise UnderDefined("no raw directory defined") if batch_dir is None: raise UnderDefined("no batcb directory defined") if not os.path.isdir(project_dir): os.mkdir(project_dir) logging.info(f"created folder {project_dir}") if not os.path.isdir(batch_dir): os.mkdir(batch_dir) logging.info(f"created folder {batch_dir}") if not os.path.isdir(raw_dir): os.mkdir(raw_dir) logging.info(f"created folder {raw_dir}") return project_dir, batch_dir, raw_dir
Make folders where we would like to put results etc.
388,041
def detect_direct_function_shadowing(contract):
    functions_declared = {function.full_name: function
                          for function in contract.functions_and_modifiers_not_inherited}
    results = {}
    for base_contract in reversed(contract.immediate_inheritance):
        for base_function in base_contract.functions_and_modifiers:
            if base_function.full_name in results:
                continue
            if base_function.is_implemented and base_function.full_name in functions_declared:
                results[base_function.full_name] = (functions_declared[base_function.full_name],
                                                    base_contract, base_function)
    return list(results.values())
Detects and obtains functions which are shadowed immediately by the provided ancestor contract. :param contract: The ancestor contract which we check for function shadowing within. :return: A list of tuples (overshadowing_function, overshadowed_immediate_base_contract, overshadowed_function) -overshadowing_function is the function defined within the provided contract that overshadows another definition. -overshadowed_immediate_base_contract is the immediate inherited-from contract that provided the shadowed function (could have provided it through inheritance, does not need to directly define it). -overshadowed_function is the function definition which is overshadowed by the provided contract's definition.
388,042
def _factory(cls, constraints, op):
    pieces = []
    for i, constraint in enumerate(constraints):
        pieces.append(constraint)
        if i != len(constraints) - 1:
            pieces.append(op)
    return cls(pieces)
Factory for joining constraints with a single conjunction
388,043
def _handle_chat(self, data):
    self.conn.enqueue_data(
        "chat", ChatMessage.from_data(self.room, self.conn, data)
    )
Handle chat messages
388,044
def search_upwards(self, fpath=None, repodirname=, upwards={}): fpath = fpath or self.fpath uuid = self.unique_id last_path = self path_comp = fpath.split(os.path.sep) for n in xrange(1, len(path_comp)-1): checkpath = os.path.join(*path_comp[0:-1 * n]) repodir = os.path.join(checkpath, repodirname) upw_uuid = upwards.get(repodir) if upw_uuid: if upw_uuid == uuid: last_path = SvnRepository(checkpath) continue else: break elif os.path.exists(repodir): repo = SvnRepository(checkpath) upw_uuid = repo.unique_id upwards[repodir] = upw_uuid if upw_uuid == uuid: last_path = repo continue else: break return last_path
Traverse filesystem upwards, searching for .svn directories with matching UUIDs (Recursive) Args: fpath (str): file path to search upwards from repodirname (str): directory name to search for (``.svn``) upwards (dict): dict of already-searched directories example:: repo/.svn repo/dir1/.svn repo/dir1/dir2/.svn >> search_upwards('repo/') << 'repo/' >> search_upwards('repo/dir1') << 'repo/' >> search_upwards('repo/dir1/dir2') << 'repo/' repo/.svn repo/dirA/ repo/dirA/dirB/.svn >> search_upwards('repo/dirA') << 'repo/' >> search_upwards('repo/dirA/dirB') >> 'repo/dirB')
388,045
def pretty_time(timestamp: str):
    try:
        parsed = iso_8601.parse_datetime(timestamp)
    except ValueError:
        now = datetime.utcnow().replace(tzinfo=timezone.utc)
        try:
            delta = iso_8601.parse_delta(timestamp)
        except ValueError:
            delta = human_time.parse_timedelta(timestamp)
        parsed = now - delta
    echo(human_time.human_timestamp(parsed))
Format timestamp for human consumption.
388,046
def _sync_io(self):
    if self._file_epoch == self.file_object.epoch:
        return
    if self._io.binary:
        contents = self.file_object.byte_contents
    else:
        contents = self.file_object.contents
    self._set_stream_contents(contents)
    self._file_epoch = self.file_object.epoch
Update the stream with changes to the file object contents.
388,047
def find_bidi(self, el): for node in self.get_children(el, tags=False): if self.is_tag(node): direction = DIR_MAP.get(util.lower(self.get_attribute_by_name(node, , )), None) if ( self.get_tag(node) in (, , , , ) or not self.is_html_tag(node) or direction is not None ): continue return None
Get directionality from element text.
388,048
def filter_missing(self): missing = None locus_count = 0 self.genotype_file.seek(0) for genotypes in self.genotype_file: genotypes = genotypes.split() chr, rsid, junk, pos = genotypes[0:4] if DataParser.boundary.TestBoundary(chr, pos, rsid): locus_count += 1 allelic_data = numpy.array(genotypes[4:], dtype="S2").reshape(-1, 2) if missing is None: missing = numpy.zeros(allelic_data.shape[0], dtype=) missing += (numpy.sum(0+(allelic_data==DataParser.missing_representation), axis=1)/2) max_missing = DataParser.ind_miss_tol * locus_count dropped_individuals = 0+(max_missing<missing) self.ind_mask[:,0] = self.ind_mask[:,0]|dropped_individuals self.ind_mask[:,1] = self.ind_mask[:,1]|dropped_individuals valid_individuals = numpy.sum(self.ind_mask==0) max_missing = DataParser.snp_miss_tol * valid_individuals self.locus_count = 0 dropped_snps = [] self.genotype_file.seek(0) for genotypes in self.genotype_file: genotypes = genotypes.split() chr, rsid, junk, pos = genotypes[0:4] chr = int(chr) pos = int(pos) if DataParser.boundary.TestBoundary(chr, pos, rsid): allelic_data = numpy.ma.MaskedArray(numpy.array(genotypes[4:], dtype="S2").reshape(-1, 2), self.ind_mask).compressed() missing = numpy.sum(0+(allelic_data==DataParser.missing_representation)) if missing > max_missing: DataParser.boundary.dropped_snps[int(chr)].add(int(pos)) dropped_snps.append(rsid) else: self.locus_count += 1
Filter out individuals and SNPs that have too many missing to be considered
388,049
def add_cmds_cpdir(cpdir, cmdpkl, cpfileglob=, require_cmd_magcolor=True, save_cmd_pngs=False): s objectinfo dict. save_cmd_pngs : bool If this is True, then will save the CMD plots that were generated and added back to the checkplotdict as PNGs to the same directory as `cpx`. Returns ------- Nothing. ' cplist = glob.glob(os.path.join(cpdir, cpfileglob)) return add_cmds_cplist(cplist, cmdpkl, require_cmd_magcolor=require_cmd_magcolor, save_cmd_pngs=save_cmd_pngs)
This adds CMDs for each object in cpdir. Parameters ---------- cpdir : list of str This is the directory to search for checkplot pickles. cmdpkl : str This is the filename of the CMD pickle created previously. cpfileglob : str The UNIX fileglob to use when searching for checkplot pickles to operate on. require_cmd_magcolor : bool If this is True, a CMD plot will not be made if the color and mag keys required by the CMD are not present or are nan in each checkplot's objectinfo dict. save_cmd_pngs : bool If this is True, then will save the CMD plots that were generated and added back to the checkplotdict as PNGs to the same directory as `cpx`. Returns ------- Nothing.
388,050
def datapoint_indices_for_tensor(self, tensor_index): if tensor_index >= self._num_tensors: raise ValueError( %(tensor_index, self._num_tensors)) return self._file_num_to_indices[tensor_index]
Returns the indices for all datapoints in the given tensor.
388,051
def marshal(self, values):
    if values is not None:
        return [super(EntityCollection, self).marshal(v) for v in values]
Turn a list of entities into a list of dictionaries. :param values: The entities to serialize. :type values: List[stravalib.model.BaseEntity] :return: List of dictionaries of attributes :rtype: List[Dict[str, Any]]
388,052
def _search(self, limit, format): limit = min(limit, self.MAX_SEARCH_PER_QUERY) payload = { : self.query, : limit, : self.current_offset, } payload.update(self.CUSTOM_PARAMS) headers = { : self.api_key } if not self.silent_fail: QueryChecker.check_web_params(payload, headers) response = requests.get(self.QUERY_URL, params=payload, headers=headers) json_results = self.get_json_results(response) packaged_results = [NewsResult(single_result_json) for single_result_json in json_results["value"]] self.current_offset += min(50, limit, len(packaged_results)) return packaged_results
Returns a list of result objects, with the url for the next page MsCognitive search url.
388,053
def boggle_hill_climbing(board=None, ntimes=100, verbose=True): finder = BoggleFinder() if board is None: board = random_boggle() best = len(finder.set_board(board)) for _ in range(ntimes): i, oldc = mutate_boggle(board) new = len(finder.set_board(board)) if new > best: best = new if verbose: print best, _, board else: board[i] = oldc if verbose: print_boggle(board) return board, best
Solve inverse Boggle by hill-climbing: find a high-scoring board by starting with a random one and changing it.
388,054
def proc_collector(process_map, args, pipeline_string): arguments_list = [] if args.detailed_list: arguments_list += [ "input_type", "output_type", "description", "dependencies", "conflicts", "directives" ] if args.short_list: arguments_list += [ "description" ] if arguments_list: procs_dict = {} for name, cls in process_map.items(): cls_inst = cls(template=name) if pipeline_string: if name not in pipeline_string: continue d = {arg_key: vars(cls_inst)[arg_key] for arg_key in vars(cls_inst) if arg_key in arguments_list} procs_dict[name] = d procs_dict_parser(procs_dict) sys.exit(0)
Function that collects all processes available and stores a dictionary of the required arguments of each process class to be passed to procs_dict_parser Parameters ---------- process_map: dict The dictionary with the Processes currently available in flowcraft and their corresponding classes as values args: argparse.Namespace The arguments passed through argparser that will be access to check the type of list to be printed pipeline_string: str the pipeline string
388,055
def _set_interface_brief(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=interface_brief.interface_brief, is_container=, presence=False, yang_name="interface-brief", rest_name="interface-brief", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=False) except (TypeError, ValueError): raise ValueError({ : , : "container", : , }) self.__interface_brief = t if hasattr(self, ): self._set()
Setter method for interface_brief, mapped from YANG variable /isis_state/interface_brief (container) If this variable is read-only (config: false) in the source YANG file, then _set_interface_brief is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_interface_brief() directly. YANG Description: ISIS interface info brief
388,056
def clear_all_events(self):
    self.lock.acquire()
    self.event_dict.clear()
    self.lock.release()
Clear all event queues and their cached events.
388,057
def loadPng(varNumVol, tplPngSize, strPathPng):
    print()
    lstPngPaths = [None] * varNumVol
    for idx01 in range(0, varNumVol):
        lstPngPaths[idx01] = (strPathPng + str(idx01) + '.png')  # file extension assumed
    aryPngData = np.zeros((tplPngSize[0], tplPngSize[1], varNumVol))
    for idx01 in range(0, varNumVol):
        aryPngData[:, :, idx01] = np.array(Image.open(lstPngPaths[idx01]))
    aryPngData = (aryPngData > 0).astype(int)
    return aryPngData
Load PNG files. Parameters ---------- varNumVol : float Number of volumes, i.e. number of time points in all runs. tplPngSize : tuple Shape of the stimulus image (i.e. png). strPathPng: str Path to the folder containing the png files. Returns ------- aryPngData : 3d numpy array, shape [png_x, png_y, n_vols] Stack of stimulus data.
388,058
def new_transaction(
        vm: VM,
        from_: Address,
        to: Address,
        amount: int=0,
        private_key: PrivateKey=None,
        gas_price: int=10,
        gas: int=100000,
        data: bytes=b'') -> BaseTransaction:
    nonce = vm.state.get_nonce(from_)
    tx = vm.create_unsigned_transaction(
        nonce=nonce,
        gas_price=gas_price,
        gas=gas,
        to=to,
        value=amount,
        data=data,
    )
    return tx.as_signed_transaction(private_key)
Create and return a transaction sending amount from <from_> to <to>. The transaction will be signed with the given private key.
388,059
def decorator(directname=None): global _decorators class_deco_list = _decorators def wrapper(f): nonlocal directname if directname is None: directname = f.__name__ f.ns_name = directname set_one(class_deco_list, directname, f) return wrapper
Attach a class to a parsing decorator and register it to the global decorator list. The class is registered with its name unless directname is provided
388,060
def query_by_user(cls, user, **kwargs):
    return cls._filter(
        cls.query.filter_by(user_id=user.get_id()),
        **kwargs
    )
Get a user's memberships.
388,061
def _prepare_transformation_recipe(pattern: str, reduction: str, axes_lengths: Tuple) -> TransformRecipe: left, right = pattern.split() identifiers_left, composite_axes_left = parse_expression(left) identifiers_rght, composite_axes_rght = parse_expression(right) if reduction == : difference = set.symmetric_difference(identifiers_left, identifiers_rght) if len(difference) > 0: raise EinopsError(.format(difference)) elif reduction in _reductions: difference = set.difference(identifiers_rght, identifiers_left) if len(difference) > 0: raise EinopsError(.format(difference)) else: raise EinopsError(.format(reduction)) known_lengths = OrderedDict() position_lookup = {} position_lookup_after_reduction = {} reduced_axes = [] for composite_axis in composite_axes_left: for axis in composite_axis: position_lookup[axis] = len(position_lookup) if axis in identifiers_rght: position_lookup_after_reduction[axis] = len(position_lookup_after_reduction) else: reduced_axes.append(len(known_lengths)) known_lengths[axis] = None def update_axis_length(axis_name, axis_length): if known_lengths[axis_name] is not None: if isinstance(axis_length, int) and isinstance(known_lengths[axis_name], int): if axis_length != known_lengths[axis_name]: raise RuntimeError(.format( axis_name, axis_length, known_lengths[axis_name])) else: known_lengths[axis_name] = axis_length for elementary_axis, axis_length in axes_lengths: if not _check_elementary_axis_name(elementary_axis): raise EinopsError(, elementary_axis) if elementary_axis not in known_lengths: raise EinopsError(.format(elementary_axis)) update_axis_length(elementary_axis, axis_length) input_axes_known_unknown = [] for composite_axis in composite_axes_left: known = {axis for axis in composite_axis if known_lengths[axis] is not None} unknown = {axis for axis in composite_axis if known_lengths[axis] is None} lookup = dict(zip(list(known_lengths), range(len(known_lengths)))) if len(unknown) > 1: raise EinopsError(.format(unknown)) assert len(unknown) + len(known) == len(composite_axis) input_axes_known_unknown.append(([lookup[axis] for axis in known], [lookup[axis] for axis in unknown])) result_axes_grouping = [[position_lookup_after_reduction[axis] for axis in composite_axis] for composite_axis in composite_axes_rght] ellipsis_left = math.inf if _ellipsis not in composite_axes_left else composite_axes_left.index(_ellipsis) ellipsis_rght = math.inf if _ellipsis not in composite_axes_rght else composite_axes_rght.index(_ellipsis) return TransformRecipe(elementary_axes_lengths=list(known_lengths.values()), input_composite_axes=input_axes_known_unknown, output_composite_axes=result_axes_grouping, reduction_type=reduction, reduced_elementary_axes=tuple(reduced_axes), ellipsis_positions=(ellipsis_left, ellipsis_rght) )
Perform initial parsing of pattern and provided supplementary info axes_lengths is a tuple of tuples (axis_name, axis_length)
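The recipe above backs einops' public functions; a couple of illustrative patterns (assuming the einops package and a 4-D array x):
from einops import rearrange, reduce
y = rearrange(x, 'b c h w -> b (h w) c')                           # pure reshuffle, no reduction
z = reduce(x, 'b c (h h2) (w w2) -> b c h w', 'max', h2=2, w2=2)   # 2x2 max-pooling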
388,062
def merge_items(from_id, to_id, login_obj, mediawiki_api_url=, ignore_conflicts=, user_agent=config[]): url = mediawiki_api_url headers = { : , : , : user_agent } params = { : , : from_id, : to_id, : login_obj.get_edit_token(), : , : , : ignore_conflicts } try: merge_reply = requests.post(url=url, data=params, headers=headers, cookies=login_obj.get_edit_cookie()) merge_reply.raise_for_status() if in merge_reply.json(): raise MergeError(merge_reply.json()) except requests.HTTPError as e: print(e) return {: } return merge_reply.json()
A static method to merge two Wikidata items :param from_id: The QID which should be merged into another item :type from_id: string with 'Q' prefix :param to_id: The QID into which another item should be merged :type to_id: string with 'Q' prefix :param login_obj: The object containing the login credentials and cookies :type login_obj: instance of PBB_login.WDLogin :param mediawiki_api_url: The MediaWiki url which should be used :type mediawiki_api_url: str :param ignore_conflicts: A string with the values 'description', 'statement' or 'sitelink', separated by a pipe ('|') if using more than one of those. :type ignore_conflicts: str
388,063
def check_config_mode(self, check_string=")#", pattern=""):
    # default check_string/pattern values assumed (IOS config-mode prompts end in ")#")
    return super(CiscoBaseConnection, self).check_config_mode(
        check_string=check_string, pattern=pattern
    )
Checks if the device is in configuration mode or not. Cisco IOS devices abbreviate the prompt at 20 chars in config mode
388,064
def compose_object(self, file_list, destination_file, content_type): xml_setting_list = [] for meta_data in file_list: xml_setting_list.append() for key, val in meta_data.iteritems(): xml_setting_list.append( % (key, val, key)) xml_setting_list.append() xml_setting_list.append() xml = .join(xml_setting_list) if content_type is not None: headers = {: content_type} else: headers = None status, resp_headers, content = self.put_object( api_utils._quote_filename(destination_file) + , payload=xml, headers=headers) errors.check_status(status, [200], destination_file, resp_headers, body=content)
COMPOSE multiple objects together. Using the given list of files, calls the put object with the compose flag. This call merges all the files into the destination file. Args: file_list: list of dicts with the file name. destination_file: Path to the destination file. content_type: Content type for the destination file.
388,065
def get_east_asian_width_property(value, is_bytes=False): obj = unidata.ascii_east_asian_width if is_bytes else unidata.unicode_east_asian_width if value.startswith(): negated = value[1:] value = + unidata.unicode_alias[].get(negated, negated) else: value = unidata.unicode_alias[].get(value, value) return obj[value]
Get `EAST ASIAN WIDTH` property.
388,066
def setup_multiprocessing_logging(queue=None): from salt.utils.platform import is_windows global __MP_LOGGING_CONFIGURED global __MP_LOGGING_QUEUE_HANDLER if __MP_IN_MAINPROCESS is True and not is_windows(): __MP_LOGGING_CONFIGURED = True if __MP_LOGGING_QUEUE_HANDLER is not None: return __remove_null_logging_handler() __remove_queue_logging_handler() time.sleep(0.0001) finally: logging._releaseLock()
This code should be called from within a running multiprocessing process instance.
388,067
def process(self, formdata=None, obj=None, data=None, **kwargs):
    self._obj = obj
    super(CommonFormMixin, self).process(formdata, obj, data, **kwargs)
Wrap the process method to store the current object instance
388,068
def url(self):
    if isinstance(self.result, types.BotInlineResult):
        return self.result.url
The URL present in this inline results. If you want to "click" this URL to open it in your browser, you should use Python's `webbrowser.open(url)` for such task.
388,069
def _prm_write_shared_array(self, key, data, hdf5_group, full_name, flag, **kwargs): if flag == HDF5StorageService.ARRAY: self._prm_write_into_array(key, data, hdf5_group, full_name, **kwargs) elif flag in (HDF5StorageService.CARRAY, HDF5StorageService.EARRAY, HDF5StorageService.VLARRAY): self._prm_write_into_other_array(key, data, hdf5_group, full_name, flag=flag, **kwargs) else: raise RuntimeError( % (flag, key, full_name)) self._hdf5file.flush()
Creates and array that can be used with an HDF5 array object
388,070
def map_components(notsplit_packages, components):
    packages = set()
    for c in components:
        if c in notsplit_packages:
            packages.add('ceph')  # per the docstring: unsplit components install the master 'ceph' package
        else:
            packages.add(c)
    return list(packages)
Returns a list of packages to install based on component names This is done by checking if a component is in notsplit_packages, if it is, we know we need to install 'ceph' instead of the raw component name. Essentially, this component hasn't been 'split' from the master 'ceph' package yet.
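For example, with the function above:
map_components(notsplit_packages=['mon', 'osd'], components=['mon', 'radosgw'])
# -> ['ceph', 'radosgw']   (order may vary; the set removes duplicates)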
388,071
def _dict_to_map_str_str(self, d):
    return dict(map(
        lambda (k, v): (k, str(v).lower() if isinstance(v, bool) else str(v)),
        d.iteritems()
    ))
Thrift requires the params and headers dict values to only contain str values.
388,072
def configure_retrieve(self, ns, definition): request_schema = definition.request_schema or Schema() @self.add_route(ns.instance_path, Operation.Retrieve, ns) @qs(request_schema) @response(definition.response_schema) @wraps(definition.func) def retrieve(**path_data): headers = dict() request_data = load_query_string_data(request_schema) response_data = require_response_data(definition.func(**merge_data(path_data, request_data))) definition.header_func(headers, response_data) response_format = self.negotiate_response_content(definition.response_formats) return dump_response_data( definition.response_schema, response_data, headers=headers, response_format=response_format, ) retrieve.__doc__ = "Retrieve a {} by id".format(ns.subject_name)
Register a retrieve endpoint. The definition's func should be a retrieve function, which must: - accept kwargs for path data - return an item or falsey :param ns: the namespace :param definition: the endpoint definition
388,073
def save_history(self, f):
    warnings.warn(
        "save_history is deprecated and will be removed in the next "
        "release, please use save_params with the f_history keyword",
        DeprecationWarning)
    self.history.to_file(f)
Saves the history of ``NeuralNet`` as a json file. In order to use this feature, the history must only contain JSON encodable Python data structures. Numpy and PyTorch types should not be in the history. Parameters ---------- f : file-like object or str Examples -------- >>> before = NeuralNetClassifier(mymodule) >>> before.fit(X, y, epoch=2) # Train for 2 epochs >>> before.save_params('path/to/params') >>> before.save_history('path/to/history.json') >>> after = NeuralNetClassifier(mymodule).initialize() >>> after.load_params('path/to/params') >>> after.load_history('path/to/history.json') >>> after.fit(X, y, epoch=2) # Train for another 2 epochs
388,074
def lessThan(self, leftIndex, rightIndex):
    leftData = self.sourceModel().data(leftIndex, RegistryTableModel.SORT_ROLE)
    rightData = self.sourceModel().data(rightIndex, RegistryTableModel.SORT_ROLE)
    return leftData < rightData
Returns true if the value of the item referred to by the given index left is less than the value of the item referred to by the given index right, otherwise returns false.
388,075
def convert_representation(self, i):
    # representation names assumed: 'unsigned', 'signed', 'hex'
    if self.number_representation == 'unsigned':
        return i
    elif self.number_representation == 'signed':
        if i & (1 << self.interpreter._bit_width - 1):
            return -((~i + 1) & (2**self.interpreter._bit_width - 1))
        else:
            return i
    elif self.number_representation == 'hex':
        return hex(i)
Return the proper representation for the given integer
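A quick worked check of the two's-complement branch above, using an assumed 8-bit width:
bit_width = 8
i = 0xFF
signed = -((~i + 1) & (2**bit_width - 1)) if i & (1 << bit_width - 1) else i
# signed == -1, since 0xFF has the sign bit set and its two's complement magnitude is 1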
388,076
def rescale_gradients(model: Model, grad_norm: Optional[float] = None) -> Optional[float]:
    if grad_norm:
        parameters_to_clip = [p for p in model.parameters() if p.grad is not None]
        return sparse_clip_norm(parameters_to_clip, grad_norm)
    return None
Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled.
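A minimal training-loop sketch using rescale_gradients above (model, loss, and optimizer names are illustrative):
loss = model(batch)["loss"]
loss.backward()
rescale_gradients(model, grad_norm=5.0)   # clips the total gradient norm to 5.0
optimizer.step()
optimizer.zero_grad()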
388,077
def solution(self, expr, v, extra_constraints=(), solver=None, model_callback=None):
    if self._solver_required and solver is None:
        raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)
    return self._solution(self.convert(expr), self.convert(v),
                          extra_constraints=self.convert_list(extra_constraints),
                          solver=solver, model_callback=model_callback)
Return True if `v` is a solution of `expr` with the extra constraints, False otherwise. :param expr: An expression (an AST) to evaluate :param v: The proposed solution (an AST) :param solver: A solver object, native to the backend, to assist in the evaluation (for example, a z3.Solver). :param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve. :param model_callback: a function that will be executed with recovered models (if any) :return: True if `v` is a solution of `expr`, False otherwise
388,078
def get_bios_firmware_version(snmp_client):
    try:
        bios_firmware_version = snmp_client.get(BIOS_FW_VERSION_OID)
        return six.text_type(bios_firmware_version)
    except SNMPFailure as e:
        raise SNMPBIOSFirmwareFailure(
            SNMP_FAILURE_MSG % ("GET BIOS FIRMWARE VERSION", e))
Get bios firmware version of the node. :param snmp_client: an SNMP client object. :raises: SNMPFailure if SNMP operation failed. :returns: a string of bios firmware version.
388,079
def _get_path_pattern_tornado45(self, router=None):
    if router is None:
        router = self.application.default_router
    for rule in router.rules:
        if rule.matcher.match(self.request) is not None:
            if isinstance(rule.matcher, routing.PathMatches):
                return rule.matcher.regex.pattern
            elif isinstance(rule.target, routing.Router):
                return self._get_path_pattern_tornado45(rule.target)
Return the path pattern used when routing a request. (Tornado>=4.5) :param tornado.routing.Router router: (Optional) The router to scan. Defaults to the application's router. :rtype: str
388,080
def print_trace(self, file=sys.stdout, base=10, compact=False): if len(self.trace) == 0: raise PyrtlError() if base not in (2, 8, 10, 16): raise PyrtlError() basekey = {2: , 8: , 10: , 16: }[base] ident_len = max(len(w) for w in self.trace) if compact: for w in sorted(self.trace, key=_trace_sort_key): vals = .join(.format(x, basekey) for x in self.trace[w]) file.write(w.rjust(ident_len) + + vals + ) else: maxlenval = max(len(.format(x, basekey)) for w in self.trace for x in self.trace[w]) file.write( * (ident_len - 3) + "--- Values in base %d ---\n" % base) for w in sorted(self.trace, key=_trace_sort_key): vals = .join(.format(x, maxlenval, basekey) for x in self.trace[w]) file.write(w.ljust(ident_len + 1) + vals + ) file.flush()
Prints a list of wires and their current values. :param int base: the base the values are to be printed in :param bool compact: whether to omit spaces in output lines
388,081
def insert(self, song):
    if song in self._songs:
        return
    if self._current_song is None:
        self._songs.append(song)
    else:
        index = self._songs.index(self._current_song)
        self._songs.insert(index + 1, song)
Insert a song right after the current song.
388,082
def start(workflow_name, data=None, object_id=None, **kwargs): from .proxies import workflow_object_class from .worker_engine import run_worker if data is None and object_id is None: raise WorkflowsMissingData("No data or object_id passed to task.ß") if object_id is not None: obj = workflow_object_class.get(object_id) if not obj: raise WorkflowsMissingObject( "Cannot find object: {0}".format(object_id) ) data = [obj] else: if not isinstance(data, (list, tuple)): data = [data] return text_type(run_worker(workflow_name, data, **kwargs).uuid)
Start a workflow by given name for specified data. The name of the workflow to start is considered unique and it is equal to the name of a file containing the workflow definition. The data passed could be a list of Python standard data types such as strings, dict, integers etc. to run through the workflow. Inside the workflow tasks, this data is then available through ``obj.data``. Or alternatively, pass the WorkflowObject to work on via ``object_id`` parameter. NOTE: This will replace any value in ``data``. This is also a Celery (http://celeryproject.org) task, so you can access the ``start.delay`` function to enqueue the execution of the workflow asynchronously. :param workflow_name: the workflow name to run. Ex: "my_workflow". :type workflow_name: str :param data: the workflow name to run. Ex: "my_workflow" (optional if ``object_id`` provided). :type data: tuple :param object_id: id of ``WorkflowObject`` to run (optional). :type object_id: int :return: UUID of the workflow engine that ran the workflow.
388,083
def _netbsd_gpu_data():
    known_vendors = ['nvidia', 'amd', 'ati', 'intel',
                     'cirrus logic', 'vmware', 'matrox', 'aspeed']

    gpus = []
    try:
        pcictl_out = __salt__['cmd.run']('pcictl pci0 list')

        for line in pcictl_out.splitlines():
            for vendor in known_vendors:
                # lines look like "000:06:0: NVIDIA GeForce ... (VGA display)"
                vendor_match = re.match(
                    r'[0-9:]+ ({0}) (.+) \(VGA .+\)'.format(vendor),
                    line,
                    re.IGNORECASE
                )
                if vendor_match:
                    gpus.append({'vendor': vendor_match.group(1),
                                 'model': vendor_match.group(2)})
    except OSError:
        pass

    grains = {}
    grains['num_gpus'] = len(gpus)
    grains['gpus'] = gpus
    return grains
num_gpus: int gpus: - vendor: nvidia|amd|ati|... model: string
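For illustration only, the grains dictionary returned by the function above would look roughly like this (vendor and model values are made up):

{
    'num_gpus': 1,
    'gpus': [
        {'vendor': 'nvidia', 'model': 'GeForce GT 710'},
    ],
}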
388,084
def is_ancestor(self, commit1, commit2, patch=False): result = self.hg("log", "-r", "first(%s::%s)" % (commit1, commit2), "--template", "exists", patch=patch) return "exists" in result
Returns True if commit1 is a direct ancestor of commit2, or False otherwise. This method considers a commit to be a direct ancestor of itself.
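A hedged usage sketch; ``HgRepo`` is a hypothetical wrapper exposing the ``hg()`` helper this method relies on, and the revision ids are made up. The revset ``first(A::B)`` is non-empty exactly when A is an ancestor of B (or the same commit), which is what the string check exploits.

repo = HgRepo("/path/to/clone")  # hypothetical wrapper with an .hg() helper
if repo.is_ancestor("1a2b3c", "4d5e6f"):
    print("1a2b3c is an ancestor of 4d5e6f (or the same commit)")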
388,085
async def update(self, obj, only=None): field_dict = dict(obj.__data__) pk_field = obj._meta.primary_key if only: self._prune_fields(field_dict, only) if obj._meta.only_save_dirty: self._prune_fields(field_dict, obj.dirty_fields) if obj._meta.composite_key: for pk_part_name in pk_field.field_names: field_dict.pop(pk_part_name, None) else: field_dict.pop(pk_field.name, None) query = obj.update(**field_dict).where(obj._pk_expr()) result = await self.execute(query) obj._dirty.clear() return result
Update the object in the database. Optionally, update only the specified fields. For creating a new object use :meth:`.create()` :param only: (optional) the list/tuple of fields or field names to update
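A hedged usage sketch in the peewee-async style; the database, model, and field names are made up.

import peewee
import peewee_async

database = peewee_async.PostgresqlDatabase('example_db')
objects = peewee_async.Manager(database)


class Page(peewee.Model):
    title = peewee.CharField()
    views = peewee.IntegerField(default=0)

    class Meta:
        database = database


async def bump_views(page):
    page.views += 1
    # Persist only the changed column instead of the whole row.
    await objects.update(page, only=['views'])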
388,086
def file_content(self, value): if isinstance(value, FileContent): self._file_content = value else: self._file_content = FileContent(value)
The Base64 encoded content of the attachment :param value: The Base64 encoded content of the attachment :type value: FileContent, string
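A hedged sketch of feeding the setter above: a local file is Base64-encoded and assigned either as a plain string (wrapped automatically) or as a ``FileContent`` instance. The ``Attachment``/``FileContent`` names follow the surrounding helper classes; the file name is made up.

import base64

with open("report.pdf", "rb") as f:
    encoded = base64.b64encode(f.read()).decode()

attachment = Attachment()
attachment.file_content = encoded               # plain string is wrapped in FileContent
attachment.file_content = FileContent(encoded)  # or pass a FileContent directly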
388,087
def parse_element(raw_element: str) -> List[Element]:
    # Elements are ';'-separated; each one looks like "Tag(text,List(indices))".
    # (Relies on the third-party `regex` module, used here with the same API as `re`.)
    elements = [regex.match("^(([a-zA-Z]+)\(([^;]+),List\(([^;]*)\)\))$", elem.strip())
                for elem in raw_element.split(';')]
    return [interpret_element(*elem.groups()[1:]) for elem in elements if elem]
Parse a raw element into text and indices (integers).
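An illustrative call, inferred from the regex above: each element looks like ``Tag(text,List(indices))`` and elements are separated by ';'. The tag names, text, and the shape of the returned ``Element`` objects are assumptions; the exact result depends on ``interpret_element``.

raw = "Entity(Barack Obama,List(0,1)); Value(2008,List(5))"
elements = parse_element(raw)
# Roughly: [Element('Entity', 'Barack Obama', [0, 1]),
#           Element('Value', '2008', [5])]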
388,088
def list_services(request, step):
    all_datas = []
    # Step values follow the service wizard: '0' lists every activated service,
    # '3' narrows to the one being edited (values assumed from the upstream wizard).
    if step == '0':
        services = ServicesActivated.objects.filter(status=1)
    elif step == '3':
        services = ServicesActivated.objects.filter(status=1,
                                                    id__iexact=request.id)
    for class_name in services:
        # e.g. "ServiceTwitter" -> "Twitter"
        all_datas.append({class_name: class_name.name.rsplit('Service', 1)[1]})
    return all_datas
Get the activated services added by the administrator.

:param request: request object
:param step: the step which is being processed
:type request: HttpRequest object
:type step: string
:return: the activated services added by the administrator
388,089
def cd_to(path, mkdir=False): def cd_to_decorator(func): @functools.wraps(func) def _cd_and_exec(*args, **kwargs): with cd(path, mkdir): return func(*args, **kwargs) return _cd_and_exec return cd_to_decorator
make a generator like cd, but use it for function Usage:: >>> @cd_to("/") ... def say_where(): ... print(os.getcwd()) ... >>> say_where() /
388,090
def to_python(self, value):
    if isinstance(value, DirDescriptor):
        return value
    elif isinstance(value, str):
        return DirDescriptor(value)
    elif isinstance(value, dict):
        try:
            path = value['dir']
        except KeyError:
            raise ValidationError("dictionary must contain a 'dir' element")
        if not isinstance(path, str):
            raise ValidationError("field's dir element must be a string")

        size = value.get('size', None)
        if size is not None and not isinstance(size, int):
            raise ValidationError("field's size element must be an integer")

        total_size = value.get('total_size', None)
        if total_size is not None and not isinstance(total_size, int):
            raise ValidationError("field's total_size element must be an integer")

        refs = value.get('refs', None)
        if refs is not None and not isinstance(refs, list):
            raise ValidationError("field's refs element must be a list of strings")

        return DirDescriptor(
            path,
            size=size,
            total_size=total_size,
            refs=refs,
        )
    else:
        raise ValidationError("field must be a DirDescriptor, string or a dict")
Convert value if needed.
388,091
def _get_content_type(url, session):
    scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
    if scheme not in ('http', 'https'):
        # Only HTTP(S) URLs can be probed with a HEAD request.
        return ''

    resp = session.head(url, allow_redirects=True)
    resp.raise_for_status()

    return resp.headers.get("Content-Type", "")
Get the Content-Type of the given url, using a HEAD request
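A hedged usage sketch with a plain ``requests`` session; the URL is illustrative.

import requests

with requests.Session() as session:
    content_type = _get_content_type(
        "https://files.pythonhosted.org/packages/example/example-1.0.tar.gz",
        session,
    )
    # e.g. "application/x-tar"; non-HTTP(S) schemes short-circuit to ""
    print(content_type)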
388,092
def getFieldMax(self, fieldName):
    stats = self.getStats()
    if stats is None:
        return None
    maxValues = stats.get('max', None)
    if maxValues is None:
        return None
    index = self.getFieldNames().index(fieldName)
    return maxValues[index]
If the underlying implementation does not support min/max stats collection, or if the field type does not support min/max (non-scalars), the return value will be None.

:param fieldName: (string) name of the field to get the maximum of
:returns: current maximum value for the field ``fieldName``
388,093
def create_dialog_node(self,
                       workspace_id,
                       dialog_node,
                       description=None,
                       conditions=None,
                       parent=None,
                       previous_sibling=None,
                       output=None,
                       context=None,
                       metadata=None,
                       next_step=None,
                       title=None,
                       node_type=None,
                       event_name=None,
                       variable=None,
                       actions=None,
                       digress_in=None,
                       digress_out=None,
                       digress_out_slots=None,
                       user_label=None,
                       **kwargs):
    if workspace_id is None:
        raise ValueError('workspace_id must be provided')
    if dialog_node is None:
        raise ValueError('dialog_node must be provided')
    if output is not None:
        output = self._convert_model(output, DialogNodeOutput)
    if next_step is not None:
        next_step = self._convert_model(next_step, DialogNodeNextStep)
    if actions is not None:
        actions = [
            self._convert_model(x, DialogNodeAction) for x in actions
        ]

    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    sdk_headers = get_sdk_headers('conversation', 'V1', 'create_dialog_node')
    headers.update(sdk_headers)

    params = {'version': self.version}

    data = {
        'dialog_node': dialog_node,
        'description': description,
        'conditions': conditions,
        'parent': parent,
        'previous_sibling': previous_sibling,
        'output': output,
        'context': context,
        'metadata': metadata,
        'next_step': next_step,
        'title': title,
        'type': node_type,
        'event_name': event_name,
        'variable': variable,
        'actions': actions,
        'digress_in': digress_in,
        'digress_out': digress_out,
        'digress_out_slots': digress_out_slots,
        'user_label': user_label
    }

    url = '/v1/workspaces/{0}/dialog_nodes'.format(
        *self._encode_path_vars(workspace_id))
    response = self.request(
        method='POST',
        url=url,
        headers=headers,
        params=params,
        json=data,
        accept_json=True)
    return response
Create dialog node. Create a new dialog node. This operation is limited to 500 requests per 30 minutes. For more information, see **Rate limiting**. :param str workspace_id: Unique identifier of the workspace. :param str dialog_node: The dialog node ID. This string must conform to the following restrictions: - It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot characters. - It must be no longer than 1024 characters. :param str description: The description of the dialog node. This string cannot contain carriage return, newline, or tab characters, and it must be no longer than 128 characters. :param str conditions: The condition that will trigger the dialog node. This string cannot contain carriage return, newline, or tab characters, and it must be no longer than 2048 characters. :param str parent: The ID of the parent dialog node. This property is omitted if the dialog node has no parent. :param str previous_sibling: The ID of the previous sibling dialog node. This property is omitted if the dialog node has no previous sibling. :param DialogNodeOutput output: The output of the dialog node. For more information about how to specify dialog node output, see the [documentation](https://cloud.ibm.com/docs/services/assistant/dialog-overview.html#dialog-overview-responses). :param dict context: The context for the dialog node. :param dict metadata: The metadata for the dialog node. :param DialogNodeNextStep next_step: The next step to execute following this dialog node. :param str title: The alias used to identify the dialog node. This string must conform to the following restrictions: - It can contain only Unicode alphanumeric, space, underscore, hyphen, and dot characters. - It must be no longer than 64 characters. :param str node_type: How the dialog node is processed. :param str event_name: How an `event_handler` node is processed. :param str variable: The location in the dialog context where output is stored. :param list[DialogNodeAction] actions: An array of objects describing any actions to be invoked by the dialog node. :param str digress_in: Whether this top-level dialog node can be digressed into. :param str digress_out: Whether this dialog node can be returned to after a digression. :param str digress_out_slots: Whether the user can digress to top-level nodes while filling out slots. :param str user_label: A label that can be displayed externally to describe the purpose of the node to users. This string must be no longer than 512 characters. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
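A hedged usage sketch for the method above; the import path, credentials, workspace id, and node contents are placeholders.

from watson_developer_cloud import AssistantV1  # import path assumed

assistant = AssistantV1(version='2018-09-20', iam_apikey='YOUR_APIKEY')

response = assistant.create_dialog_node(
    workspace_id='YOUR_WORKSPACE_ID',
    dialog_node='greeting',
    conditions='#hello',
    title='Greeting',
    output={'generic': [{'response_type': 'text', 'text': 'Hello there!'}]},
)
print(response.get_result())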
388,094
def match(self, search, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.match_with_http_info(search, **kwargs)
    else:
        data = self.match_with_http_info(search, **kwargs)
        return data
Searches for Repository Configurations based on internal or external url, ignoring the protocol and \".git\" suffix. Only exact matches are returned. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.match(search, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str search: Url to search for (required) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :return: RepositoryConfigurationPage If the method is called asynchronously, returns the request thread.
388,095
def v1_folder_rename(request, response, kvlclient, fid_src, fid_dest, sfid_src=None, sfid_dest=None): src, dest = make_path(fid_src, sfid_src), make_path(fid_dest, sfid_dest) new_folders(kvlclient, request).move(src, dest) response.status = 200
Rename a folder or a subfolder. The routes for this endpoint are: * ``POST /dossier/v1/<fid_src>/rename/<fid_dest>`` * ``POST /dossier/v1/<fid_src>/subfolder/<sfid_src>/rename/ <fid_dest>/subfolder/<sfid_dest>``
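A hedged sketch of calling the first route above with ``requests``; the host, port, and folder ids are made up.

import requests

resp = requests.post("http://localhost:8080/dossier/v1/old_folder/rename/new_folder")
assert resp.status_code == 200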
388,096
def _bld_pnab_generic(self, funcname, **kwargs):
    # Store a marker dict describing the pandas call to be built later; the
    # exact key names ('mtype', 'kwargs') are assumed here.
    margs = {'mtype': pnab, 'kwargs': kwargs}
    setattr(self, funcname, margs)
Implements a generic version of a non-attribute-based pandas function.
388,097
def _doc2vec_doc_stream(paths, n, tokenizer=word_tokenize, sentences=True):
    i = 0
    p = Progress()
    for path in paths:
        with open(path, 'r') as f:
            for line in f:
                i += 1
                p.print_progress(i / n)
                line = line.lower()
                if sentences:
                    for sent in sent_tokenize(line):
                        tokens = tokenizer(sent)
                        # Each item needs a unique label; the 'SENT_{}' scheme
                        # is assumed here, any unique tag per item works.
                        yield LabeledSentence(tokens, ['SENT_{}'.format(i)])
                else:
                    tokens = tokenizer(line)
                    yield LabeledSentence(tokens, ['SENT_{}'.format(i)])
Generator to feed sentences to the doc2vec model.
388,098
def pbkdf2(digestmod, password, salt, count, dk_length):
    def pbkdf2_function(pw, salt, count, i):
        # U1 = PRF(password, salt || INT(i)); subsequent Us are chained and
        # XORed together to form one block of the derived key.
        r = u = hmac.new(pw, salt + struct.pack(">i", i), digestmod).digest()
        for i in range(2, count + 1):
            u = hmac.new(pw, u, digestmod).digest()
            r = bytes(i ^ j for i, j in zip(r, u))
        return r

    dk, h_length = b'', digestmod().digest_size
    # number of blocks needed to cover dk_length bytes
    blocks = (dk_length // h_length) + (1 if dk_length % h_length else 0)
    for i in range(1, blocks + 1):
        dk += pbkdf2_function(password, salt, count, i)
    return dk[:dk_length]
PBKDF2, from PKCS #5 v2.0[1].

[1]: http://tools.ietf.org/html/rfc2898

For proper usage, see NIST Special Publication 800-132:
http://csrc.nist.gov/publications/PubsSPs.html

The arguments for this function are:

digestmod
    a cryptographic hash constructor, such as hashlib.sha256, which will be used as an argument to the hmac function. Note that the performance difference between sha1 and sha256 is not very big. New applications should choose sha256 or better.

password
    The arbitrary-length password (passphrase) (bytes)

salt
    A bunch of random bytes, generated using a cryptographically strong random number generator (such as os.urandom()). NIST recommends the salt be _at least_ 128 bits (16 bytes) long.

count
    The iteration count. Set this value as large as you can tolerate. NIST recommends that the absolute minimum value be 1000. However, it should generally be in the range of tens of thousands, or however many cause about a half-second delay to the user.

dk_length
    The length of the desired key in bytes. This doesn't need to be the same size as the hash function's digest size, but it makes sense to use a larger digest hash function if your key size is large.
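A worked example following the docstring's own recommendations: sha256, a 16-byte random salt, a large iteration count, and a 32-byte derived key.

import hashlib
import os

salt = os.urandom(16)
key = pbkdf2(hashlib.sha256, b"correct horse battery staple", salt, 100000, 32)
assert len(key) == 32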
388,099
def find_unpaired_ligand(self):
    unpaired_hba, unpaired_hbd, unpaired_hal = [], [], []
    # Ligand atoms that already take part in an interaction
    involved_atoms = [hbond.a.idx for hbond in self.hbonds_pdon] + \
                     [hbond.d.idx for hbond in self.hbonds_ldon]
    [[involved_atoms.append(atom.idx) for atom in sb.negative.atoms] for sb in self.saltbridge_lneg]
    [[involved_atoms.append(atom.idx) for atom in sb.positive.atoms] for sb in self.saltbridge_pneg]
    [involved_atoms.append(wb.a.idx) for wb in self.water_bridges if wb.protisdon]
    [involved_atoms.append(wb.d.idx) for wb in self.water_bridges if not wb.protisdon]
    [involved_atoms.append(mcomplex.target.atom.idx) for mcomplex in self.metal_complexes
     if mcomplex.location == 'ligand']
    for atom in [hba.a for hba in self.ligand.get_hba()]:  # unpaired H-bond acceptors
        if atom.idx not in involved_atoms:
            unpaired_hba.append(atom)
    for atom in [hbd.d for hbd in self.ligand.get_hbd()]:  # unpaired H-bond donors
        if atom.idx not in involved_atoms:
            unpaired_hbd.append(atom)
    # Halogen bond donors on the ligand side
    [involved_atoms.append(atom.don.x.idx) for atom in self.halogen_bonds]
    for atom in [haldon.x for haldon in self.ligand.halogenbond_don]:
        if atom.idx not in involved_atoms:
            unpaired_hal.append(atom)
    return unpaired_hba, unpaired_hbd, unpaired_hal
Identify unpaired functional groups in ligands, involving H-bond donors, acceptors, and halogen bond donors.