Dataset columns: "Unnamed: 0" (int64, row index, values 0 to 389k); "code" (string, lengths 26 to 79.6k); "docstring" (string, lengths 1 to 46.9k).
5,100
def synset(self, synset_repr):
    # e.g. synset_repr = u'funktionieren.v.2'
    parts = synset_repr.split('.')
    if len(parts) != 3:
        return None
    lemma, pos, sensenum = parts
    if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
        return None
    sensenum = int(sensenum, 10)
    pos = SHORT_POS_TO_LONG[pos]
    # NOTE: the field-name literals in this query were lost in extraction;
    # 'orthForm', 'category' and 'sense' are plausible placeholders only.
    lemma_dict = self._mongo_db.lexunits.find_one(
        {'orthForm': lemma, 'category': pos, 'sense': sensenum})
    if lemma_dict:
        return Lemma(self, lemma_dict).synset
Looks up a synset in GermaNet using its string representation. Arguments: - `synset_repr`: a unicode string containing the lemma, part of speech, and sense number of the first lemma of the synset >>> gn.synset(u'funktionieren.v.2') Synset(funktionieren.v.2)
5,101
def open_file(self, title="Open File", initialDir="~", fileTypes="*|All Files",
              rememberAs=None, **kwargs):
    if rememberAs is not None:
        return self._run_kdialog(title, ["--getopenfilename", initialDir, fileTypes,
                                         ":" + rememberAs], kwargs)
    else:
        return self._run_kdialog(title, ["--getopenfilename", initialDir, fileTypes], kwargs)
Show an Open File dialog Usage: C{dialog.open_file(title="Open File", initialDir="~", fileTypes="*|All Files", rememberAs=None, **kwargs)} @param title: window title for the dialog @param initialDir: starting directory for the file dialog @param fileTypes: file type filter expression @param rememberAs: gives an ID to this file dialog, allowing it to open at the last used path next time @return: a tuple containing the exit code and file path @rtype: C{DialogData(int, str)}
5,102
def _distance_matrix(self, a, b):
    def sq(x):
        return (x * x)
    matrix = sq(a[:, 0][:, None] - b[:, 0][None, :])
    for x, y in zip(a.T[1:], b.T[1:]):
        matrix += sq(x[:, None] - y[None, :])
    return matrix
Pairwise distance between each point in `a` and each point in `b`
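A minimal illustration (not part of the original dataset; data values are made up) of the same broadcasting trick on plain NumPy arrays of 2-D points:

import numpy as np

a = np.array([[0.0, 0.0], [3.0, 4.0]])   # two points
b = np.array([[0.0, 0.0], [0.0, 4.0]])   # two points

# accumulate squared differences coordinate by coordinate
matrix = (a[:, 0][:, None] - b[:, 0][None, :]) ** 2
for x, y in zip(a.T[1:], b.T[1:]):
    matrix += (x[:, None] - y[None, :]) ** 2

print(matrix)
# [[ 0. 16.]
#  [25.  9.]]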
5,103
def analyse_text(text): result = 0.0 lines = text.split() hasEndProc = False hasHeaderComment = False hasFile = False hasJob = False hasProc = False hasParm = False hasReport = False def isCommentLine(line): return EasytrieveLexer._COMMENT_LINE_REGEX.match(lines[0]) is not None def isEmptyLine(line): return not bool(line.strip()) while lines and (isEmptyLine(lines[0]) or isCommentLine(lines[0])): if not isEmptyLine(lines[0]): hasHeaderComment = True del lines[0] if EasytrieveLexer._MACRO_HEADER_REGEX.match(lines[0]): result = 0.4 if hasHeaderComment: result += 0.4 else: for line in lines: words = line.split() if (len(words) >= 2): firstWord = words[0] if not hasReport: if not hasJob: if not hasFile: if not hasParm: if firstWord == : hasParm = True if firstWord == : hasFile = True if firstWord == : hasJob = True elif firstWord == : hasProc = True elif firstWord == : hasEndProc = True elif firstWord == : hasReport = True if hasJob and (hasProc == hasEndProc): if hasHeaderComment: result += 0.1 if hasParm: if hasProc: result += 0.8 else: result += 0.5 else: result += 0.11 if hasParm: result += 0.2 if hasFile: result += 0.01 if hasReport: result += 0.01 assert 0.0 <= result <= 1.0 return result
Perform a structural analysis for basic Easytrieve constructs.
5,104
def verification_email_body(case_name, url, display_name, category, subcategory, breakpoint_1, breakpoint_2, hgnc_symbol, panels, gtcalls, tx_changes, name, comment): html = .format( case_name=case_name, url=url, display_name=display_name, category=category, subcategory=subcategory, breakpoint_1=breakpoint_1, breakpoint_2=breakpoint_2, hgnc_symbol=hgnc_symbol, panels=panels, gtcalls=gtcalls, tx_changes=tx_changes, name=name, comment=comment) return html
Builds the html code for the variant verification emails (order verification and cancel verification) Args: case_name(str): case display name url(str): the complete url to the variant, accessible when clicking on the email link display_name(str): a display name for the variant category(str): category of the variant subcategory(str): sub-category of the variant breakpoint_1(str): breakpoint 1 (format is 'chr:start') breakpoint_2(str): breakpoint 2 (format is 'chr:stop') hgnc_symbol(str): a gene or a list of genes separated by comma panels(str): a gene panel of a list of panels separated by comma gtcalls(str): genotyping calls of any sample in the family tx_changes(str): amino acid changes caused by the variant, only for snvs otherwise 'Not available' name(str): user_obj['name'], uft-8 encoded comment(str): sender's comment from form Returns: html(str): the html body of the variant verification email
5,105
def clear(self):
    self._wlock.acquire()
    try:
        self._mapping.clear()
        self._queue.clear()
    finally:
        self._wlock.release()
Clear the cache.
5,106
def create_trie(self): trie = SourceRootTrie(self.source_root_factory) options = self.get_options() for category in SourceRootCategories.ALL: for pattern in options.get(.format(category), []): trie.add_pattern(pattern, category) for path, langs in options.get(.format(category), {}).items(): trie.add_fixed(path, langs, category) return trie
Create a trie of source root patterns from options. :returns: :class:`SourceRootTrie`
5,107
def edit_project(self, id_, **kwargs):
    data = self._wrap_dict("project", kwargs)
    return self.patch("/projects/{}.json".format(id_), data=data)
Edits a project by ID. All fields available at creation can be updated as well. If you want to update hourly rates retroactively, set the argument `update_hourly_rate_on_time_entries` to True.
5,108
def get_least_common_subsumer(self,from_tid,to_tid): termid_from = self.terminal_for_term.get(from_tid) termid_to = self.terminal_for_term.get(to_tid) path_from = self.paths_for_terminal[termid_from][0] path_to = self.paths_for_terminal[termid_to][0] common_nodes = set(path_from) & set(path_to) if len(common_nodes) == 0: return None else: indexes = [] for common_node in common_nodes: index1 = path_from.index(common_node) index2 = path_to.index(common_node) indexes.append((common_node,index1+index2)) indexes.sort(key=itemgetter(1)) shortest_common = indexes[0][0] return shortest_common
Returns the deepest common subsumer among two terms @type from_tid: string @param from_tid: one term id @type to_tid: string @param to_tid: another term id @rtype: string @return: the term identifier of the common subsumer
5,109
def config_control(inherit_napalm_device=None, **kwargs): * result = True comment = changed, not_changed_rsn = config_changed(inherit_napalm_device=napalm_device) if not changed: return (changed, not_changed_rsn) Will try to rollback now!commentresult\nCannot rollback! {reason}comment') ) return result, comment
Will check if the configuration was changed. If differences found, will try to commit. In case commit unsuccessful, will try to rollback. :return: A tuple with a boolean that specifies if the config was changed/committed/rollbacked on the device.\ And a string that provides more details of the reason why the configuration was not committed properly. CLI Example: .. code-block:: bash salt '*' net.config_control
5,110
def sam_readline(sock, partial=None):
    response = b''
    exception = None
    while True:
        try:
            c = sock.recv(1)
            if not c:
                # the original EOFError message format string was lost in extraction
                raise EOFError('EOF while reading line: %r %r' % (partial, response))
            elif c == b'\n':
                break
            else:
                response += c
        except (BlockingIOError, pysocket.timeout) as e:
            if partial is None:
                raise e
            else:
                exception = e
                break
    if partial is None:
        return response.decode()
    else:
        return (partial + response.decode(), exception)
read a line from a sam control socket
5,111
def __tag_repo(self, data, repository):
    assert self.__tag_name not in [t.name for t in repository.tags]
    return TagReference.create(repository, self.__tag_name, message=json.dumps(data))
Tag the current repository. :param data: a dictionary containing the data about the experiment :type data: dict
5,112
def is_user(value, min=None, max=None): if type(value) == str: try: entry = pwd.getpwnam(value) value = entry.pw_uid except KeyError: err_message = (.format(value)) raise validate.VdtValueError(err_message) return value elif type(value) == int: try: pwd.getpwuid(value) except KeyError: err_message = (.format(value)) raise validate.VdtValueError(err_message) return value else: err_message = () raise validate.VdtTypeError(err_message)
Check whether the username or uid given as argument exists. If this function receives a username, it is converted to a uid before validation.
5,113
def _get_cibfile_cksum(cibname): cibfile_cksum = .format(_get_cibfile(cibname)) log.trace(, cibfile_cksum) return cibfile_cksum
Get the full path of the file containing a checksum of a CIB-file with the name of the CIB
5,114
def derive_random_states(random_state, n=1):
    seed_ = random_state.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, 1)[0]
    return [new_random_state(seed_ + i) for i in sm.xrange(n)]
Create N new random states based on an existing random state or seed. Parameters ---------- random_state : numpy.random.RandomState Random state or seed from which to derive new random states. n : int, optional Number of random states to derive. Returns ------- list of numpy.random.RandomState Derived random states.
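The same pattern with plain NumPy, for illustration (the library's SEED_MIN_VALUE/SEED_MAX_VALUE bounds and new_random_state helper are replaced by explicit values here):

import numpy as np

parent = np.random.RandomState(42)

# derive n child states from a single seed drawn from the parent state
seed = parent.randint(0, 2**31 - 1)
children = [np.random.RandomState(seed + i) for i in range(3)]

print([c.randint(0, 100) for c in children])  # reproducible, mutually independent draws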
5,115
def get_phrases(self, ns=None, layer=, cat_key=, cat_val=): if not ns: ns = self.ns for node_id in select_nodes_by_layer(self, .format(ns, layer)): if self.node[node_id][self.ns++cat_key] == cat_val: yield node_id
yield all node IDs that dominate the given phrase type, e.g. all NPs
5,116
def parseExternalSubset(self, ExternalID, SystemID):
    libxml2mod.xmlParseExternalSubset(self._o, ExternalID, SystemID)
parse Markup declarations from an external subset [30] extSubset ::= textDecl? extSubsetDecl [31] extSubsetDecl ::= (markupdecl | conditionalSect | PEReference | S) *
5,117
def __last_commit(self): cmd = [, ] op = self.sh(cmd, shell=False) if not op: return None author, rev, datestr = op.split()[7:10] author = author.split(, 1)[1].strip() rev = rev.split(, 1)[1].strip() datestr = datestr.split(, 1)[1].split(, 1)[0].strip() return datestr, (rev, author, None, None)
Retrieve the most recent commit message (with ``svn info``) Returns: tuple: (datestr, (revno, user, None, desc)) $ svn info Path: . URL: http://python-dlp.googlecode.com/svn/trunk/layercake-python Repository Root: http://python-dlp.googlecode.com/svn Repository UUID: d0ad5f6e-b329-0410-b51c-492c9c4f233d Revision: 378 Node Kind: directory Schedule: normal Last Changed Author: chimezie Last Changed Rev: 378 Last Changed Date: 2011-05-01 01:31:38 -0500 (Sun, 01 May 2011)
5,118
def find_tool(name, additional_paths = [], path_last = False): assert isinstance(name, basestring) assert is_iterable_typed(additional_paths, basestring) assert isinstance(path_last, (int, bool)) programs = path.programs_path() match = path.glob(programs, [name, name + ]) additional_match = path.glob(additional_paths, [name, name + ]) result = [] if path_last: result = additional_match if not result and match: result = match else: if match: result = match elif additional_match: result = additional_match if result: return path.native(result[0]) else: return
Attempts to find tool (binary) named 'name' in PATH and in 'additional-paths'. If found in path, returns 'name'. If found in additional paths, returns full name. If the tool is found in several directories, returns the first path found. Otherwise, returns the empty string. If 'path_last' is specified, path is checked after 'additional_paths'.
5,119
def resolve_relative_paths(paths: List[str]) -> Dict[str, str]: buck_root = find_buck_root(os.getcwd()) if buck_root is None: LOG.error( "Buck root couldn't be found. Returning empty analysis directory mapping." ) return {} command = [ "buck", "query", "--json", "--output-attribute", ".*", "owner(%s)", *paths, ] try: output = json.loads( subprocess.check_output(command, timeout=30, stderr=subprocess.DEVNULL) .decode() .strip() ) except ( subprocess.TimeoutExpired, subprocess.CalledProcessError, json.decoder.JSONDecodeError, ) as error: raise BuckException("Querying buck for relative paths failed: {}".format(error)) results = {} for path in paths: for owner in output.values(): prefix = os.path.join(buck_root, owner["buck.base_path"]) + os.sep if not path.startswith(prefix): continue suffix = path[len(prefix) :] if suffix not in owner["srcs"]: continue if "buck.base_module" in owner: base_path = os.path.join(*owner["buck.base_module"].split(".")) else: base_path = owner["buck.base_path"] results[path] = os.path.join(base_path, owner["srcs"][suffix]) break return results
Query buck to obtain a mapping from each absolute path to the relative location in the analysis directory.
5,120
def get_spam_checker(backend_path): try: backend_module = import_module(backend_path) backend = getattr(backend_module, ) except (ImportError, AttributeError): warnings.warn( % backend_path, RuntimeWarning) backend = None except ImproperlyConfigured as e: warnings.warn(str(e), RuntimeWarning) backend = None return backend
Return the selected spam checker backend.
5,121
def ssh_file(opts, dest_path, contents=None, kwargs=None, local_file=None): if opts.get(, ) == : return sftp_file(dest_path, contents, kwargs, local_file) return scp_file(dest_path, contents, kwargs, local_file)
Copies a file to the remote SSH target using either sftp or scp, as configured.
5,122
def unique_list(input_list):
    output_list = []
    if len(input_list) > 0:
        dim = _sp.shape(input_list)[1]
        for i in input_list:
            match = False
            for j in output_list:
                if dim == 3:
                    if i[0] == j[0] and i[1] == j[1] and i[2] == j[2]:
                        match = True
                elif dim == 2:
                    if i[0] == j[0] and i[1] == j[1]:
                        match = True
                elif dim == 1:
                    if i[0] == j[0]:
                        match = True
            if match is False:
                output_list.append(i)
    return output_list
r""" For a given list (of points) remove any duplicates
5,123
def _tensor_product(t1, t2):
    return tf.matmul(tf.expand_dims(t1, axis=-1), tf.expand_dims(t2, axis=-2))
Computes the outer product of two possibly batched vectors. Args: t1: A `tf.Tensor` of shape `[..., n]`. t2: A `tf.Tensor` of shape `[..., m]`. Returns: A tensor of shape `[..., n, m]` with matching batch dimensions, let's call it `r`, whose components are: ```None r[..., i, j] = t1[..., i] * t2[..., j] ```
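A small shape check (illustrative only; assumes TensorFlow 2.x eager execution):

import tensorflow as tf

t1 = tf.constant([[1.0, 2.0, 3.0]])   # shape [1, 3]
t2 = tf.constant([[10.0, 20.0]])      # shape [1, 2]

r = tf.matmul(tf.expand_dims(t1, axis=-1), tf.expand_dims(t2, axis=-2))
print(r.shape)  # (1, 3, 2)
print(r[0])     # r[..., i, j] == t1[..., i] * t2[..., j]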
5,124
def _set_kernel_manager(self, kernel_manager): old_manager = self._kernel_manager if old_manager is not None: old_manager.started_kernel.disconnect(self._started_kernel) old_manager.started_channels.disconnect(self._started_channels) old_manager.stopped_channels.disconnect(self._stopped_channels) kernel_manager.sub_channel.message_received.connect(self._dispatch) kernel_manager.shell_channel.message_received.connect(self._dispatch) kernel_manager.stdin_channel.message_received.connect(self._dispatch) kernel_manager.hb_channel.kernel_died.connect(self._handle_kernel_died) if kernel_manager.channels_running: self._started_channels()
Disconnect from the current kernel manager (if any) and set a new kernel manager.
5,125
def inserir(self, name): division_dc_map = dict() division_dc_map[] = name code, xml = self.submit( {: division_dc_map}, , ) return self.response(code, xml)
Inserts a new Division Dc and returns its identifier. :param name: Division Dc name. String with a minimum 2 and maximum of 80 characters :return: Dictionary with the following structure: :: {'division_dc': {'id': < id_division_dc >}} :raise InvalidParameterError: Name is null and invalid. :raise NomeDivisaoDcDuplicadoError: There is already a registered Division Dc with the value of name. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
5,126
def _write_zip(self, func_src, fpath): now = datetime.now() zi_tup = (now.year, now.month, now.day, now.hour, now.minute, now.second) logger.debug(, zi_tup) zinfo = zipfile.ZipInfo(, zi_tup) zinfo.external_attr = 0x0755 << 16 logger.debug(, zinfo.external_attr) logger.debug(, fpath) with zipfile.ZipFile(fpath, ) as z: z.writestr(zinfo, func_src)
Write the function source to a zip file, suitable for upload to Lambda. Note there's a bit of undocumented magic going on here; Lambda needs the execute bit set on the module with the handler in it (i.e. 0755 or 0555 permissions). There doesn't seem to be *any* documentation on how to do this in the Python docs. The only real hint comes from the source code of ``zipfile.ZipInfo.from_file()``, which includes: st = os.stat(filename) ... zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes :param func_src: lambda function source :type func_src: str :param fpath: path to write the zip file at :type fpath: str
5,127
def add_key_filters(self, key_filters):
    # the original string literals were lost in extraction; 'query' and the
    # error message below are reconstructed guesses.
    if self._input_mode == 'query':
        raise ValueError('Key filters cannot be used with query inputs.')
    self._key_filters.extend(key_filters)
    return self
Adds key filters to the inputs. :param key_filters: a list of filters :type key_filters: list :rtype: :class:`RiakMapReduce`
5,128
def pretty_print(rows, keyword, domain):
    if isinstance(rows, dict):
        pretty_print_domain(rows, keyword, domain)
    elif isinstance(rows, list):
        pretty_print_zones(rows)
rows is a list when fetching all domains, and a dict when fetching a specific domain
5,129
def get_common_path(pathlist): common = osp.normpath(osp.commonprefix(pathlist)) if len(common) > 1: if not osp.isdir(common): return abspardir(common) else: for path in pathlist: if not osp.isdir(osp.join(common, path[len(common)+1:])): return abspardir(common) else: return osp.abspath(common)
Return common path for all paths in pathlist
5,130
def process_ipvsix_frame(self, id=None, msg=None): df = json_normalize(msg) dt = json.loads(df.to_json()) flat_msg = {} for k in dt: new_key = "ipv6_{}".format(k) flat_msg[new_key] = dt[k]["0"] if new_key not in self.ipvsix_keys: self.ipvsix_keys[new_key] = k dt["ipv6_id"] = id self.all_ipvsix.append(dt) log.debug("IPV6 data updated:") log.debug(self.ipvsix_keys) log.debug(self.all_ipvsix) log.debug("") return flat_msg
process_ipvsix_frame Convert a complex nested json dictionary to a flattened dictionary and capture all unique keys for table construction :param id: key for this msg :param msg: ipv6 frame for packet
5,131
def GetFeatureService(self, itemId, returnURLOnly=False): admin = None item = None try: admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler) if self._securityHandler.valid == False: self._valid = self._securityHandler.valid self._message = self._securityHandler.message return None item = admin.content.getItem(itemId=itemId) if item.type == "Feature Service": if returnURLOnly: return item.url else: fs = arcrest.agol.FeatureService( url=item.url, securityHandler=self._securityHandler) if fs.layers is None or len(fs.layers) == 0 : fs = arcrest.ags.FeatureService( url=item.url) return fs return None except: line, filename, synerror = trace() raise common.ArcRestHelperError({ "function": "GetFeatureService", "line": line, "filename": filename, "synerror": synerror, } ) finally: admin = None item = None del item del admin gc.collect()
Obtains a feature service by item ID. Args: itemId (str): The feature service's item ID. returnURLOnly (bool): A boolean value to return the URL of the feature service. Defaults to ``False``. Returns: When ``returnURLOnly`` is ``True``, the URL of the feature service is returned. When ``False``, the result from :py:func:`arcrest.agol.services.FeatureService` or :py:func:`arcrest.ags.services.FeatureService`.
5,132
def source_csv_to_pandas(path, table, read_csv_args=None): if not in table: table += if isinstance(path, dict): data_obj = path[table] f = data_obj.split("\n") else: if os.path.isdir(path): f = open(os.path.join(path, table)) else: z = zipfile.ZipFile(path) for path in z.namelist(): if table in path: table = path break try: f = zip_open(z, table) except KeyError as e: return pd.DataFrame() if read_csv_args: df = pd.read_csv(**read_csv_args) else: df = pd.read_csv(f) return df
Parameters ---------- path: str path to directory or zipfile table: str name of table read_csv_args: string arguments passed to the read_csv function Returns ------- df: pandas:DataFrame
5,133
def _init_goslims(self, dagslim): go2obj_main = self.gosubdag.go2obj go2obj_slim = {go for go, o in dagslim.items() if go in go2obj_main} if self.gosubdag.relationships: return self._get_goslimids_norel(go2obj_slim) return set(dagslim.keys())
Get GO IDs in GO slims.
5,134
def as_dict(self):
    d = {"@module": self.__class__.__module__,
         "@class": self.__class__.__name__}
    if self.x is not None:
        d["x"] = self.x.as_dict()
    if self.c is not None:
        d["c"] = self.c.as_dict()
    if self.xc is not None:
        d["xc"] = self.xc.as_dict()
    return d
Makes XcFunc obey the general json interface used in pymatgen for easier serialization.
5,135
def get_week_URL(date, day=0): if day < 1: day = 1 date = datetime(year=date.year, month=date.month, day=day, tzinfo=utc) return reverse(, kwargs={: date.isocalendar()[0], : date.isocalendar()[1]})
Returns the week view URL for a given date. :param date: A date instance. :param day: Day number in a month.
5,136
def number(self, assignment_class=None, namespace=): if assignment_class == : n = str(DatasetNumber()) return n
Return a new number. :param assignment_class: Determines the length of the number. Possible values are 'authority' (3 characters) , 'registered' (5) , 'unregistered' (7) and 'self' (9). Self assigned numbers are random and acquired locally, while the other assignment classes use the number server defined in the configuration. If None, then look in the number server configuration for one of the class keys, starting with the longest class and working to the shortest. :param namespace: The namespace character, the first character in the number. Can be one of 'd', 'x' or 'b' :return:
5,137
def gauss_fit(X, Y):
    X = np.asarray(X)
    Y = np.asarray(Y)
    Y[Y < 0] = 0

    def gauss(x, a, x0, sigma):
        return a * np.exp(-(x - x0)**2 / (2 * sigma**2))

    mean = (X * Y).sum() / Y.sum()
    sigma = np.sqrt((Y * ((X - mean)**2)).sum() / Y.sum())
    height = Y.max()
    return curve_fit(gauss, X, Y, p0=[height, mean, sigma])
Fit the function to a gaussian. Parameters ---------- X: 1d array X values Y: 1d array Y values Returns ------- (The return from scipy.optimize.curve_fit) popt : array Optimal values for the parameters pcov : 2d array The estimated covariance of popt. Notes ----- /!\ This uses a slow curve_fit function! do not use if need speed!
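A self-contained illustration (not from the dataset; data and seed are made up) of fitting the same Gaussian model with scipy's curve_fit and moment-based initial guesses:

import numpy as np
from scipy.optimize import curve_fit

def gauss(x, a, x0, sigma):
    return a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))

rng = np.random.default_rng(0)
X = np.linspace(-5, 5, 101)
Y = gauss(X, 2.0, 0.5, 1.2) + rng.normal(0, 0.05, X.size)

# moment-based initial guesses, as in gauss_fit above
mean = (X * Y).sum() / Y.sum()
sigma = np.sqrt((Y * (X - mean) ** 2).sum() / Y.sum())
popt, pcov = curve_fit(gauss, X, Y, p0=[Y.max(), mean, sigma])
print(popt)  # approximately [2.0, 0.5, 1.2]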
5,138
def get_raw_data(self, url, *args, **kwargs):
    res = self._conn.get(url, headers=self._prepare_headers(**kwargs))
    if res.status_code == 200:
        return res.content
    else:
        return None
Gets data from url as bytes Returns content under the provided url as bytes ie. for binary data Args: **url**: address of the wanted data .. versionadded:: 0.3.2 **additional_headers**: (optional) Additional headers to be used with request Returns: bytes
5,139
def GetAttachmentIdFromMediaId(media_id): altchars = if not six.PY2: altchars = altchars.encode() buffer = base64.b64decode(str(media_id), altchars) resoure_id_length = 20 attachment_id = if len(buffer) > resoure_id_length: attachment_id = base64.b64encode(buffer[0:resoure_id_length], altchars) if not six.PY2: attachment_id = attachment_id.decode() else: attachment_id = media_id return attachment_id
Gets attachment id from media id. :param str media_id: :return: The attachment id from the media id. :rtype: str
5,140
def read(self, size=None):
    # the original IOError message literals were lost in extraction; the
    # messages below are generic stand-ins.
    if not self._is_open:
        raise IOError('Not opened.')
    if self._current_offset < 0:
        raise IOError('Invalid current offset value less than zero.')
    if self._current_offset >= self._size:
        return b''
    if size is None or self._current_offset + size > self._size:
        size = self._size - self._current_offset
    if self._tsk_attribute:
        data = self._tsk_file.read_random(
            self._current_offset, size, self._tsk_attribute.info.type,
            self._tsk_attribute.info.id)
    else:
        data = self._tsk_file.read_random(self._current_offset, size)
    self._current_offset += len(data)
    return data
Reads a byte string from the file-like object at the current offset. The function will read a byte string of the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed.
5,141
def _listen(self): def _listen(): for line in iter(sys.stdin.readline, b""): try: response = json.loads(line) except Exception as e: thread = threading.Thread(target=_listen) thread.daemon = True thread.start()
Listen for messages passed from parent This method distributes messages received via stdin to their corresponding channel. Based on the format of the incoming message, the message is forwarded to its corresponding channel to be processed by its corresponding handler.
5,142
def delete_affinity_group(self, affinity_group_name): _validate_not_none(, affinity_group_name) return self._perform_delete( + self.subscription_id + \ + \ _str(affinity_group_name))
Deletes an affinity group in the specified subscription. affinity_group_name: The name of the affinity group.
5,143
def error_codes(self):
    if self._error_codes is None:
        from .tcex_error_codes import TcExErrorCodes
        self._error_codes = TcExErrorCodes()
    return self._error_codes
ThreatConnect error codes.
5,144
def decrypt(self, msg):
    error = False
    signature = msg[0:SHA256.digest_size]
    iv = msg[SHA256.digest_size:SHA256.digest_size + AES.block_size]
    cipher_text = msg[SHA256.digest_size + AES.block_size:]
    if self.sign(iv + cipher_text) != signature:
        error = True
    ctr = Counter.new(AES.block_size * 8, initial_value=self.bin2long(iv))
    cipher = AES.AESCipher(self._cipherkey, AES.MODE_CTR, counter=ctr)
    plain_text = cipher.decrypt(cipher_text)
    if error:
        raise DecryptionError
    return plain_text
decrypt a message
5,145
def info(model=None, markdown=False, silent=False): msg = Printer() if model: if util.is_package(model): model_path = util.get_package_path(model) else: model_path = util.get_data_path() / model meta_path = model_path / "meta.json" if not meta_path.is_file(): msg.fail("Can{}'".format(model) model_meta = { k: v for k, v in meta.items() if k not in ("accuracy", "speed") } if markdown: print_markdown(model_meta, title=title) else: msg.table(model_meta, title=title) return meta data = { "spaCy version": about.__version__, "Location": path2str(Path(__file__).parent.parent), "Platform": platform.platform(), "Python version": platform.python_version(), "Models": list_models(), } if not silent: title = "Info about spaCy" if markdown: print_markdown(data, title=title) else: msg.table(data, title=title) return data
Print info about spaCy installation. If a model shortcut link is specified as an argument, print model information. Flag --markdown prints details in Markdown for easy copy-pasting to GitHub issues.
5,146
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_rbridge_id(self, **kwargs): config = ET.Element("config") logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status") config = logical_chassis_fwdl_status output = ET.SubElement(logical_chassis_fwdl_status, "output") cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries") rbridge_id = ET.SubElement(cluster_fwdl_entries, "rbridge-id") rbridge_id.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
5,147
def is_address_here(self, address):
    base = self.get_base()
    size = self.get_size()
    if base and size:
        return base <= address < (base + size)
    return None
Tries to determine if the given address belongs to this module. @type address: int @param address: Memory address. @rtype: bool or None @return: C{True} if the address belongs to the module, C{False} if it doesn't, and C{None} if it can't be determined.
5,148
def get_msms_df_on_file(pdb_file, outfile=None, outdir=None, outext=, force_rerun=False): outfile = ssbio.utils.outfile_maker(inname=pdb_file, outname=outfile, outdir=outdir, outext=outext) if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile): my_structure = StructureIO(pdb_file) model = my_structure.first_model df = get_msms_df(model, pdb_id=op.splitext(op.basename(pdb_file))[0], outfile=outfile, outdir=outdir, outext=outext, force_rerun=force_rerun) else: log.debug(.format(outfile, force_rerun)) df = pd.read_csv(outfile, index_col=0) return df
Run MSMS (using Biopython) on a PDB file. Saves a CSV file of: chain: chain ID resnum: residue number (PDB numbering) icode: residue insertion code res_depth: average depth of all atoms in a residue ca_depth: depth of the alpha carbon atom Depths are in units Angstroms. 1A = 10^-10 m = 1nm Args: pdb_file: Path to PDB file outfile: Optional name of output file (without extension) outdir: Optional output directory outext: Optional extension for the output file outext: Suffix appended to json results file force_rerun: Rerun MSMS even if results exist already Returns: Pandas DataFrame: ResidueDepth property_dict, reformatted
5,149
def msg_curse(self, args=None, max_width=None): ret = [] if not self.stats or self.args.percpu or self.is_disable(): return ret idle_tag = not in self.stats msg = .format() ret.append(self.curse_add_line(msg, "TITLE")) trend_user = self.get_trend() trend_system = self.get_trend() if trend_user is None or trend_user is None: trend_cpu = None else: trend_cpu = trend_user + trend_system msg = .format(self.trend_msg(trend_cpu)) ret.append(self.curse_add_line(msg)) msg = .format(self.stats[]) if idle_tag: ret.append(self.curse_add_line( msg, self.get_views(key=, option=))) else: ret.append(self.curse_add_line(msg)) if in self.stats: msg = .format() ret.append(self.curse_add_line(msg, optional=self.get_views(key=, option=))) msg = .format(self.stats[]) ret.append(self.curse_add_line(msg, optional=self.get_views(key=, option=))) if in self.stats: msg = .format() ret.append(self.curse_add_line(msg, optional=self.get_views(key=, option=))) msg = .format(self.auto_unit(int(self.stats[] // self.stats[]), min_symbol=)) ret.append(self.curse_add_line( msg, self.get_views(key=, option=), optional=self.get_views(key=, option=))) ret.append(self.curse_new_line()) if in self.stats: msg = .format() ret.append(self.curse_add_line(msg)) msg = .format(self.stats[]) ret.append(self.curse_add_line( msg, self.get_views(key=, option=))) elif in self.stats: msg = .format() ret.append(self.curse_add_line(msg)) msg = .format(self.stats[]) ret.append(self.curse_add_line(msg)) if in self.stats: msg = .format() ret.append(self.curse_add_line(msg, optional=self.get_views(key=, option=))) msg = .format(self.stats[]) ret.append(self.curse_add_line(msg, optional=self.get_views(key=, option=))) if in self.stats: msg = .format() ret.append(self.curse_add_line(msg, optional=self.get_views(key=, option=))) msg = .format(int(self.stats[] // self.stats[])) ret.append(self.curse_add_line(msg, optional=self.get_views(key=, option=))) ret.append(self.curse_new_line()) if in self.stats and not idle_tag: msg = .format() ret.append(self.curse_add_line(msg)) msg = .format(self.stats[]) ret.append(self.curse_add_line( msg, self.get_views(key=, option=))) else: msg = .format() ret.append(self.curse_add_line(msg)) msg = .format(self.stats[]) ret.append(self.curse_add_line(msg)) if in self.stats: msg = .format() ret.append(self.curse_add_line(msg, optional=self.get_views(key=, option=))) msg = .format(self.stats[]) ret.append(self.curse_add_line( msg, self.get_views(key=, option=), optional=self.get_views(key=, option=))) if in self.stats: msg = .format() ret.append(self.curse_add_line(msg, optional=self.get_views(key=, option=))) msg = .format(int(self.stats[] // self.stats[])) ret.append(self.curse_add_line(msg, optional=self.get_views(key=, option=))) ret.append(self.curse_new_line()) if in self.stats and not idle_tag: msg = .format() ret.append(self.curse_add_line(msg)) msg = .format(self.stats[]) ret.append(self.curse_add_line(msg)) if in self.stats: msg = .format() ret.append(self.curse_add_line(msg, optional=self.get_views(key=, option=))) msg = .format(self.stats[]) ret.append(self.curse_add_line( msg, self.get_views(key=, option=), optional=self.get_views(key=, option=))) if in self.stats and not LINUX: msg = .format() ret.append(self.curse_add_line(msg, optional=self.get_views(key=, option=))) msg = .format(int(self.stats[] // self.stats[])) ret.append(self.curse_add_line(msg, optional=self.get_views(key=, option=))) return ret
Return the list to display in the UI.
5,150
def translate_exception(exc_info, initial_skip=0): tb = exc_info[2] frames = [] for x in range(initial_skip): if tb is not None: tb = tb.tb_next initial_tb = tb while tb is not None: if not frames: reraise(exc_info[0], exc_info[1], exc_info[2]) return ProcessedTraceback(exc_info[0], exc_info[1], frames)
If passed an exc_info it will automatically rewrite the exceptions all the way down to the correct line numbers and frames.
5,151
def parse_from_json(json_str): try: message_dict = json.loads(json_str) except ValueError: raise ParseError("Mal-formed JSON input.") upload_keys = message_dict.get(, False) if upload_keys is False: raise ParseError( "uploadKeys does not exist. At minimum, an empty array is required." ) elif not isinstance(upload_keys, list): raise ParseError( "uploadKeys must be an array object." ) upload_type = message_dict[] try: if upload_type == : return orders.parse_from_dict(message_dict) elif upload_type == : return history.parse_from_dict(message_dict) else: raise ParseError( % upload_type) except TypeError as exc: raise ParseError(exc.message)
Given a Unified Uploader message, parse the contents and return a MarketOrderList or MarketHistoryList instance. :param str json_str: A Unified Uploader message as a JSON string. :rtype: MarketOrderList or MarketHistoryList :raises: MalformedUploadError when invalid JSON is passed in.
5,152
def create_unary_node(self, operator, child, param=None, schema=None): if operator == self.grammar.syntax.select_op: conditions = .join(flatten(param)) node = SelectNode(child, conditions) elif operator == self.grammar.syntax.project_op: node = ProjectNode(child, param) elif operator == self.grammar.syntax.rename_op: name = None attributes = [] if isinstance(param[0], str): name = param.pop(0) if param: attributes = param[0] node = RenameNode(child, name, attributes, schema) elif operator == self.grammar.syntax.assign_op: name = param[0] attributes = [] if len(param) < 2 else param[1] node = AssignNode(child, name, attributes, schema) else: raise ValueError return node
Return a Unary Node whose type depends on the specified operator. :param schema: :param child: :param operator: A relational algebra operator (see constants.py) :param param: A list of parameters for the operator. :return: A Unary Node.
5,153
def get_translatable_children(self, obj):
    # the database-alias literal was lost in extraction; 'default' assumed
    collector = NestedObjects(using='default')
    collector.collect([obj])
    object_list = collector.nested()
    items = self.get_elements(object_list)
    return items[1:]
Obtain all the translatable children from "obj" :param obj: :return:
5,154
def package(self): cmake = self.configure_cmake() cmake.install() self.copy(pattern="LICENSE.txt", dst="licenses") self.copy(pattern="FindFlatBuffers.cmake", dst=os.path.join("lib", "cmake", "flatbuffers"), src="CMake") self.copy(pattern="flathash*", dst="bin", src="bin") self.copy(pattern="flatc*", dst="bin", src="bin") if self.settings.os == "Windows" and self.options.shared: if self.settings.compiler == "Visual Studio": shutil.move(os.path.join(self.package_folder, "lib", "%s.dll" % self.name), os.path.join(self.package_folder, "bin", "%s.dll" % self.name)) elif self.settings.compiler == "gcc": shutil.move(os.path.join(self.package_folder, "lib", "lib%s.dll" % self.name), os.path.join(self.package_folder, "bin", "lib%s.dll" % self.name))
Copy Flatbuffers' artifacts to package folder
5,155
def _get_sorted_action_keys(self, keys_list):
    action_list = []
    for key in keys_list:
        if key.startswith('action-'):
            action_list.append(key)
    action_list.sort()
    return action_list
This function returns only the elements starting with 'action-' in 'keys_list'. The returned list is sorted by the index appended to the end of each element
5,156
def update(self, prms):
    with self.sess.as_default():
        fetches = []
        feeds = {}
        for name, value in six.iteritems(prms):
            assert name in self.name_map
            var = self.name_map[name]
            fetches.append(var.initializer)
            feeds[var.initializer.inputs[1]] = SessionUpdate.relaxed_value_for_var(value, var)
        self.sess.run(fetches, feed_dict=feeds)
Args: prms(dict): dict of {variable name: value} Any name in prms must be in the graph and in vars_to_update.
5,157
def zip_job(job_ini, archive_zip=, risk_ini=, oq=None, log=logging.info): if not os.path.exists(job_ini): sys.exit( % job_ini) archive_zip = archive_zip or if isinstance(archive_zip, str): if not archive_zip.endswith(): sys.exit( % archive_zip) if os.path.exists(archive_zip): sys.exit( % archive_zip) oq = oq or readinput.get_oqparam(job_ini, validate=False) if risk_ini: risk_ini = os.path.normpath(os.path.abspath(risk_ini)) risk_inputs = readinput.get_params([risk_ini])[] del risk_inputs[] oq.inputs.update(risk_inputs) files = readinput.get_input_files(oq) if risk_ini: files = [risk_ini] + files return general.zipfiles(files, archive_zip, log=log)
Zip the given job.ini file into the given archive, together with all related files.
5,158
def get_dataset(self, key, info): if key.name in [, ]: logger.debug() if self.lons is None or self.lats is None: self.lons, self.lats = self.get_lonlats() if key.name == : proj = Dataset(self.lats, id=key, **info) else: proj = Dataset(self.lons, id=key, **info) else: data = self.get_sds_variable(key.name) proj = Dataset(data, id=key, **info) return proj
Read data from file and return the corresponding projectables.
5,159
def separator_line(cls, sep='-', size=10):
    # the default separator character was lost in extraction; '-' assumed
    if cls.intty():
        cls.echo(sep * size)
Display a separator line.
5,160
def open(self, pathobj):
    url = str(pathobj)
    raw, code = self.rest_get_stream(url, auth=pathobj.auth, verify=pathobj.verify,
                                     cert=pathobj.cert)
    if not code == 200:
        raise RuntimeError("%d" % code)
    return raw
Opens the remote file and returns a file-like object HTTPResponse Given the nature of HTTP streaming, this object doesn't support seek()
5,161
def put(self, url, json=None, data=None, **kwargs):
    check_type(url, basestring, may_be_none=False)
    # the string literals below were lost in extraction; 'erc' and 'PUT' are
    # reconstructed from the docstring and the surrounding request helpers.
    erc = kwargs.pop('erc', EXPECTED_RESPONSE_CODE['PUT'])
    response = self.request('PUT', url, erc, json=json, data=data, **kwargs)
    return extract_and_parse_json(response)
Sends a PUT request. Args: url(basestring): The URL of the API endpoint. json: Data to be sent in JSON format in tbe body of the request. data: Data to be sent in the body of the request. **kwargs: erc(int): The expected (success) response code for the request. others: Passed on to the requests package. Raises: ApiError: If anything other than the expected response code is returned by the Webex Teams API endpoint.
5,162
def create(self, deal_id, *args, **kwargs): if not args and not kwargs: raise Exception() attributes = args[0] if args else kwargs attributes = dict((k, v) for k, v in attributes.iteritems() if k in self.OPTS_KEYS_TO_PERSIST) _, _, associated_contact = self.http_client.post("/deals/{deal_id}/associated_contacts".format(deal_id=deal_id), body=attributes) return associated_contact
Create an associated contact Creates a deal's associated contact and its role If the specified deal or contact does not exist, the request will return an error :calls: ``post /deals/{deal_id}/associated_contacts`` :param int deal_id: Unique identifier of a Deal. :param tuple *args: (optional) Single object representing AssociatedContact resource. :param dict **kwargs: (optional) AssociatedContact attributes. :return: Dictionary that support attriubte-style access and represents newely created AssociatedContact resource. :rtype: dict
5,163
def envGet(self, name, default=None, conv=None): if self._env.has_key(name): if conv is not None: return conv(self._env.get(name)) else: return self._env.get(name) else: return default
Return value for environment variable or None. @param name: Name of environment variable. @param default: Default value if variable is undefined. @param conv: Function for converting value to desired type. @return: Value of environment variable.
5,164
def crop_at_zero_crossing(gen, seconds=5, error=0.1): source = iter(gen) buffer_length = int(2 * error * sampler.FRAME_RATE) start = itertools.islice(source, 0, int((seconds - error) * sampler.FRAME_RATE)) end = itertools.islice(source, 0, buffer_length) for sample in start: yield sample end = list(end) best = sorted(enumerate(end), key=lambda x: (math.fabs(x[1]),abs((buffer_length/2)-x[0]))) print best[:10] print best[0][0] for sample in end[:best[0][0] + 1]: yield sample
Crop the generator, ending at a zero crossing. Crop the generator to produce approximately `seconds` seconds (default 5s) of audio at the provided FRAME_RATE, attempting to end the clip at a zero-crossing point to avoid clicking.
5,165
def chunkify(chunksize):
    def chunkifier(func):
        def wrap(*args):
            assert len(args) > 0
            assert all(len(a.flat) == len(args[0].flat) for a in args)
            nelements = len(args[0].flat)
            nchunks, remain = divmod(nelements, chunksize)
            out = np.ndarray(args[0].shape)
            for start in range(0, nelements, chunksize):
                stop = start + chunksize
                if stop > nelements:
                    # clamp the final (partial) chunk to the end of the array
                    stop = nelements
                iargs = tuple(a.flat[start:stop] for a in args)
                out.flat[start:stop] = func(*iargs)
            return out
        return wrap
    return chunkifier
Very stupid "chunk vectorizer" which keeps memory use down. This version requires all inputs to have the same number of elements, although it shouldn't be that hard to implement simple broadcasting.
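A hedged usage sketch of the decorator above (assumes chunkify and NumPy are available; the wrapped function and chunk size are chosen purely for illustration):

import numpy as np

@chunkify(chunksize=4)
def elementwise_sum(a, b):
    # receives flat chunks of at most 4 elements at a time
    return a + b

x = np.arange(8, dtype=float).reshape(2, 4)
y = np.ones((2, 4))
print(elementwise_sum(x, y))  # same result as x + y, computed chunk by chunk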
5,166
def to_nullable_map(value):
    if value is None:
        return None
    try:
        value = json.loads(value)
        return RecursiveMapConverter.to_nullable_map(value)
    except:
        return None
Converts JSON string into map object or returns null when conversion is not possible. :param value: the JSON string to convert. :return: Map object value or null when conversion is not supported.
5,167
def uninstall(pkg): * ret = {: None, : } out = __salt__[](FLATPAK_BINARY_NAME + + pkg) if out[] and out[]: ret[] = out[].strip() ret[] = False else: ret[] = out[].strip() ret[] = True return ret
Uninstall the specified package. Args: pkg (str): The package name. Returns: dict: The ``result`` and ``output``. CLI Example: .. code-block:: bash salt '*' flatpak.uninstall org.gimp.GIMP
5,168
def get_fallback_coord(self,isotope=,masslimit=0.1,masscutlim=False,delay=True): s prescription. MINImass cutNi-56O-16found new remnantni56:Ni-56 does not reach 0.1Msun, take old remnant return minis, fallback_coords
Returns the fallback mass coordinate such that the amount `masslimit` of the isotope `isotope` is ejected. The explosion type is chosen with the `delay` option. masscutlim: If true, the new fallback coordinate can only be as small as the original fallback prescription by C. Fryer. Useful for more massive stars which would not eject any metals with Fryer's prescription.
5,169
def decode_devid(devid, pname): devid devid = int(devid) if devid == 0: return bus_type=devid & 0x07 bus=(devid>>3) & 0x1F address=(devid>>8)&0xFF devtype=(devid>>16) bustypes = { 1: "I2C", 2: "SPI", 3: "UAVCAN", 4: "SITL" } compass_types = { 0x01 : "DEVTYPE_HMC5883_OLD", 0x07 : "DEVTYPE_HMC5883", 0x02 : "DEVTYPE_LSM303D", 0x04 : "DEVTYPE_AK8963 ", 0x05 : "DEVTYPE_BMM150 ", 0x06 : "DEVTYPE_LSM9DS1", 0x08 : "DEVTYPE_LIS3MDL", 0x09 : "DEVTYPE_AK09916", 0x0A : "DEVTYPE_IST8310", 0x0B : "DEVTYPE_ICM20948", 0x0C : "DEVTYPE_MMC3416", 0x0D : "DEVTYPE_QMC5883L", 0x0E : "DEVTYPE_MAG3110", 0x0F : "DEVTYPE_SITL", 0x10 : "DEVTYPE_IST8308", 0x11 : "DEVTYPE_RM3100", } imu_types = { 0x09 : "DEVTYPE_BMI160", 0x10 : "DEVTYPE_L3G4200D", 0x11 : "DEVTYPE_ACC_LSM303D", 0x12 : "DEVTYPE_ACC_BMA180", 0x13 : "DEVTYPE_ACC_MPU6000", 0x16 : "DEVTYPE_ACC_MPU9250", 0x17 : "DEVTYPE_ACC_IIS328DQ", 0x21 : "DEVTYPE_GYR_MPU6000", 0x22 : "DEVTYPE_GYR_L3GD20", 0x24 : "DEVTYPE_GYR_MPU9250", 0x25 : "DEVTYPE_GYR_I3G4250D", 0x26 : "DEVTYPE_GYR_LSM9DS1", 0x27 : "DEVTYPE_INS_ICM20789", 0x28 : "DEVTYPE_INS_ICM20689", 0x29 : "DEVTYPE_INS_BMI055", 0x2A : "DEVTYPE_SITL", 0x2B : "DEVTYPE_INS_BMI088", 0x2C : "DEVTYPE_INS_ICM20948", 0x2D : "DEVTYPE_INS_ICM20648", 0x2E : "DEVTYPE_INS_ICM20649", 0x2F : "DEVTYPE_INS_ICM20602", } decoded_devname = "" if pname.startswith("COMPASS"): decoded_devname = compass_types.get(devtype, "UNKNOWN") if pname.startswith("INS"): decoded_devname = imu_types.get(devtype, "UNKNOWN") print("%s: bus_type:%s(%u) bus:%u address:%u(0x%x) devtype:%u(0x%x) %s" % ( pname, bustypes.get(bus_type,"UNKNOWN"), bus_type, bus, address, address, devtype, devtype, decoded_devname))
decode one device ID. Used for 'devid' command in mavproxy and MAVExplorer
5,170
def set_mode_loiter(self):
    if self.mavlink10():
        self.mav.command_long_send(self.target_system, self.target_component,
                                   mavlink.MAV_CMD_NAV_LOITER_UNLIM, 0,
                                   0, 0, 0, 0, 0, 0, 0)
    else:
        MAV_ACTION_LOITER = 27
        self.mav.action_send(self.target_system, self.target_component, MAV_ACTION_LOITER)
enter LOITER mode
5,171
def rvs(self, size=1, param=None): if param is not None: dtype = [(param, float)] else: dtype = [(p, float) for p in self.params] arr = numpy.zeros(size, dtype=dtype) for (p,_) in dtype: offset = numpy.power(self._bounds[p][0], self.dim) factor = numpy.power(self._bounds[p][1], self.dim) - \ numpy.power(self._bounds[p][0], self.dim) arr[p] = numpy.random.uniform(0.0, 1.0, size=size) arr[p] = numpy.power(factor * arr[p] + offset, 1.0 / self.dim) return arr
Gives a set of random values drawn from this distribution. Parameters ---------- size : {1, int} The number of values to generate; default is 1. param : {None, string} If provided, will just return values for the given parameter. Otherwise, returns random values for each parameter. Returns ------- structured array The random values in a numpy structured array. If a param was specified, the array will only have an element corresponding to the given parameter. Otherwise, the array will have an element for each parameter in self's params.
5,172
def create_supercut_in_batches(composition, outputfile, padding): total_clips = len(composition) start_index = 0 end_index = BATCH_SIZE batch_comp = [] while start_index < total_clips: filename = outputfile + + str(start_index) + try: create_supercut(composition[start_index:end_index], filename, padding) batch_comp.append(filename) gc.collect() start_index += BATCH_SIZE end_index += BATCH_SIZE except: start_index += BATCH_SIZE end_index += BATCH_SIZE next clips = [VideoFileClip(filename) for filename in batch_comp] video = concatenate(clips) video.to_videofile(outputfile, codec="libx264", temp_audiofile=, remove_temp=True, audio_codec=) for filename in batch_comp: os.remove(filename) cleanup_log_files(outputfile)
Create & concatenate video clips in groups of size BATCH_SIZE and output finished video file to output directory.
5,173
def execute(self):
    from ambry.mprlib import execute_sql
    execute_sql(self._bundle.library, self.record_content)
Executes all sql statements from bundle.sql.
5,174
def take_snapshot(self, entity_id, entity, last_event_version):
    snapshot = Snapshot(
        originator_id=entity_id,
        originator_version=last_event_version,
        topic=get_topic(entity.__class__),
        state=None if entity is None else deepcopy(entity.__dict__)
    )
    self.snapshot_store.store(snapshot)
    return snapshot
Creates a Snapshot from the given state, and appends it to the snapshot store. :rtype: Snapshot
5,175
def p(i, sample_size, weights):
    weight_i = weights[i]
    weights_sum = sum(weights)
    other_weights = list(weights)
    del other_weights[i]
    probability_of_i = 0
    for picks in range(0, sample_size):
        permutations = list(itertools.permutations(other_weights, picks))
        permutation_probabilities = []
        for permutation in permutations:
            pick_probabilities = []
            pick_weight_sum = weights_sum
            for pick in permutation:
                pick_probabilities.append(pick / pick_weight_sum)
                pick_weight_sum -= pick
            pick_probabilities += [weight_i / pick_weight_sum]
            permutation_probability = reduce(
                lambda x, y: x * y, pick_probabilities
            )
            permutation_probabilities.append(permutation_probability)
        probability_of_i += sum(permutation_probabilities)
    return probability_of_i
Given a weighted set and sample size, return the probability that the weight `i` will be present in the sample. Created to test the output of the `SomeOf` maker class. The math was provided by Andy Blackshaw - thank you dad :)
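A quick sanity check (illustrative; assumes p and its itertools/reduce imports are available): with equal weights, drawing a sample of size k from n items should give each item probability k/n.

weights = [1.0, 1.0, 1.0, 1.0]      # n = 4 equally weighted items
sample_size = 2

print(p(0, sample_size, weights))   # expected 0.5 == k / n
print(sum(p(i, sample_size, weights) for i in range(len(weights))))  # expected 2.0 == k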
5,176
def jsonify(self, obj, many=sentinel, *args, **kwargs):
    if many is sentinel:
        many = self.many
    if _MARSHMALLOW_VERSION_INFO[0] >= 3:
        data = self.dump(obj, many=many)
    else:
        data = self.dump(obj, many=many).data
    return flask.jsonify(data, *args, **kwargs)
Return a JSON response containing the serialized data. :param obj: Object to serialize. :param bool many: Whether `obj` should be serialized as an instance or as a collection. If unset, defaults to the value of the `many` attribute on this Schema. :param kwargs: Additional keyword arguments passed to `flask.jsonify`. .. versionchanged:: 0.6.0 Takes the same arguments as `marshmallow.Schema.dump`. Additional keyword arguments are passed to `flask.jsonify`. .. versionchanged:: 0.6.3 The `many` argument for this method defaults to the value of the `many` attribute on the Schema. Previously, the `many` argument of this method defaulted to False, regardless of the value of `Schema.many`.
5,177
def ExamineEvent(self, mediator, event): event_data_type = getattr(event, , ) if event_data_type == : service = WindowsService.FromEvent(event) self._service_collection.AddService(service)
Analyzes an event and creates Windows Services as required. At present, this method only handles events extracted from the Registry. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event to examine.
5,178
def hdict(self, hashroot): hfiles = self.keys(hashroot + "/*") hfiles.sort() last = len(hfiles) and hfiles[-1] or if last.endswith(): hfiles = [last] + hfiles[:-1] all = {} for f in hfiles: try: all.update(self[f]) except KeyError: print "Corrupt",f,"deleted - hset is not threadsafe!" del self[f] self.uncache(f) return all
Get all data contained in hashed category 'hashroot' as dict
5,179
def build_masters( filename, master_dir, designspace_instance_dir=None, designspace_path=None, family_name=None, propagate_anchors=True, minimize_glyphs_diffs=False, normalize_ufos=False, create_background_layers=False, generate_GDEF=True, store_editor_state=True, ): font = GSFont(filename) if not os.path.isdir(master_dir): os.mkdir(master_dir) if designspace_instance_dir is None: instance_dir = None else: instance_dir = os.path.relpath(designspace_instance_dir, master_dir) designspace = to_designspace( font, family_name=family_name, propagate_anchors=propagate_anchors, instance_dir=instance_dir, minimize_glyphs_diffs=minimize_glyphs_diffs, generate_GDEF=generate_GDEF, store_editor_state=store_editor_state, ) ufos = {} for source in designspace.sources: if source.filename in ufos: assert source.font is ufos[source.filename] continue if create_background_layers: ufo_create_background_layer_for_all_glyphs(source.font) ufo_path = os.path.join(master_dir, source.filename) clean_ufo(ufo_path) source.font.save(ufo_path) if normalize_ufos: import ufonormalizer ufonormalizer.normalizeUFO(ufo_path, writeModTimes=False) ufos[source.filename] = source.font if not designspace_path: designspace_path = os.path.join(master_dir, designspace.filename) designspace.write(designspace_path) return Masters(ufos, designspace_path)
Write and return UFOs from the masters and the designspace defined in a .glyphs file. Args: master_dir: Directory where masters are written. designspace_instance_dir: If provided, a designspace document will be written alongside the master UFOs though no instances will be built. family_name: If provided, the master UFOs will be given this name and only instances with this name will be included in the designspace. Returns: A named tuple of master UFOs (`ufos`) and the path to the designspace file (`designspace_path`).
5,180
def push(self, line):
    self.buffer.append(line)
    source = "\n".join(self.buffer)
    more = self.runsource(source, self.filename)
    if not more:
        self.resetbuffer()
    return more
Push a line to the interpreter. The line should not have a trailing newline; it may have internal newlines. The line is appended to a buffer and the interpreter's runsource() method is called with the concatenated contents of the buffer as source. If this indicates that the command was executed or invalid, the buffer is reset; otherwise, the command is incomplete, and the buffer is left as it was after the line was appended. The return value is 1 if more input is required, 0 if the line was dealt with in some way (this is the same as runsource()).
5,181
def overview(): search = Credential.search() search.aggs.bucket(, , field=, order={: }, size=20)\ .metric(, , field=) \ .metric(, , field=) \ .metric(, , docvalue_fields=[], size=100) response = search.execute() print_line("{0:65} {1:5} {2:5} {3:5} {4}".format("Secret", "Count", "Hosts", "Users", "Usernames")) print_line("-"*100) for entry in response.aggregations.password_count.buckets: usernames = [] for creds in entry.top_hits: usernames.append(creds.username[0]) usernames = list(set(usernames)) print_line("{0:65} {1:5} {2:5} {3:5} {4}".format(entry.key, entry.doc_count, entry.host_count.value, entry.username_count.value, usernames))
Provides an overview of the duplicate credentials.
5,182
def launch_svc_event_handler(self, service): service.get_event_handlers(self.hosts, self.daemon.macromodulations, self.daemon.timeperiods, ext_cmd=True)
Launch event handler for a service Format of the line that triggers function call:: LAUNCH_SVC_EVENT_HANDLER;<host_name>;<service_description> :param service: service to execute the event handler :type service: alignak.objects.service.Service :return: None
5,183
def appendGraph(self, graph_name, graph):
    self._graphDict[graph_name] = graph
    self._graphNames.append(graph_name)
    if not self.isMultigraph and len(self._graphNames) > 1:
        raise AttributeError("Simple Munin Plugins cannot have more than one graph.")
Utility method to associate Graph Object to Plugin. This utility method is for use in constructor of child classes for associating a MuninGraph instances to the plugin. @param graph_name: Graph Name @param graph: MuninGraph Instance
5,184
def _run_single(self, thread_id, agent, environment, deterministic=False, max_episode_timesteps=-1, episode_finished=None, testing=False, sleep=None): old_episode_finished = False if episode_finished is not None and len(getargspec(episode_finished).args) == 1: old_episode_finished = True episode = 0 while not self.should_stop: state = environment.reset() agent.reset() self.global_timestep, self.global_episode = agent.timestep, agent.episode episode_reward = 0 time_step = 0 time_start = time.time() while True: action, internals, states = agent.act(states=state, deterministic=deterministic, buffered=False) reward = 0 for repeat in xrange(self.repeat_actions): state, terminal, step_reward = environment.execute(action=action) reward += step_reward if terminal: break if not testing: agent.atomic_observe( states=state, actions=action, internals=internals, reward=reward, terminal=terminal ) if sleep is not None: time.sleep(sleep) time_step += 1 episode_reward += reward if terminal or time_step == max_episode_timesteps: break if self.should_stop: return self.global_timestep += time_step self.episode_list_lock.acquire() self.episode_rewards.append(episode_reward) self.episode_timesteps.append(time_step) self.episode_times.append(time.time() - time_start) self.episode_list_lock.release() if episode_finished is not None: if old_episode_finished: summary_data = { "thread_id": thread_id, "episode": episode, "timestep": time_step, "episode_reward": episode_reward } if not episode_finished(summary_data): return elif not episode_finished(self, thread_id): return episode += 1
The target function for a thread, runs an agent and environment until signaled to stop. Adds rewards to shared episode rewards list. Args: thread_id (int): The ID of the thread that's running this target function. agent (Agent): The Agent object that this particular thread uses. environment (Environment): The Environment object that this particular thread uses. max_episode_timesteps (int): Max. number of timesteps per episode. Use -1 or 0 for non-limited episodes. episode_finished (callable): Function called after each episode that takes an episode summary spec and returns False, if this single run should terminate after this episode. Can be used e.g. to set a particular mean reward threshold.
5,185
def get_python(self):
    cursor = super(ReferenceField, self).get_python()
    if self.multiselect:
        return cursor
    else:
        try:
            return cursor[0]
        except IndexError:
            return None
Return cursor if multi-select, direct value if single-select
5,186
def write_result(self, data): data[] = ujson.dumps(data[]) self.results.append(data) if len(self.results) >= 150: with db.execution_context(): with db.atomic(): Result.insert_many(self.results).execute() del self.results[:]
Write the results received to the database :param dict data: the data to save in database :return: None
5,187
def create_custom_trees(cls, obj, options=None):
    clones, id_mapping = {}, []
    obj_ids = cls.get_object_ids(obj)
    offset = cls.id_offset()
    obj_ids = [None] if len(obj_ids) == 0 else obj_ids
    for tree_id in obj_ids:
        if tree_id is not None and tree_id in Store.custom_options():
            original = Store.custom_options()[tree_id]
            clone = OptionTree(items=original.items(),
                               groups=original.groups)
            clones[tree_id + offset + 1] = clone
            id_mapping.append((tree_id, tree_id + offset + 1))
        else:
            clone = OptionTree(groups=Store.options().groups)
            clones[offset] = clone
            id_mapping.append((tree_id, offset))

        # Carry over allowed_keywords for any element named in the option specs.
        # Specs look like 'Element.group', hence the '.' split; the separator
        # literal was lost in extraction and is restored here.
        for k in Store.options():
            if k in [(opt.split('.')[0],) for opt in options]:
                group = {grp: Options(allowed_keywords=opt.allowed_keywords)
                         for (grp, opt) in Store.options()[k].groups.items()}
                clone[k] = group

    return {k: cls.apply_customizations(options, t) if options else t
            for k, t in clones.items()}, id_mapping
Returns the appropriate set of customized subtree clones for an object, suitable for merging with Store.custom_options (i.e with the ids appropriately offset). Note if an object has no integer ids a new OptionTree is built. The id_mapping return value is a list mapping the ids that need to be matched as set to their new values.
5,188
def security(self, domain):
    uri = self._uris["security"].format(domain)
    return self.get_parse(uri)
Get the Security Information for the given domain. For details, see https://investigate.umbrella.com/docs/api#securityInfo
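A hedged usage sketch, assuming this method lives on the public Investigate client of the OpenDNS/Umbrella investigate package; the API key and the 'dga_score' field are illustrative:

>>> inv = Investigate('YOUR-API-KEY')
>>> info = inv.security('example.com')
>>> info['dga_score']  # one of the scores returned by the security endpoint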
5,189
def _load_cache(self, filename):
    try:
        # pickle requires the file to be opened in binary mode; the mode
        # argument was lost in extraction and is restored here as 'rb'.
        with open(filename + self.CACHE_EXTENSION, 'rb') as file:
            prev_number_of_pages, prev_page_references = pickle.load(file)
    except (IOError, TypeError):
        prev_number_of_pages, prev_page_references = {}, {}
    return prev_number_of_pages, prev_page_references
Load the cached page references from `<filename>.ptc`.
5,190
def on_site(self):
    return super(EntryPublishedManager, self).get_queryset().filter(
        sites=Site.objects.get_current())
Return entries published on current site.
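A hedged usage sketch, assuming this manager is exposed as Entry.published as in django-blog-zinnia:

>>> from zinnia.models.entry import Entry
>>> Entry.published.on_site()  # published entries attached to the current Site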
5,191
def print(self, indent=0):
    LOG.info("%s:: %s", indent * " ", self.__class__)
    for ext in self.next_extensions:
        ext.print(indent=indent + 2)
Print a structural view of the registered extensions.
5,192
def bitcount(self, key, start=None, end=None):
    if start is None and end is not None:
        raise TypeError("both start and stop must be specified")
    elif start is not None and end is None:
        raise TypeError("both start and stop must be specified")
    elif start is not None and end is not None:
        args = (start, end)
    else:
        args = ()
    # The command-name byte string was lost in extraction; BITCOUNT is the
    # Redis command this wrapper issues.
    return self.execute(b'BITCOUNT', key, *args)
Count set bits in a string. :raises TypeError: if only start or end specified.
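A hedged usage sketch against an aioredis-style connection named redis; the counts follow the Redis BITCOUNT documentation example:

>>> await redis.set('mykey', 'foobar')
>>> await redis.bitcount('mykey')        # 26 bits set across b'foobar'
>>> await redis.bitcount('mykey', 0, 0)  # 4 bits set in the first byte
>>> await redis.bitcount('mykey', 0)     # raises TypeError: both start and stop must be specified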
5,193
def put_meta(request):
    if django.conf.settings.REQUIRE_WHITELIST_FOR_UPDATE:
        d1_gmn.app.auth.assert_create_update_delete_permission(request)
    d1_gmn.app.util.coerce_put_post(request)
    # The MIME part names were lost in extraction; ('field', 'pid') and
    # ('file', 'sysmeta') are reconstructed from how the parts are read below.
    d1_gmn.app.views.assert_db.post_has_mime_parts(
        request, (('field', 'pid'), ('file', 'sysmeta'))
    )
    pid = request.POST['pid']
    d1_gmn.app.auth.assert_allowed(request, d1_gmn.app.auth.WRITE_LEVEL, pid)
    new_sysmeta_pyxb = d1_gmn.app.sysmeta.deserialize(request.FILES['sysmeta'])
    d1_gmn.app.views.assert_sysmeta.has_matching_modified_timestamp(new_sysmeta_pyxb)
    d1_gmn.app.views.create.set_mn_controlled_values(
        request, new_sysmeta_pyxb, is_modification=True
    )
    d1_gmn.app.sysmeta.create_or_update(new_sysmeta_pyxb)
    d1_gmn.app.event_log.log_update_event(
        pid,
        request,
        timestamp=d1_common.date_time.normalize_datetime_to_utc(
            new_sysmeta_pyxb.dateUploaded
        ),
    )
    return d1_gmn.app.views.util.http_response_with_boolean_true_type()
MNStorage.updateSystemMetadata(session, pid, sysmeta) → boolean. TODO: Currently, this call allows making breaking changes to SysMeta. We need to clarify what can be modified and what the behavior should be when working with SIDs and chains.
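A hedged client-side sketch of exercising this view with requests; the URL path, MIME part names and certificate handling are assumptions based on the reconstruction above, not a documented GMN client call:

import requests

with open('sysmeta.xml', 'rb') as f:
    sysmeta_xml = f.read()   # serialized System Metadata document to upload

response = requests.put(
    'https://mn.example.org/mn/v2/meta',                          # hypothetical GMN endpoint
    data={'pid': 'my-object-pid'},                                # the 'pid' field part
    files={'sysmeta': ('sysmeta.xml', sysmeta_xml, 'text/xml')},  # the 'sysmeta' file part
    cert=('client_cert.pem', 'client_key.pem'),                   # client-side certificate auth
)
response.raise_for_status()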
5,194
def ConsumeByteString(self):
    the_list = [self._ConsumeSingleByteString()]
    # Adjacent quoted strings are concatenated, as in C and in proto text format.
    while self.token and self.token[0] in _QUOTES:
        the_list.append(self._ConsumeSingleByteString())
    return b''.join(the_list)
Consumes a byte array value. Returns: The array parsed (as a string). Raises: ParseError: If a byte array value couldn't be consumed.
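A hedged illustration of the concatenation behaviour via the public text_format API; MyMessage is a hypothetical message type with a bytes field named data:

>>> from google.protobuf import text_format
>>> msg = text_format.Parse('data: "foo" "bar"', MyMessage())  # adjacent strings concatenate
>>> msg.data
b'foobar'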
5,195
def libvlc_video_get_logo_int(p_mi, option):
    # The cached/constructed C function name was lost in extraction; it matches
    # the wrapper name, as it does for every other binding in this module.
    f = _Cfunctions.get('libvlc_video_get_logo_int', None) or \
        _Cfunction('libvlc_video_get_logo_int', ((1,), (1,),), None,
                   ctypes.c_int, MediaPlayer, ctypes.c_uint)
    return f(p_mi, option)
Get integer logo option. @param p_mi: libvlc media player instance. @param option: logo option to get, values of libvlc_video_logo_option_t.
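A hedged usage sketch; 0 is assumed to correspond to libvlc_logo_enable in the libvlc_video_logo_option_t enum, but the value should be taken from the bindings' own constants rather than hard-coded as here:

>>> import vlc
>>> player = vlc.MediaPlayer()
>>> enabled = vlc.libvlc_video_get_logo_int(player, 0)  # 0: libvlc_logo_enable (assumed)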
5,196
def send_dictation_result(self, result, sentences=None, app_uuid=None):
    assert self._session_id != VoiceService.SESSION_ID_INVALID
    assert isinstance(result, TranscriptionResult)

    # Build the transcription attribute only for successful sessions.
    transcription = None
    if result == TranscriptionResult.Success:
        if len(sentences) > 0:
            s_list = []
            for s in sentences:
                words = [Word(confidence=100, data=w) for w in s]
                s_list.append(Sentence(words=words))
            transcription = Transcription(transcription=SentenceList(sentences=s_list))

    flags = 0
    if app_uuid is not None:
        assert isinstance(app_uuid, uuid.UUID)
        flags |= Flags.AppInitiated

    attributes = []
    if app_uuid is not None:
        assert isinstance(app_uuid, uuid.UUID)
        attributes.append(Attribute(id=AttributeType.AppUuid, data=AppUuid(uuid=app_uuid)))
    if transcription is not None:
        attributes.append(Attribute(id=AttributeType.Transcription, data=transcription))

    logger.debug("Sending dictation result (result={}".format(result) +
                 ", app={})".format(app_uuid) if app_uuid is not None else ")")

    self._pebble.send_packet(VoiceControlResult(flags=flags, data=DictationResult(
        session_id=self._session_id,
        result=result,
        attributes=AttributeList(dictionary=attributes))))

    # Invalidate the session once the result has been sent.
    self._session_id = VoiceService.SESSION_ID_INVALID
Send the result of a dictation session :param result: Result of the session :type result: DictationResult :param sentences: list of sentences, each of which is a list of words and punctuation :param app_uuid: UUID of app that initiated the session :type app_uuid: uuid.UUID
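A hedged sketch of the expected sentences structure (a list of sentences, each a list of word and punctuation strings); the voice_service name is illustrative and an active dictation session is assumed:

voice_service.send_dictation_result(
    TranscriptionResult.Success,
    sentences=[["Don't", "forget", "the", "milk", "!"]],
)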
5,197
def create_from_pybankid_exception(cls, exception):
    return cls(
        "{0}: {1}".format(exception.__class__.__name__, str(exception)),
        _exception_class_to_status_code.get(exception.__class__),
    )
Class method for initiating from a `PyBankID` exception. :param bankid.exceptions.BankIDError exception: :return: The wrapped exception. :rtype: :py:class:`~FlaskPyBankIDError`
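A hedged sketch of wrapping a PyBankID error; InvalidParametersError is one of pybankid's exception classes, and the surrounding try/except is illustrative rather than part of the extension:

import bankid.exceptions

try:
    raise bankid.exceptions.InvalidParametersError("bad personal number")
except bankid.exceptions.BankIDError as exc:
    # Re-raise as the Flask-aware error carrying an HTTP status code.
    raise FlaskPyBankIDError.create_from_pybankid_exception(exc)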
5,198
def get_config_path(appdirs=DEFAULT_APPDIRS, file_name=DEFAULT_CONFIG_FILENAME):
    return os.path.join(appdirs.user_config_dir, file_name)
Return the path where the config file is stored. Args: appdirs (optional): ``AppDirs``-like instance whose ``user_config_dir`` determines the directory; defaults to ``DEFAULT_APPDIRS``. Allows you to use your own application-specific namespace if you wish. file_name (text_type, optional): Name of the config file. Defaults to ``config.conf``. Returns: str: Fully qualified path (dir & filename) where we expect the config file.
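A hedged usage sketch; the exact paths shown are illustrative and depend on the platform and on the appdirs instance passed in:

>>> get_config_path()
'/home/user/.config/projecthamster/config.conf'
>>> get_config_path(file_name='alternate.conf')
'/home/user/.config/projecthamster/alternate.conf'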
5,199
def _get_entity_by_class(self, entity_cls):
    entity_qualname = fully_qualified_name(entity_cls)
    if entity_qualname in self._registry:
        return self._registry[entity_qualname]
    else:
        return self._find_entity_in_records_by_class_name(entity_cls.__name__)
Fetch Entity record with Entity class details