def every_other(iterable):
    """
    Yield every other item from the iterable

    >>> ' '.join(every_other('abcdefg'))
    'a c e g'
    """
    items = iter(iterable)
    while True:
        try:
            yield next(items)
            next(items)
        except StopIteration:
            return
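The same behaviour can be had from the standard library; a minimal sketch using itertools.islice, assuming only that the iterable protocol above holds:

from itertools import islice

def every_other_islice(iterable):
    # Take items at even positions (0, 2, 4, ...).
    return islice(iterable, 0, None, 2)

assert ' '.join(every_other_islice('abcdefg')) == 'a c e g'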
def _check_dn(self, dn, attr_value):
    """Check dn attribute for issues."""
    if dn is not None:
        self._error('Two lines starting with dn: in one record.')
    if not is_dn(attr_value):
        self._error('No valid string-representation of '
                    'distinguished name %s.' % attr_value)
def devop_write(self, args, bustype):
    '''write to a device'''
    usage = "Usage: devop write <spi|i2c> name bus address regstart count <bytes>"
    if len(args) < 5:
        print(usage)
        return
    name = args[0]
    bus = int(args[1], base=0)
    address = int(args[2], base=0)
    reg = int(args[3], base=0)
    count = int(args[4], base=0)
    args = args[5:]
    if len(args) < count:
        print(usage)
        return
    bytes = [0] * 128
    for i in range(count):
        bytes[i] = int(args[i], base=0)
    self.master.mav.device_op_write_send(self.target_system,
                                         self.target_component,
                                         self.request_id,
                                         bustype,
                                         bus,
                                         address,
                                         name,
                                         reg,
                                         count,
                                         bytes)
    self.request_id += 1
def help(i):
    """
    Input:  {}

    Output: {
              return - return code = 0, if successful
                                   > 0, if error
              (error) - error text if return > 0

              help - help text
            }
    """
    o=i.get('out','')

    m=i.get('module_uoa','')
    if m=='':
        m='<module_uoa>'

    h= 'Usage: '+cfg['cmd'].replace('$#module_uoa#$', m)+'\n'

    h+='\n'
    h+=' Common actions:\n'

    for q in sorted(cfg['common_actions']):
        s=q
        desc=cfg['actions'][q].get('desc','')
        if desc!='':
            s+=' - '+desc
        h+=' '+s+'\n'

    h+='\n'
    h+=' Module actions:\n'

    if m=='<module_uoa>':
        # Internal actions (for this kernel)
        for q in sorted(cfg['actions']):
            if q not in cfg['common_actions']:
                s=q
                desc=cfg['actions'][q].get('desc','')
                if desc!='':
                    s+=' - '+desc
                h+=' '+s+'\n'
    else:
        # Attempt to load
        r=list_actions({'module_uoa':m})
        if r['return']>0:
            return r
        actions=r['actions']

        if len(actions)==0:
            h+=' Not described yet ...\n'
        else:
            for q in sorted(actions.keys()):
                s=q
                desc=actions[q].get('desc','')
                if desc!='':
                    s+=' - '+desc
                h+=' '+s+'\n'

    if m=='<module_uoa>':
        h+='\n'
        h+=cfg['help_examples']

    h+='\n'
    h+=cfg['help_web']

    if o=='con':
        out(h)

    return {'return':0, 'help':h}
def split(self, k):
    """Return a tuple of two tables where the first table contains
    ``k`` rows randomly sampled and the second contains the remaining rows.

    Args:
        ``k`` (int): The number of rows randomly sampled into the first
            table. ``k`` must be between 1 and ``num_rows - 1``.

    Raises:
        ``ValueError``: ``k`` is not between 1 and ``num_rows - 1``.

    Returns:
        A tuple containing two instances of ``Table``.

    >>> jobs = Table().with_columns(
    ...     'job',  make_array('a', 'b', 'c', 'd'),
    ...     'wage', make_array(10, 20, 15, 8))
    >>> jobs
    job | wage
    a   | 10
    b   | 20
    c   | 15
    d   | 8
    >>> sample, rest = jobs.split(3)
    >>> sample # doctest: +SKIP
    job | wage
    c   | 15
    a   | 10
    b   | 20
    >>> rest # doctest: +SKIP
    job | wage
    d   | 8
    """
    if not 1 <= k <= self.num_rows - 1:
        raise ValueError("Invalid value of k. k must be between 1 and the "
                         "number of rows - 1")

    rows = np.random.permutation(self.num_rows)

    first = self.take(rows[:k])
    rest = self.take(rows[k:])
    for column_label in self._formats:
        first._formats[column_label] = self._formats[column_label]
        rest._formats[column_label] = self._formats[column_label]
    return first, rest
def forget_subject(sid):
    '''
    forget_subject(sid) causes neuropythy's freesurfer module to forget about cached data for the
    subject with subject id sid. The sid may be any sid that can be passed to the subject()
    function.

    This function is useful for batch-processing of subjects in a memory-limited environment; e.g.,
    if you run out of memory while processing FreeSurfer subjects it is possibly because neuropythy
    is caching all of their data instead of freeing it.
    '''
    sub = subject(sid)
    if sub.path in subject._cache:
        del subject._cache[sub.path]
    else:
        for (k, v) in six.iteritems(subject._cache):
            if v is sub:
                del subject._cache[k]
                break
    return None
def list_passwords(kwargs=None, call=None):
    '''
    List all passwords on the account

    .. versionadded:: 2015.8.0
    '''
    response = _query('support', 'password/list')

    ret = {}
    for item in response['list']:
        if 'server' in item:
            server = item['server']['name']
            if server not in ret:
                ret[server] = []
            ret[server].append(item)

    return ret
def parse_mmtf_header(infile):
    """Parse an MMTF file and return basic header-like information.

    Args:
        infile (str): Path to MMTF file

    Returns:
        dict: Dictionary of parsed header

    Todo:
        - Can this be sped up by not parsing the 3D coordinate info somehow?
        - OR just store the sequences when this happens since it is already being parsed.

    """
    infodict = {}
    mmtf_decoder = mmtf.parse(infile)
    infodict['date'] = mmtf_decoder.deposition_date
    infodict['release_date'] = mmtf_decoder.release_date
    try:
        infodict['experimental_method'] = [x.decode() for x in mmtf_decoder.experimental_methods]
    except AttributeError:
        infodict['experimental_method'] = [x for x in mmtf_decoder.experimental_methods]
    infodict['resolution'] = mmtf_decoder.resolution
    infodict['description'] = mmtf_decoder.title

    group_name_exclude = ['HOH']
    chem_comp_type_exclude = ['l-peptide linking', 'peptide linking']
    chemicals = list(set([mmtf_decoder.group_list[idx]['groupName']
                          for idx in mmtf_decoder.group_type_list
                          if mmtf_decoder.group_list[idx]['chemCompType'].lower() not in chem_comp_type_exclude
                          and mmtf_decoder.group_list[idx]['groupName'] not in group_name_exclude]))
    infodict['chemicals'] = chemicals

    return infodict
def integrate(self, outevent, inevent):
    """Propagate function time ratio along the function calls.

    Must be called after finding the cycles.

    See also:
    - http://citeseer.ist.psu.edu/graham82gprof.html
    """
    # Sanity checking
    assert outevent not in self
    for function in compat_itervalues(self.functions):
        assert outevent not in function
        assert inevent in function
        for call in compat_itervalues(function.calls):
            assert outevent not in call
            if call.callee_id != function.id:
                assert call.ratio is not None

    # Aggregate the input for each cycle
    for cycle in self.cycles:
        total = inevent.null()
        for function in compat_itervalues(self.functions):
            total = inevent.aggregate(total, function[inevent])
        self[inevent] = total

    # Integrate along the edges
    total = inevent.null()
    for function in compat_itervalues(self.functions):
        total = inevent.aggregate(total, function[inevent])
        self._integrate_function(function, outevent, inevent)
    self[outevent] = total
def raise_for_missing_name(self, line: str, position: int, namespace: str, name: str) -> None:
    """Raise an exception if the namespace is not defined or if it does not validate the given name."""
    self.raise_for_missing_namespace(line, position, namespace, name)

    if self.has_enumerated_namespace(namespace) and not self.has_enumerated_namespace_name(namespace, name):
        raise MissingNamespaceNameWarning(self.get_line_number(), line, position, namespace, name)

    if self.has_regex_namespace(namespace) and not self.has_regex_namespace_name(namespace, name):
        raise MissingNamespaceRegexWarning(self.get_line_number(), line, position, namespace, name)
def pause(self, remaining_pause_cycles):
    """Pause a subscription"""
    url = urljoin(self._url, '/pause')
    elem = ElementTreeBuilder.Element(self.nodename)
    elem.append(Resource.element_for_value('remaining_pause_cycles',
                                           remaining_pause_cycles))

    body = ElementTree.tostring(elem, encoding='UTF-8')
    response = self.http_request(url, 'PUT', body,
                                 {'Content-Type': 'application/xml; charset=utf-8'})
    if response.status not in (200, 201, 204):
        self.raise_http_error(response)

    self.update_from_element(ElementTree.fromstring(response.read()))
def rule_role(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
    index_key = ET.SubElement(rule, "index")
    index_key.text = kwargs.pop('index')
    role = ET.SubElement(rule, "role")
    role.text = kwargs.pop('role')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def _read_bytes_from_non_framed_body(self, b):
    """Reads the requested number of bytes from a streaming non-framed message body.

    :param int b: Number of bytes to read
    :returns: Decrypted bytes from source stream
    :rtype: bytes
    """
    _LOGGER.debug("starting non-framed body read")
    # Always read the entire message for non-framed message bodies.
    bytes_to_read = self.body_length
    _LOGGER.debug("%d bytes requested; reading %d bytes", b, bytes_to_read)
    ciphertext = self.source_stream.read(bytes_to_read)

    if len(self.output_buffer) + len(ciphertext) < self.body_length:
        raise SerializationError("Total message body contents less than specified in body description")

    if self.verifier is not None:
        self.verifier.update(ciphertext)

    tag = deserialize_tag(stream=self.source_stream, header=self._header, verifier=self.verifier)

    aad_content_string = aws_encryption_sdk.internal.utils.get_aad_content_string(
        content_type=self._header.content_type, is_final_frame=True
    )
    associated_data = assemble_content_aad(
        message_id=self._header.message_id,
        aad_content_string=aad_content_string,
        seq_num=1,
        length=self.body_length,
    )
    self.decryptor = Decryptor(
        algorithm=self._header.algorithm,
        key=self._derived_data_key,
        associated_data=associated_data,
        iv=self._unframed_body_iv,
        tag=tag,
    )

    plaintext = self.decryptor.update(ciphertext)
    plaintext += self.decryptor.finalize()
    self.footer = deserialize_footer(stream=self.source_stream, verifier=self.verifier)
    return plaintext
async def openurl(url, **opts):
    '''
    Open a URL to a remote telepath object.

    Args:
        url (str): A telepath URL.
        **opts (dict): Telepath connect options.

    Returns:
        (synapse.telepath.Proxy): A telepath proxy object.

    The telepath proxy may then be used for sync or async calls:

        proxy = openurl(url)
        value = proxy.getFooThing()

    ... or ...

        proxy = await openurl(url)
        valu = await proxy.getFooThing()

    ... or ...

        async with await openurl(url) as proxy:
            valu = await proxy.getFooThing()
    '''
    if url.find('://') == -1:
        newurl = alias(url)
        if newurl is None:
            raise s_exc.BadUrl(f':// not found in [{url}] and no alias found!')
        url = newurl

    info = s_urlhelp.chopurl(url)
    info.update(opts)

    host = info.get('host')
    port = info.get('port')

    auth = None
    user = info.get('user')
    if user is not None:
        passwd = info.get('passwd')
        auth = (user, {'passwd': passwd})

    scheme = info.get('scheme')

    if scheme == 'cell':
        # cell:///path/to/celldir:share
        # cell://rel/path/to/celldir:share
        path = info.get('path')
        name = info.get('name', '*')

        # support cell://<relpath>/<to>/<cell>
        # by detecting host...
        host = info.get('host')
        if host:
            path = path.strip('/')
            path = os.path.join(host, path)

        if ':' in path:
            path, name = path.split(':')

        full = os.path.join(path, 'sock')
        link = await s_link.unixconnect(full)

    elif scheme == 'unix':
        # unix:///path/to/sock:share
        path, name = info.get('path').split(':')
        link = await s_link.unixconnect(path)

    else:
        path = info.get('path')
        name = info.get('name', path[1:])

        sslctx = None
        if scheme == 'ssl':
            certpath = info.get('certdir')
            certdir = s_certdir.CertDir(certpath)
            sslctx = certdir.getClientSSLContext()

        link = await s_link.connect(host, port, ssl=sslctx)

    prox = await Proxy.anit(link, name)
    prox.onfini(link)

    try:
        await prox.handshake(auth=auth)
    except Exception:
        await prox.fini()
        raise

    return prox
def getWorkflowDir(workflowID, configWorkDir=None):
    """
    Returns a path to the directory where worker directories and the cache will be located
    for this workflow.

    :param str workflowID: Unique identifier for the workflow
    :param str configWorkDir: Value passed to the program using the --workDir flag
    :return: Path to the workflow directory
    :rtype: str
    """
    workDir = configWorkDir or os.getenv('TOIL_WORKDIR') or tempfile.gettempdir()
    if not os.path.exists(workDir):
        raise RuntimeError("The directory specified by --workDir or TOIL_WORKDIR (%s) does not "
                           "exist." % workDir)
    # Create the workflow dir, make it unique to each host in case workDir is on a shared FS.
    # This prevents workers on different nodes from erasing each other's directories.
    workflowDir = os.path.join(workDir, 'toil-%s-%s' % (workflowID, getNodeID()))
    try:
        # Directory creation is atomic
        os.mkdir(workflowDir)
    except OSError as err:
        if err.errno != 17:
            # The directory exists if a previous worker set it up.
            raise
    else:
        logger.debug('Created the workflow directory at %s' % workflowDir)
    return workflowDir
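The bare 17 above is errno.EEXIST; the try/except around os.mkdir is the standard create-if-absent idiom for racing processes. A minimal self-contained sketch of just that idiom (the toil-specific helpers are omitted):

import errno
import os

def ensure_dir(path):
    # os.mkdir is atomic: of several racing processes, exactly one succeeds;
    # the others get EEXIST, which is treated as success here.
    try:
        os.mkdir(path)
        return True   # this process created the directory
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise
        return False  # another process created it first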
def _get_or_add_image(self, image_file):
    """
    Return an (rId, description, image_size) 3-tuple identifying the related
    image part containing *image_file* and describing the image.
    """
    image_part, rId = self.part.get_or_add_image_part(image_file)
    desc, image_size = image_part.desc, image_part._px_size
    return rId, desc, image_size
def _solve(self):
    """Solve the problem with the current objective."""
    # Remove temporary constraints
    while len(self._remove_constr) > 0:
        self._remove_constr.pop().delete()

    try:
        self._prob.solve(lp.ObjectiveSense.Maximize)
    except lp.SolverError as e:
        raise_from(FluxBalanceError('Failed to solve: {}'.format(
            e), result=self._prob.result), e)
    finally:
        # Set temporary constraints to be removed on next solve call
        self._remove_constr = self._temp_constr
        self._temp_constr = []
def extract_subsection(im, shape):
    r"""
    Extracts the middle section of an image

    Parameters
    ----------
    im : ND-array
        Image from which to extract the subsection

    shape : array_like
        Can either specify the size of the extracted section or the
        fractional size of the image to extract.

    Returns
    -------
    image : ND-array
        An ND-array of size given by the ``shape`` argument, taken from the
        center of the image.

    Examples
    --------
    >>> import scipy as sp
    >>> from porespy.tools import extract_subsection
    >>> im = sp.array([[1, 1, 1, 1], [1, 2, 2, 2], [1, 2, 3, 3], [1, 2, 3, 4]])
    >>> print(im)
    [[1 1 1 1]
     [1 2 2 2]
     [1 2 3 3]
     [1 2 3 4]]
    >>> im = extract_subsection(im=im, shape=[2, 2])
    >>> print(im)
    [[2 2]
     [2 3]]

    """
    # Check if shape was given as a fraction
    shape = sp.array(shape)
    if shape[0] < 1:
        shape = sp.array(im.shape) * shape
    center = sp.array(im.shape) / 2
    s_im = []
    for dim in range(im.ndim):
        r = shape[dim] / 2
        lower_im = sp.amax((center[dim] - r, 0))
        upper_im = sp.amin((center[dim] + r, im.shape[dim]))
        s_im.append(slice(int(lower_im), int(upper_im)))
    return im[tuple(s_im)]
def update_with_result(self, result):
    """Update item-model with result from host

    State is sent from host after processing had taken place
    and represents the events that took place; including
    log messages and completion status.

    Arguments:
        result (dict): Dictionary following the Result schema
    """
    assert isinstance(result, dict), "%s is not a dictionary" % result

    for type in ("instance", "plugin"):
        id = (result[type] or {}).get("id")

        is_context = not id
        if is_context:
            item = self.instances[0]
        else:
            item = self.items.get(id)

        if item is None:
            # If an item isn't there yet
            # no worries. It's probably because
            # reset is still running and the
            # item in question is a new instance
            # not yet added to the model.
            continue

        item.isProcessing = False
        item.currentProgress = 1
        item.processed = True
        item.hasWarning = item.hasWarning or any([
            record["levelno"] == logging.WARNING
            for record in result["records"]
        ])

        if result.get("error"):
            item.hasError = True
            item.amountFailed += 1
        else:
            item.succeeded = True
            item.amountPassed += 1

        item.duration += result["duration"]
        item.finishedAt = time.time()

        if item.itemType == "plugin" and not item.actionsIconVisible:
            actions = list(item.actions)

            # Context specific actions
            for action in list(actions):
                if action["on"] == "failed" and not item.hasError:
                    actions.remove(action)
                if action["on"] == "succeeded" and not item.succeeded:
                    actions.remove(action)
                if action["on"] == "processed" and not item.processed:
                    actions.remove(action)

            if actions:
                item.actionsIconVisible = True

        # Update section item
        class DummySection(object):
            hasWarning = False
            hasError = False
            succeeded = False

        section_item = DummySection()
        for section in self.sections:
            if item.itemType == "plugin" and section.name == item.verb:
                section_item = section
            if (item.itemType == "instance" and
                    section.name == item.category):
                section_item = section

        section_item.hasWarning = (
            section_item.hasWarning or item.hasWarning
        )
        section_item.hasError = section_item.hasError or item.hasError
        section_item.succeeded = section_item.succeeded or item.succeeded
        section_item.isProcessing = False
def restore_flattened_dict(i):
    """
    Input:  {
              dict - flattened dict
            }

    Output: {
              return - return code = 0, if successful
                                   > 0, if error
              (error) - error text if return > 0

              dict - restored dict
            }
    """
    # Check vars
    a={} # default
    b=i['dict']
    first=True
    for x in b:
        if first:
            first=False
            y=x[1:2]
            if y=='@':
                a=[]
            else:
                a={}
        set_by_flat_key({'dict':a, 'key':x, 'value':b[x]})
    return {'return':0, 'dict': a}
def _store_variable(self, j, key, m, value):
    """Store a copy of the variable in the history
    """
    if hasattr(value, 'copy'):
        v = value.copy()
    else:
        v = value
    self.history[j][key][m].append(v)
def get_feature_info(feature):
    """Returns a dict with feature information"""
    dimensions = feature.findall('position')
    for dim in dimensions:
        if dim.attrib['dim'] == '0':
            rt = dim.text
        elif dim.attrib['dim'] == '1':
            mz = dim.text
    return {'rt': float(rt), 'mz': float(mz),
            'charge': int(feature.find('charge').text),
            'intensity': float(feature.find('intensity').text),
            }
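A minimal element this parser would accept, sketched from the tags the function reads; the numeric values are made up for illustration:

import xml.etree.ElementTree as ET

xml = '''<feature>
  <position dim="0">1244.2</position>
  <position dim="1">501.33</position>
  <charge>2</charge>
  <intensity>1.5e6</intensity>
</feature>'''

info = get_feature_info(ET.fromstring(xml))
# {'rt': 1244.2, 'mz': 501.33, 'charge': 2, 'intensity': 1500000.0}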
def get_gene_symbols(chrom, start, stop):
    """Get the gene symbols that an interval overlaps"""
    gene_symbols = query_gene_symbol(chrom, start, stop)
    logger.debug("Found gene symbols: {0}".format(', '.join(gene_symbols)))
    return gene_symbols
def view_count_plus(slug):
    '''
    View count plus one.
    '''
    entry = TabWiki.update(
        view_count=TabWiki.view_count + 1,
    ).where(TabWiki.uid == slug)
    entry.execute()
def getInvestigators(self, tags=None, seperator=";", _getTag=False):
    """Returns a list of the names of investigators. The optional arguments are ignored.

    # Returns

    `list [str]`

    > A list of all the found investigator's names
    """
    if tags is None:
        tags = ['Investigator']
    elif isinstance(tags, str):
        tags = ['Investigator', tags]
    else:
        tags.append('Investigator')
    return super().getInvestigators(tags=tags, seperator=seperator, _getTag=_getTag)
def translate_char(source_char, carrier, reverse=False, encoding=False):
    u"""translate unicode emoji character to unicode carrier emoji character (or reverse)

    Attributes:
        source_char - emoji character. it must be unicode instance or have to
                      set `encoding` attribute to decode
        carrier     - the target carrier
        reverse     - if you want to translate CARRIER => UNICODE, turn it True
        encoding    - encoding name for decode (Default is None)
    """
    if not isinstance(source_char, unicode) and encoding:
        source_char = source_char.decode(encoding, 'replace')
    elif not isinstance(source_char, unicode):
        raise AttributeError(u"`source_char` must be decoded to `unicode` or "
                             u"set `encoding` attribute to decode `source_char`")
    if len(source_char) > 1:
        raise AttributeError(u"`source_char` must be a letter. use `translate` method instead.")

    translate_dictionary = _loader.translate_dictionaries[carrier]
    if not reverse:
        translate_dictionary = translate_dictionary[0]
    else:
        translate_dictionary = translate_dictionary[1]

    if not translate_dictionary:
        return source_char

    return translate_dictionary.get(source_char, source_char)
def complete(self, stream):
    """Complete the pending stream.

    Any connections made to :py:attr:`stream` are connected to `stream` once
    this method returns.

    Args:
        stream(Stream): Stream that completes the connection.
    """
    assert not self.is_complete()

    self._marker.addInputPort(outputPort=stream.oport)
    self.stream.oport.schema = stream.oport.schema
    # Update the pending schema to the actual schema
    # Any downstream filters that took the reference
    # will be automatically updated to the correct schema
    self._pending_schema._set(self.stream.oport.schema)
    # Mark the operator with the pending stream
    # a start point for graph traversal
    stream.oport.operator._start_op = True
def _get_default_namespace(self) -> Optional[Namespace]:
    """Get the reference BEL namespace if it exists."""
    return self._get_query(Namespace).filter(Namespace.url == self._get_namespace_url()).one_or_none()
def _check_inplace_setting(self, value):
    """ check whether we allow in-place setting with this type of value """
    if self._is_mixed_type:
        if not self._is_numeric_mixed_type:
            # allow an actual np.nan thru
            try:
                if np.isnan(value):
                    return True
            except Exception:
                pass

            raise TypeError('Cannot do inplace boolean setting on '
                            'mixed-types with a non np.nan value')
    return True
def update_transfer_job(self, job_name, body):
    """
    Updates a transfer job that runs periodically.

    :param job_name: (Required) Name of the job to be updated
    :type job_name: str
    :param body: A request body, as described in
        https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body
    :type body: dict
    :return: If successful, TransferJob.
    :rtype: dict
    """
    body = self._inject_project_id(body, BODY, PROJECT_ID)
    return (
        self.get_conn()
        .transferJobs()
        .patch(jobName=job_name, body=body)
        .execute(num_retries=self.num_retries)
    )
def add_output(self, address, value, unit='satoshi'):
    """
    Add an output (a person who will receive funds via this tx).
    If no unit is specified, satoshi is implied.
    """
    value_satoshi = self.from_unit_to_satoshi(value, unit)

    if self.verbose:
        print("Adding output of: %s satoshi (%.8f)" % (
            value_satoshi, (value_satoshi / 1e8)
        ))

    self.outs.append({
        'address': address,
        'value': value_satoshi
    })
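A hedged usage sketch: `tx` stands for an already-constructed instance of the class that defines add_output, and the addresses and unit name below are hypothetical, not part of the snippet above:

# Assumes the class's from_unit_to_satoshi understands 'btc';
# the addresses here are placeholders.
tx.add_output('1ExampleDestinationAddressXXXXXXXX', 0.001, unit='btc')
tx.add_output('1AnotherDestinationAddressXXXXXXXX', 150000)  # satoshi implied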
def _select_date_range(self, lines):
    """Identify lines containing headers within the range begin_date to end_date.

    Parameters
    -----
    lines: list
        list of lines from the IGRA2 data file.
    """
    headers = []
    num_lev = []
    dates = []

    # Get indices of headers, and make a list of dates and num_lev
    for idx, line in enumerate(lines):
        if line[0] == '#':
            year, month, day, hour = map(int, line[13:26].split())

            # All soundings have YMD, most have hour
            try:
                date = datetime.datetime(year, month, day, hour)
            except ValueError:
                date = datetime.datetime(year, month, day)

            # Check date
            if self.begin_date <= date <= self.end_date:
                headers.append(idx)
                num_lev.append(int(line[32:36]))
                dates.append(date)
            if date > self.end_date:
                break

    if len(dates) == 0:
        # Break if no matched dates.
        # Could improve this later by showing the date range for the station.
        raise ValueError('No dates match selection.')

    # Compress body of data into a string
    begin_idx = min(headers)
    end_idx = max(headers) + num_lev[-1]

    # Make a boolean vector that selects only list indices within the time range
    selector = np.zeros(len(lines), dtype=bool)
    selector[begin_idx:end_idx + 1] = True
    selector[headers] = False
    body = ''.join([line for line in itertools.compress(lines, selector)])

    selector[begin_idx:end_idx + 1] = ~selector[begin_idx:end_idx + 1]
    header = ''.join([line for line in itertools.compress(lines, selector)])

    # expand date vector to match length of the body dataframe.
    dates_long = np.repeat(dates, num_lev)

    return body, header, dates_long, dates
def _build(self, name, **params):
    """
    Rebuild operations by removing open modules that no longer need to be
    watched, and adding new modules if they are not currently being watched.
    This is done by comparing self.modules to watch_files.paths_open
    """
    log = self._getparam('log', self._discard, **params)

    # Find all the modules that no longer need watching
    #
    rebuild = False
    wparams = params.copy()
    wparams['commit'] = False
    for path in list(self._watch.paths_open):
        if path in self.modules:
            continue
        try:
            self._watch.remove(path, **wparams)
            rebuild = True
        except Exception as e:
            log.warning("Remove of watched module %r failed -- %s", path, e)
        log.debug("Removed watch for path %r", path)

    # Find all the modules that are new and should be watched
    #
    for path in list(self.modules):
        if path not in self._watch.paths_open:
            try:
                self._watch.add(path, **wparams)
                rebuild = True
            except Exception as e:
                log.error("watch failed on module %r -- %s", path, e)
                continue

    if rebuild:
        self._watch.commit(**params)
def get_or_create_media(self, api_media):
    """
    Find or create a Media object given API data.

    :param api_media: the API data for the Media
    :return: a tuple of a Media instance and a boolean indicating whether
             the Media was created or not
    """
    return Media.objects.get_or_create(site_id=self.site_id,
                                       wp_id=api_media["ID"],
                                       defaults=self.api_object_data("media", api_media))
def get_summary_stats(self, output_csv=None):
    """Generates a CSV report with summary statistics about the assembly

    The calculated statistics are:

        - Number of contigs
        - Average contig size
        - N50
        - Total assembly length
        - Average GC content
        - Amount of missing data

    Parameters
    ----------
    output_csv: str
        Name of the output CSV file.
    """
    contig_size_list = []

    self.summary_info["ncontigs"] = len(self.contigs)

    for contig_id, sequence in self.contigs.items():
        logger.debug("Processing contig: {}".format(contig_id))

        # Get contig sequence size
        contig_len = len(sequence)

        # Add size for average contig size
        contig_size_list.append(contig_len)

        # Add to total assembly length
        self.summary_info["total_len"] += contig_len

        # Add to average gc
        self.summary_info["avg_gc"].append(
            sum(map(sequence.count, ["G", "C"])) / contig_len
        )

        # Add to missing data
        self.summary_info["missing_data"] += sequence.count("N")

    # Get average contig size
    logger.debug("Getting average contig size")
    self.summary_info["avg_contig_size"] = \
        sum(contig_size_list) / len(contig_size_list)

    # Get average gc content
    logger.debug("Getting average GC content")
    self.summary_info["avg_gc"] = \
        sum(self.summary_info["avg_gc"]) / len(self.summary_info["avg_gc"])

    # Get N50
    logger.debug("Getting N50")
    cum_size = 0
    for l in sorted(contig_size_list, reverse=True):
        cum_size += l
        if cum_size >= self.summary_info["total_len"] / 2:
            self.summary_info["n50"] = l
            break

    if output_csv:
        logger.debug("Writing report to csv")

        # Write summary info to CSV
        with open(output_csv, "w") as fh:
            summary_line = "{}, {}\\n".format(
                self.sample,
                ",".join([str(x) for x in self.summary_info.values()]))
            fh.write(summary_line)
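The N50 loop above is self-contained enough to lift out; a minimal standalone sketch of the same statistic:

def n50(contig_lengths):
    """Smallest length L such that contigs of length >= L cover
    at least half of the total assembly length."""
    total = sum(contig_lengths)
    cum_size = 0
    for length in sorted(contig_lengths, reverse=True):
        cum_size += length
        if cum_size >= total / 2:
            return length

# total = 32, so the running sum reaches 16 on the second 8.
assert n50([2, 2, 2, 3, 3, 4, 8, 8]) == 8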
def create(cls, name, key_chain_entry):
    """
    Create a key chain with list of keys

    Key_chain_entry format is::

        [{'key': 'xxxx', 'key_id': 1-255, 'send_key': True|False}]

    :param str name: Name of key chain
    :param list key_chain_entry: list of key chain entries
    :raises CreateElementFailed: create failed with reason
    :return: instance with meta
    :rtype: OSPFKeyChain
    """
    key_chain_entry = key_chain_entry or []
    json = {'name': name,
            'ospfv2_key_chain_entry': key_chain_entry}
    return ElementCreator(cls, json)
def format(self, vertices):
    """Format instance to dump

    vertices is dict of name to Vertex
    """
    index = ' '.join(str(vertices[vn].index) for vn in self.vnames)
    vcom = ' '.join(self.vnames)  # for comment
    buf = io.StringIO()
    buf.write('spline {0:s} '
              '// {1:s} ({2:s})'.format(index, self.name, vcom))
    buf.write('\n (\n')
    for p in self.points:
        buf.write(' ' + p.format() + '\n')
    buf.write('\n )\n')
    buf.write('')
    return buf.getvalue()
def run_until_disconnected(self):
    """
    Runs the event loop until `disconnect` is called or if an error
    while connecting/sending/receiving occurs in the background. In
    the latter case, said error will ``raise`` so you have a chance
    to ``except`` it on your own code.

    If the loop is already running, this method returns a coroutine
    that you should await on your own code.
    """
    if self.loop.is_running():
        return self._run_until_disconnected()
    try:
        return self.loop.run_until_complete(self.disconnected)
    except KeyboardInterrupt:
        pass
    finally:
        # No loop.run_until_complete; it's already syncified
        self.disconnect()
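A hedged sketch of the two calling styles the docstring describes; `client` is assumed to be an already-connected instance of the class this method belongs to:

# Outside any event loop: blocks until disconnect() or a background error.
client.run_until_disconnected()

# Inside an already-running loop the method returns a coroutine instead,
# so it must be awaited.
async def main():
    await client.run_until_disconnected()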
def docpie(doc, argv=None, help=True, version=None,
           stdopt=True, attachopt=True, attachvalue=True,
           helpstyle='python',
           auto2dashes=True, name=None, case_sensitive=False,
           optionsfirst=False, appearedonly=False, namedoptions=False,
           extra=None):
    """
    Parse `argv` based on command-line interface described in `doc`.

    `docpie` creates your command-line interface based on its
    description that you pass as `doc`. Such description can contain
    --options, <positional-argument>, commands, which could be
    [optional], (required), (mutually | exclusive) or repeated...

    Parameters
    ----------
    doc : str
        Description of your command-line interface.
    argv : list of str, optional
        Argument vector to be parsed. sys.argv is used if not
        provided.
    help : bool (default: True)
        Set to False to disable automatic help on -h or --help
        options.
    version : any object but None
        If passed, the object will be printed if --version is in
        `argv`.
    stdopt : bool (default: True)
        When it's True, long flags must start with --
    attachopt : bool (default: True)
        Allow several short flags to be written/passed as one,
        e.g. -abc can mean -a -b -c. This only works when stdopt=True
    attachvalue : bool (default: True)
        Allow a short flag and its value to be written together,
        e.g. -abc can mean -a bc
    auto2dashes : bool (default: True)
        Automatically handle -- (which means "end of command line flags")
    name : str (default: None)
        The "name" of your program. In each of your "usage" lines the
        "name" will be ignored. By default docpie will ignore the first
        element of your "usage".
    case_sensitive : bool (deprecated / default: False)
        Specifies whether matching "Usage:" and "Options:" is case
        sensitive
    optionsfirst : bool (default: False)
        Everything after the first positional argument will be
        interpreted as a positional argument
    appearedonly : bool (default: False)
        When set True, options that never appear in argv will not be
        put in the result. Note this only affects options
    extra : dict
        Customize pre-handled options. See
        http://docpie.comes.today/document/advanced-apis/
        for more information.

    Returns
    -------
    args : dict
        A dictionary, where keys are names of command-line elements
        such as e.g. "--verbose" and "<path>", and values are the
        parsed values of those elements.

    Example
    -------
    >>> from docpie import docpie
    >>> doc = '''
    ... Usage:
    ...     my_program tcp <host> <port> [--timeout=<seconds>]
    ...     my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
    ...     my_program (-h | --help | --version)
    ...
    ... Options:
    ...     -h, --help  Show this screen and exit.
    ...     --baud=<n>  Baudrate [default: 9600]
    ... '''
    >>> argv = ['my_program', 'tcp', '127.0.0.1', '80', '--timeout', '30']
    >>> docpie(doc, argv)
    {'--': False,
     '-h': False,
     '--baud': '9600',
     '--help': False,
     '--timeout': '30',
     '--version': False,
     '<host>': '127.0.0.1',
     '<port>': '80',
     'serial': False,
     'tcp': True}

    See also
    --------
    * Full documentation is available in README.md as well as online
      at http://docpie.comes.today/document/quick-start/
    """
    if case_sensitive:
        warnings.warn('`case_sensitive` is deprecated, `docpie` is always '
                      'case insensitive')
    kwargs = locals()
    argv = kwargs.pop('argv')
    pie = Docpie(**kwargs)
    pie.docpie(argv)
    return pie
def run_all(logdir, verbose=False):
    """Run simulations on a reasonable set of parameters.

    Arguments:
      logdir: the directory into which to store all the runs' data
      verbose: if true, print out each run's name as it begins
    """
    run_box_to_gaussian(logdir, verbose=verbose)
    run_sobel(logdir, verbose=verbose)
def arg_completions(
    completion_text: str,
    parent_function: str,
    args: list,
    arg_idx: int,
    bel_spec: BELSpec,
    bel_fmt: str,
    species_id: str,
    namespace: str,
    size: int,
):
    """Function argument completion

    Only allow legal options for completion given function name, arguments
    and index of argument to replace.

    Args:
        completion_text: text to use for completion - used for creating highlight
        parent_function: BEL function containing these args
        args: arguments of BEL function
        arg_idx: completing on this argument identified by this index
        bel_spec: BEL Specification
        bel_fmt: short, medium, long BEL function/relation formats
        species_id: filter on this species id, e.g. TAX:9606 if available
        namespace: filter on this namespace if available
        size: number of completions to return

    Return:
        list of replacements
    """
    function_long = bel_spec["functions"]["to_long"].get(parent_function)
    if not function_long:
        return []

    signatures = bel_spec["functions"]["signatures"][function_long]["signatures"]

    # Position based argument ###################################
    function_list = []
    entity_types = []
    fn_replace_list, ns_arg_replace_list = [], []
    position_flag = False  # Signature matches position-based argument

    # Check for position based argument
    for signature in signatures:
        sig_arg = signature["arguments"][arg_idx]
        sig_type = sig_arg["type"]

        if sig_arg.get("position", False) and arg_idx == sig_arg["position"] - 1:
            position_flag = True
            if sig_type in ["Function", "Modifier"]:
                function_list.extend(sig_arg["values"])
            elif sig_type in ["NSArg", "StrArgNSArg"]:
                entity_types.extend(sig_arg["values"])

    if not position_flag:
        # Collect optional and multiple signature arguments for completion
        opt_fn_sig_args = []
        opt_nsarg_sig_args = []
        mult_fn_sig_args = []
        mult_nsarg_sig_args = []
        for signature in signatures:
            signature_opt_fn_sig_args = []
            signature_opt_nsarg_sig_args = []
            signature_mult_fn_sig_args = []
            signature_mult_nsarg_sig_args = []
            max_position = -1
            for sig_arg in signature["arguments"]:
                if "position" in sig_arg:
                    max_position = sig_arg["position"]
                    continue  # Skip position based signature arguments

                if (
                    sig_arg.get("optional", False) is True
                    and sig_arg.get("multiple", False) is False
                ):
                    if sig_arg["type"] in ["Function", "Modifier"]:
                        signature_opt_fn_sig_args.extend(sig_arg["values"])
                    elif sig_arg["type"] in ["NSArg", "StrArgNSArg"]:
                        signature_opt_nsarg_sig_args.extend(sig_arg["values"])
                elif sig_arg.get("multiple", False) is True:
                    if sig_arg["type"] in ["Function", "Modifier"]:
                        signature_mult_fn_sig_args.extend(sig_arg["values"])
                    elif sig_arg["type"] in ["NSArg", "StrArgNSArg"]:
                        signature_mult_nsarg_sig_args.extend(sig_arg["values"])

            # Remove signature non-multiple, optional arguments that are already in args list
            for idx, arg in enumerate(args):
                if idx <= max_position - 1:  # Skip positional arguments
                    continue
                if idx == arg_idx:  # Skip argument to be completed
                    continue
                log.debug(f"Remove Optional Args {arg} {signature_opt_fn_sig_args}")

            opt_fn_sig_args.extend(signature_opt_fn_sig_args)
            opt_nsarg_sig_args.extend(signature_opt_nsarg_sig_args)
            mult_fn_sig_args.extend(signature_mult_fn_sig_args)
            mult_nsarg_sig_args.extend(signature_mult_nsarg_sig_args)

        function_list.extend(list(set(opt_fn_sig_args + mult_fn_sig_args)))
        entity_types.extend(list(set(opt_nsarg_sig_args + mult_nsarg_sig_args)))

    if function_list:
        log.debug(f"ArgComp - position-based Function list: {function_list}")
        fn_replace_list = function_completions(
            completion_text, bel_spec, function_list, bel_fmt, size
        )

    if entity_types:
        log.debug(f"ArgComp - position-based Entity types: {entity_types}")
        ns_arg_replace_list = nsarg_completions(
            completion_text,
            entity_types,
            bel_spec,
            namespace,
            species_id,
            bel_fmt,
            size,
        )

    replace_list = fn_replace_list + ns_arg_replace_list
    return replace_list
def post_step(self, ctxt, step, idx, result):
    """
    Called after executing a step.

    :param ctxt: An instance of ``timid.context.Context``.
    :param step: An instance of ``timid.steps.Step`` describing the
                 step that was executed.
    :param idx: The index of the step in the list of steps.
    :param result: An instance of ``timid.steps.StepResult`` describing
                   the result of executing the step. May be altered by
                   the extension, e.g., to set the ``ignore`` attribute.

    :returns: The ``result`` parameter, for convenience.
    """
    debugger = ExtensionDebugger('post_step')
    for ext in self.exts:
        with debugger(ext):
            ext.post_step(ctxt, step, idx, result)

    # Convenience return
    return result
def add_intern_pattern(self, url=None):
    """Add intern URL regex to config."""
    try:
        pat = self.get_intern_pattern(url=url)
        if pat:
            log.debug(LOG_CHECK, "Add intern pattern %r", pat)
            self.aggregate.config['internlinks'].append(get_link_pat(pat))
    except UnicodeError as msg:
        res = _("URL has unparsable domain name: %(domain)s") % \
            {"domain": msg}
        self.set_result(res, valid=False)
def _from_dict(cls, _dict):
    """Initialize a TableReturn object from a json dictionary."""
    args = {}
    if 'document' in _dict:
        args['document'] = DocInfo._from_dict(_dict.get('document'))
    if 'model_id' in _dict:
        args['model_id'] = _dict.get('model_id')
    if 'model_version' in _dict:
        args['model_version'] = _dict.get('model_version')
    if 'tables' in _dict:
        args['tables'] = [
            Tables._from_dict(x) for x in (_dict.get('tables'))
        ]
    return cls(**args)
def osx_clipboard_get():
    """ Get the clipboard's text on OS X.
    """
    p = subprocess.Popen(['pbpaste', '-Prefer', 'ascii'],
                         stdout=subprocess.PIPE)
    text, stderr = p.communicate()
    # Text comes in with old Mac \r line endings. Change them to \n.
    text = text.replace('\r', '\n')
    return text
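On Python 3, communicate() returns bytes, so the \r replacement above would fail without decoding first. A minimal py3-safe variant, as a sketch rather than the original API:

import subprocess

def osx_clipboard_get_py3():
    p = subprocess.Popen(['pbpaste', '-Prefer', 'ascii'],
                         stdout=subprocess.PIPE)
    stdout, _ = p.communicate()
    # Decode the bytes, then normalize old Mac \r line endings to \n.
    return stdout.decode('utf-8', 'replace').replace('\r', '\n')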
def addbr(name):
    '''
    Create new bridge with the given name
    '''
    fcntl.ioctl(ifconfig.sockfd, SIOCBRADDBR, name)
    return Bridge(name)
def print_roi(self, loglevel=logging.INFO):
    """Print information about the spectral and spatial properties
    of the ROI (sources, diffuse components)."""
    self.logger.log(loglevel, '\n' + str(self.roi))
def disconnect_async(self, connection_id, callback):
    """Asynchronously disconnect from a device that has previously been connected

    Args:
        connection_id (int): A unique identifier for this connection on the DeviceManager
            that owns this adapter.
        callback (callable): A function called as callback(connection_id, adapter_id,
            success, failure_reason) when the disconnection finishes. Disconnection can
            only either succeed or timeout.
    """
    try:
        context = self.connections.get_context(connection_id)
    except ArgumentError:
        callback(connection_id, self.id, False, "Could not find connection information")
        return

    self.connections.begin_disconnection(connection_id, callback, self.get_config('default_timeout'))

    self.bable.disconnect(
        connection_handle=context['connection_handle'],
        on_disconnected=[self._on_disconnection_finished, context]
    )
def delete_rule(name=None,
                localport=None,
                protocol=None,
                dir=None,
                remoteip=None):
    '''
    .. versionadded:: 2015.8.0

    Delete an existing firewall rule identified by name and optionally by ports,
    protocols, direction, and remote IP.

    Args:

        name (str): The name of the rule to delete. If the name ``all`` is used
            you must specify additional parameters.

        localport (Optional[str]): The port of the rule. If protocol is not
            specified, protocol will be set to ``tcp``

        protocol (Optional[str]): The protocol of the rule. Default is ``tcp``
            when ``localport`` is specified

        dir (Optional[str]): The direction of the rule.

        remoteip (Optional[str]): The remote IP of the rule.

    Returns:
        bool: True if successful

    Raises:
        CommandExecutionError: If the command fails

    CLI Example:

    .. code-block:: bash

        # Delete incoming tcp port 8080 in the rule named 'test'
        salt '*' firewall.delete_rule 'test' '8080' 'tcp' 'in'

        # Delete the incoming tcp port 8000 from 192.168.0.1 in the rule named
        # 'test_remote_ip'
        salt '*' firewall.delete_rule 'test_remote_ip' '8000' 'tcp' 'in' '192.168.0.1'

        # Delete all rules for local port 80:
        salt '*' firewall.delete_rule all 80 tcp

        # Delete a rule called 'allow80':
        salt '*' firewall.delete_rule allow80
    '''
    cmd = ['netsh', 'advfirewall', 'firewall', 'delete', 'rule']
    if name:
        cmd.append('name={0}'.format(name))
    if protocol:
        cmd.append('protocol={0}'.format(protocol))
    if dir:
        cmd.append('dir={0}'.format(dir))
    if remoteip:
        cmd.append('remoteip={0}'.format(remoteip))

    if protocol is None \
            or ('icmpv4' not in protocol and 'icmpv6' not in protocol):
        if localport:
            if not protocol:
                cmd.append('protocol=tcp')
            cmd.append('localport={0}'.format(localport))

    ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)
    if ret['retcode'] != 0:
        raise CommandExecutionError(ret['stdout'])

    return True
def cmd_zf(self, ch=None):
    """zf ch=chname

    Zoom the image for the given viewer/channel to fit the window.
    """
    viewer = self.get_viewer(ch)
    if viewer is None:
        self.log("No current viewer/channel.")
        return
    viewer.zoom_fit()
    cur_lvl = viewer.get_zoom()
    self.log("zoom=%f" % (cur_lvl))
def makediagram(edges):
    """make the diagram with the edges"""
    graph = pydot.Dot(graph_type='digraph')
    nodes = edges2nodes(edges)
    epnodes = [(node, makeanode(node[0])) for node in nodes
               if nodetype(node) == "epnode"]
    endnodes = [(node, makeendnode(node[0])) for node in nodes
                if nodetype(node) == "EndNode"]
    epbr = [(node, makeabranch(node)) for node in nodes
            if not istuple(node)]
    nodedict = dict(epnodes + epbr + endnodes)
    for value in list(nodedict.values()):
        graph.add_node(value)
    for e1, e2 in edges:
        graph.add_edge(pydot.Edge(nodedict[e1], nodedict[e2]))
    return graph
def set_elements_text(parent_to_parse, element_path=None, text_values=None):
    """
    Assigns an array of text values to each of the elements parsed from the parent.
    The text values are assigned in the same order they are provided.
    If there are fewer values than elements, the remaining elements are skipped; but if
    there are more, new elements will be inserted for each with the remaining text values.
    """
    if text_values is None:
        text_values = []
    return _set_elements_property(parent_to_parse, element_path, _ELEM_TEXT, text_values)
def _michael_b(m_l, m_r):
    """Defined in 802.11i p.49"""
    m_r = m_r ^ _rotate_left32(m_l, 17)
    m_l = (m_l + m_r) % 2**32
    m_r = m_r ^ _XSWAP(m_l)
    m_l = (m_l + m_r) % 2**32
    m_r = m_r ^ _rotate_left32(m_l, 3)
    m_l = (m_l + m_r) % 2**32
    m_r = m_r ^ _rotate_right32(m_l, 2)
    m_l = (m_l + m_r) % 2**32
    return m_l, m_r
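The three helpers are not shown here. Plausible 32-bit definitions matching the Michael block function in 802.11i, as a sketch with the names assumed from the calls above:

def _rotate_left32(x, n):
    # 32-bit left rotation
    return ((x << n) | (x >> (32 - n))) & 0xFFFFFFFF

def _rotate_right32(x, n):
    # 32-bit right rotation
    return ((x >> n) | (x << (32 - n))) & 0xFFFFFFFF

def _XSWAP(x):
    # Swap adjacent octets within each 16-bit half of the word:
    # bytes b3 b2 b1 b0 become b2 b3 b0 b1.
    return ((x & 0x00FF00FF) << 8) | ((x & 0xFF00FF00) >> 8)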
def forward_kinematics(self, joints, full_kinematics=False):
    """Returns the transformation matrix of the forward kinematics

    Parameters
    ----------
    joints: list
        The list of the positions of each joint. Note : Inactive joints must
        be in the list.
    full_kinematics: bool
        Return the transformation matrices of each joint

    Returns
    -------
    frame_matrix:
        The transformation matrix
    """
    frame_matrix = np.eye(4)

    if full_kinematics:
        frame_matrixes = []

    if len(self.links) != len(joints):
        raise ValueError("Your joints vector length is {} but you have {} links".format(
            len(joints), len(self.links)))

    for index, (link, joint_angle) in enumerate(zip(self.links, joints)):
        # Compute iteratively the position
        # NB : Use asarray to avoid old sympy problems
        frame_matrix = np.dot(frame_matrix, np.asarray(link.get_transformation_matrix(joint_angle)))
        if full_kinematics:
            # rotation_axe = np.dot(frame_matrix, link.rotation)
            frame_matrixes.append(frame_matrix)

    # Return the matrix, or matrixes
    if full_kinematics:
        return frame_matrixes
    else:
        return frame_matrix
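The core idea is chaining 4x4 homogeneous transforms, one per link. A minimal self-contained sketch with pure z-rotation links; the link API above is assumed, not reproduced:

import numpy as np

def rot_z(theta):
    # Homogeneous 4x4 transform: rotation about z by theta radians.
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, 0, 0],
                     [s,  c, 0, 0],
                     [0,  0, 1, 0],
                     [0,  0, 0, 1]])

def chain_fk(joint_angles):
    frame = np.eye(4)
    for theta in joint_angles:
        frame = frame @ rot_z(theta)  # accumulate link transforms in order
    return frame

# Two successive z-rotations compose by addition of angles.
assert np.allclose(chain_fk([0.3, 0.2]), rot_z(0.5))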
def volume(self):
    """
    Mesh volume - will throw a VTK error/warning if not a closed surface

    Returns
    -------
    volume : float
        Total volume of the mesh.
    """
    mprop = vtk.vtkMassProperties()
    mprop.SetInputData(self.tri_filter())
    return mprop.GetVolume()
def datum_to_value(self, instance, datum): """Convert a given MAAS-side datum to a Python-side value. :param instance: The `Object` instance on which this field is currently operating. This method should treat it as read-only, for example to perform validation with regards to other fields. :param datum: The MAAS-side datum to validate and convert into a Python-side value. :return: A set of `cls` from the given datum. """ if datum is None: return [] if not isinstance(datum, Sequence): raise TypeError( "datum must be a sequence, not %s" % type(datum).__name__) # Get the class from the bound origin. bound = getattr(instance._origin, "FilesystemGroupDevices") return bound(( get_device_object(instance._origin, item) for item in datum ))
Convert a given MAAS-side datum to a Python-side value. :param instance: The `Object` instance on which this field is currently operating. This method should treat it as read-only, for example to perform validation with regards to other fields. :param datum: The MAAS-side datum to validate and convert into a Python-side value. :return: A collection of device objects constructed from the given datum (an empty list when the datum is `None`).
def get_unicodedata(version, output=HOME, no_zip=False): """Ensure we have Unicode data to generate Unicode tables.""" target = os.path.join(output, 'unicodedata', version) zip_target = os.path.join(output, 'unicodedata', '%s.zip' % version) if not os.path.exists(target) and os.path.exists(zip_target): unzip_unicode(output, version) # Download missing files if any. Zip if required. download_unicodedata(version, output, no_zip)
Ensure we have Unicode data to generate Unicode tables.
def get_category_drilldown(parser, token):
    """
    Retrieves the specified category, its ancestors and its immediate children
    as an iterable.

    Syntax::

        {% get_category_drilldown "category name" [using "app.Model"] as varname %}

    Example::

        {% get_category_drilldown "/Grandparent/Parent" [using "app.Model"] as family %}

    or ::

        {% get_category_drilldown category_obj as family %}

    Sets family to::

        Grandparent, Parent, Child 1, Child 2, Child n
    """
    bits = token.split_contents()
    error_str = '%(tagname)s tag should be in the format {%% %(tagname)s ' \
                '"category name" [using "app.Model"] as varname %%} or ' \
                '{%% %(tagname)s category_obj as varname %%}.'
    if len(bits) == 4:
        if bits[2] != 'as':
            raise template.TemplateSyntaxError(error_str % {'tagname': bits[0]})
        varname = bits[3].strip("'\"")
        model = "categories.category"
    elif len(bits) == 6:
        if bits[2] not in ('using', 'as') or bits[4] not in ('using', 'as'):
            raise template.TemplateSyntaxError(error_str % {'tagname': bits[0]})
        if bits[2] == 'as':
            varname = bits[3].strip("'\"")
            model = bits[5].strip("'\"")
        else:  # bits[2] == 'using'
            varname = bits[5].strip("'\"")
            model = bits[3].strip("'\"")
    else:
        # Any other number of arguments is malformed; raising here avoids an
        # UnboundLocalError on varname/model below.
        raise template.TemplateSyntaxError(error_str % {'tagname': bits[0]})
    category = FilterExpression(bits[1], parser)
    return CategoryDrillDownNode(category, varname, model)
Retrieves the specified category, its ancestors and its immediate children as an iterable. Syntax:: {% get_category_drilldown "category name" [using "app.Model"] as varname %} Example:: {% get_category_drilldown "/Grandparent/Parent" [using "app.Model"] as family %} or :: {% get_category_drilldown category_obj as family %} Sets family to:: Grandparent, Parent, Child 1, Child 2, Child n
def is_intersection(g, n):
    """
    Determine if a node is an intersection

    graph: 1 -->-- 2 -->-- 3

    >>> is_intersection(g, 2)
    False

    graph:
     1 -- 2 -- 3
          |
          4

    >>> is_intersection(g, 2)
    True

    Parameters
    ----------
    g : networkx DiGraph
    n : node id

    Returns
    -------
    bool
    """
    # Set union works on both networkx 1.x (lists) and 2.x (iterators);
    # concatenating with `+` breaks on 2.x.
    return len(set(g.predecessors(n)) | set(g.successors(n))) > 2
Determine if a node is an intersection graph: 1 -->-- 2 -->-- 3 >>> is_intersection(g, 2) False graph: 1 -- 2 -- 3 | 4 >>> is_intersection(g, 2) True Parameters ---------- g : networkx DiGraph n : node id Returns ------- bool
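A quick check of the predicate on a toy graph; with the set-union spelling above, this works on both networkx 1.x and 2.x (assumes networkx is installed):

import networkx as nx

g = nx.DiGraph()
g.add_edges_from([(1, 2), (2, 3), (4, 2)])  # node 2 touches nodes 1, 3 and 4
print(is_intersection(g, 2))  # True: three distinct neighbours
print(is_intersection(g, 1))  # False: only one neighbour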
def make_index_for(package, index_dir, verbose=True):
    """
    Create an 'index.html' for one package.

    :param package: Package object to use.
    :param index_dir: Where 'index.html' should be created.
    """
    index_template = """\
<html>
<head><title>{title}</title></head>
<body>
<h1>{title}</h1>
<ul>
{packages}
</ul>
</body>
</html>
"""
    item_template = '<li><a href="{1}">{0}</a></li>'
    index_filename = os.path.join(index_dir, "index.html")
    if not os.path.isdir(index_dir):
        os.makedirs(index_dir)

    parts = []
    for pkg_filename in package.files:
        pkg_name = os.path.basename(pkg_filename)
        if pkg_name == "index.html":
            # -- ROOT-INDEX:
            pkg_name = os.path.basename(os.path.dirname(pkg_filename))
        else:
            pkg_name = package.splitext(pkg_name)
        pkg_relpath_to = os.path.relpath(pkg_filename, index_dir)
        parts.append(item_template.format(pkg_name, pkg_relpath_to))

    if not parts:
        print("OOPS: Package %s has no files" % package.name)
        return

    if verbose:
        root_index = not Package.isa(package.files[0])
        if root_index:
            info = "with %d package(s)" % len(package.files)
        else:
            package_versions = sorted(set(package.versions))
            info = ", ".join(reversed(package_versions))
        message = "%-30s %s" % (package.name, info)
        print(message)

    with open(index_filename, "w") as f:
        packages = "\n".join(parts)
        text = index_template.format(title=package.name, packages=packages)
        f.write(text.strip())
        # the `with` block closes the file; no explicit close() needed
Create an 'index.html' for one package. :param package: Package object to use. :param index_dir: Where 'index.html' should be created.
def _check_query(self, query, style_cols=None): """Checks if query from Layer or QueryLayer is valid""" try: self.sql_client.send( utils.minify_sql(( 'EXPLAIN', 'SELECT', ' {style_cols}{comma}', ' the_geom, the_geom_webmercator', 'FROM ({query}) _wrap;', )).format(query=query, comma=',' if style_cols else '', style_cols=(','.join(style_cols) if style_cols else '')), do_post=False) except Exception as err: raise ValueError(('Layer query `{query}` and/or style column(s) ' '{cols} are not valid: {err}.' '').format(query=query, cols=', '.join(['`{}`'.format(c) for c in style_cols]), err=err))
Checks if query from Layer or QueryLayer is valid
def create_ports(port, mpi, rank):
    """create a list of ports for the current rank"""
    if port == "random" or port is None:
        # ports will be filled in using random binding
        ports = {}
    else:
        port = int(port)
        ports = {
            "REQ": port + 0,
            "PUSH": port + 1,
            "SUB": port + 2
        }
        # if we want to communicate with separate domains
        # we have to setup a socket for each of them
        if mpi == 'all':
            # use a socket for each rank
            for name in ports:
                ports[name] += (rank * 3)
    return ports
create a list of ports for the current rank
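For example, three ranks sharing base port 5555 each get a disjoint triple:

for rank in range(3):
    print(rank, create_ports(5555, mpi='all', rank=rank))
# 0 {'REQ': 5555, 'PUSH': 5556, 'SUB': 5557}
# 1 {'REQ': 5558, 'PUSH': 5559, 'SUB': 5560}
# 2 {'REQ': 5561, 'PUSH': 5562, 'SUB': 5563}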
def add_request(self, request): """ Add a request object """ # Duplicate request name? if request.name in self.requests: raise ValueError('redefinition of request "{0}"'.format(request.name)) self.requests[request.name] = request # Add the request URLs for method, url in request.urls: # URL with arguments? if RE_URL_ARG.search(url): request_regex = '^' + RE_URL_ARG_ESC.sub(r'/(?P<\1>[^/]+)', re.escape(url)) + '$' self.__request_regex.append((method, re.compile(request_regex), request)) else: request_key = (method, url) if request_key in self.__request_urls: raise ValueError('redefinition of request URL "{0}"'.format(url)) self.__request_urls[request_key] = request
Add a request object
def from_text(text): """Convert text into an opcode. @param text: the textual opcode @type text: string @raises UnknownOpcode: the opcode is unknown @rtype: int """ if text.isdigit(): value = int(text) if value >= 0 and value <= 15: return value value = _by_text.get(text.upper()) if value is None: raise UnknownOpcode return value
Convert text into an opcode. @param text: the textual opcode @type text: string @raises UnknownOpcode: the opcode is unknown @rtype: int
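The function relies on a module-level `_by_text` table that is not part of this record. A stand-in with the standard DNS opcode mnemonics (RFC 1035/2136 values) shows the two lookup paths:

_by_text = {'QUERY': 0, 'IQUERY': 1, 'STATUS': 2, 'NOTIFY': 4, 'UPDATE': 5}

print(from_text('update'))  # 5: case-insensitive mnemonic lookup
print(from_text('3'))       # 3: numeric strings 0-15 pass straight through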
def _init_index(root_dir, schema, index_name): """ Creates new index or opens existing. Args: root_dir (str): root dir where to find or create index. schema (whoosh.fields.Schema): schema of the index to create or open. index_name (str): name of the index. Returns: tuple ((whoosh.index.FileIndex, str)): first element is index, second is index directory. """ index_dir = os.path.join(root_dir, index_name) try: if not os.path.exists(index_dir): os.makedirs(index_dir) return create_in(index_dir, schema), index_dir else: return open_dir(index_dir), index_dir except Exception as e: logger.error("Init error: failed to open search index at: '{}': {} ".format(index_dir, e)) raise
Creates new index or opens existing. Args: root_dir (str): root dir where to find or create index. schema (whoosh.fields.Schema): schema of the index to create or open. index_name (str): name of the index. Returns: tuple ((whoosh.index.FileIndex, str)): first element is index, second is index directory.
def validate_subfolders(filedir, metadata): """ Check that all folders in the given directory have a corresponding entry in the metadata file, and vice versa. :param filedir: This field is the target directory from which to match metadata :param metadata: This field contains the metadata to be matched. """ if not os.path.isdir(filedir): print("Error: " + filedir + " is not a directory") return False subfolders = os.listdir(filedir) for subfolder in subfolders: if subfolder not in metadata: print("Error: folder " + subfolder + " present on disk but not in metadata") return False for subfolder in metadata: if subfolder not in subfolders: print("Error: folder " + subfolder + " present in metadata but not on disk") return False return True
Check that all folders in the given directory have a corresponding entry in the metadata file, and vice versa. :param filedir: This field is the target directory from which to match metadata :param metadata: This field contains the metadata to be matched.
def match_aspect_to_viewport(self): """Updates Camera.aspect to match the viewport's aspect ratio.""" viewport = self.viewport self.aspect = float(viewport.width) / viewport.height
Updates Camera.aspect to match the viewport's aspect ratio.
def get_user(self, user): """Get user's data (first and last name, email, etc). Args: user (string): User name. Returns: (dictionary): User's data encoded in a dictionary. Raises: requests.HTTPError on failure. """ return self.service.get_user( user, self.url_prefix, self.auth, self.session, self.session_send_opts)
Get user's data (first and last name, email, etc). Args: user (string): User name. Returns: (dictionary): User's data encoded in a dictionary. Raises: requests.HTTPError on failure.
def average_gradient(data, **kwargs):
    """ Compute average gradient norm of an image
    """
    # note: the original signature used `*kwargs`, which collects positional
    # arguments; `**kwargs` is the conventional spelling for ignored keywords
    return np.average(np.array(np.gradient(data))**2)
Compute average gradient norm of an image
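On a linear ramp the result is simply the mean of the squared per-axis slopes:

import numpy as np

img = np.arange(16, dtype=float).reshape(4, 4)
print(average_gradient(img))  # 8.5: row gradient is 4, column gradient is 1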
def natsorted(seq, key=lambda x: x, number_type=float, signed=True, exp=True): """\ Sorts a sequence naturally (alphabetically and numerically), not lexicographically. >>> a = ['num3', 'num5', 'num2'] >>> natsorted(a) ['num2', 'num3', 'num5'] >>> b = [('a', 'num3'), ('b', 'num5'), ('c', 'num2')] >>> from operator import itemgetter >>> natsorted(b, key=itemgetter(1)) [('c', 'num2'), ('a', 'num3'), ('b', 'num5')] """ return sorted(seq, key=lambda x: natsort_key(key(x), number_type=number_type, signed=signed, exp=exp))
Sorts a sequence naturally (alphabetically and numerically), not lexicographically.

>>> a = ['num3', 'num5', 'num2']
>>> natsorted(a)
['num2', 'num3', 'num5']
>>> b = [('a', 'num3'), ('b', 'num5'), ('c', 'num2')]
>>> from operator import itemgetter
>>> natsorted(b, key=itemgetter(1))
[('c', 'num2'), ('a', 'num3'), ('b', 'num5')]
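`natsort_key` itself is not part of this record. A simplified stand-in (it ignores the number_type/signed/exp flags and only handles unsigned integers) illustrates the idea:

import re

def natsort_key(s, number_type=float, signed=True, exp=True):
    # Split into digit and non-digit runs so embedded numbers compare numerically.
    return tuple(int(t) if t.isdigit() else t for t in re.split(r'(\d+)', s))

print(sorted(['num3', 'num10', 'num2'], key=natsort_key))
# ['num2', 'num3', 'num10'] -- plain sorted() would give ['num10', 'num2', 'num3']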
def send_output_report(self, data):
    """Send an input/output/feature report (ID = report_id); data should be
    a c_ubyte array containing the required report data
    """
    assert( self.is_opened() )
    #make sure we have c_ubyte array storage
    if not ( isinstance(data, ctypes.Array) and \
            issubclass(data._type_, c_ubyte) ):
        raw_data_type = c_ubyte * len(data)
        raw_data = raw_data_type()
        for index in range( len(data) ):
            raw_data[index] = data[index]
    else:
        raw_data = data

    #
    # Adding a lock when writing (overlapped writes)
    over_write = winapi.OVERLAPPED()
    over_write.h_event = winapi.CreateEvent(None, 0, 0, None)
    if over_write.h_event:
        try:
            overlapped_write = over_write
            winapi.WriteFile(int(self.hid_handle), byref(raw_data), len(raw_data),
                None, byref(overlapped_write)) #none overlapped
            error = ctypes.GetLastError()
            if error == winapi.ERROR_IO_PENDING:
                # overlapped operation in progress; wait for it below
                pass
            elif error == 1167:  # ERROR_DEVICE_NOT_CONNECTED
                raise HIDError("Error device disconnected before write")
            else:
                raise HIDError("Error %d when trying to write to HID "\
                    "device: %s"%(error, ctypes.FormatError(error)) )

            result = winapi.WaitForSingleObject(overlapped_write.h_event, 10000 )
            if result != winapi.WAIT_OBJECT_0:
                # If the write times out make sure to
                # cancel it, otherwise memory could
                # get corrupted if the async write
                # completes after this function returns
                winapi.CancelIo( int(self.hid_handle) )
                raise HIDError("Write timed out")
        finally:
            # Make sure the event is closed so resources aren't leaked
            winapi.CloseHandle(over_write.h_event)
    else:
        return winapi.WriteFile(int(self.hid_handle), byref(raw_data), len(raw_data),
            None, None) #none overlapped
    return True
Send an input/output/feature report (ID = report_id); data should be a c_ubyte array containing the required report data
def miraligner(args):
    """
    Realign BAM hits to miRBase to get better accuracy and annotation
    """
    hairpin, mirna = _download_mirbase(args)
    precursors = _read_precursor(args.hairpin, args.sps)
    matures = _read_mature(args.mirna, args.sps)
    gtf = _read_gtf(args.gtf)
    out_dts = []
    out_files = []
    for bam_fn in args.files:
        sample = op.splitext(op.basename(bam_fn))[0]
        logger.info("Reading %s" % bam_fn)
        if bam_fn.endswith("bam") or bam_fn.endswith("sam"):
            bam_fn = _sam_to_bam(bam_fn)
            bam_sort_by_n = op.splitext(bam_fn)[0] + "_sort"
            pysam.sort("-n", bam_fn, bam_sort_by_n)
            reads = _read_bam(bam_sort_by_n + ".bam", precursors)
        elif bam_fn.endswith("fasta") or bam_fn.endswith("fa") or \
                bam_fn.endswith("fastq"):
            if args.collapse:
                bam_fn = _collapse_fastq(bam_fn)
            out_file = op.join(args.out, sample + ".premirna")
            bam_fn = _filter_seqs(bam_fn)
            if args.miraligner:
                _cmd_miraligner(bam_fn, out_file, args.sps, args.hairpin, args.out)
                reads = _read_miraligner(out_file)
                out_files.append(out_file)
        else:
            raise ValueError("Format not recognized.")
        if args.miraligner:
            _mirtop(out_files, args.hairpin, args.gtf, args.sps, args.out)
        if not args.miraligner:
            reads = _annotate(reads, matures, precursors)
        out_file = op.join(args.out, sample + ".mirna")
        out_file, dt, dt_pre = _tab_output(reads, out_file, sample)
        try:
            vcf_file = op.join(args.out, sample + ".vcf")
            if not file_exists(vcf_file):
                create_vcf(dt_pre, matures, gtf, vcf_file)
            try:
                import vcf
                vcf.Reader(filename=vcf_file)
            except Exception as e:
                logger.warning(e.__doc__)
                logger.warning(str(e))  # e.message is Python 2 only
        except Exception as e:
            # traceback.print_exc()
            logger.warning(e.__doc__)
            logger.warning(str(e))  # e.message is Python 2 only
        if isinstance(dt, pd.DataFrame):
            out_dts.append(dt)
    if out_dts:
        _create_counts(out_dts, args.out)
    else:
        print("No files analyzed!")
Realign BAM hits to miRBase to get better accuracy and annotation
def do_powershell_complete(cli, prog_name):
    """Do the powershell completion

    Parameters
    ----------
    cli : click.Command
        The main click Command of the program
    prog_name : str
        The program name on the command line

    Returns
    -------
    bool
        True if the completion was successful, False otherwise
    """
    commandline = os.environ['COMMANDLINE']
    args = split_args(commandline)[1:]
    quote = single_quote
    incomplete = ''
    if args and not commandline.endswith(' '):
        incomplete = args[-1]
        args = args[:-1]
        quote_pos = commandline.rfind(incomplete) - 1
        if quote_pos >= 0 and commandline[quote_pos] == '"':
            quote = double_quote

    # the help text is unused here; avoid shadowing the builtin `help`
    for item, _help in get_choices(cli, prog_name, args, incomplete):
        echo(quote(item))

    return True
Do the powershell completion Parameters ---------- cli : click.Command The main click Command of the program prog_name : str The program name on the command line Returns ------- bool True if the completion was successful, False otherwise
def single_value(cls, value, shape, pixel_scale, origin=(0.0, 0.0)):
    """
    Creates an instance of Array and fills it with a single value

    Parameters
    ----------
    value: float
        The value with which the array should be filled
    shape: (int, int)
        The shape of the array
    pixel_scale: float
        The scale of a pixel in arc seconds
    origin: (float, float)
        The origin of the array's coordinate system (defaults to (0.0, 0.0))

    Returns
    -------
    array: ScaledSquarePixelArray
        An array filled with a single value
    """
    array = np.ones(shape) * value
    return cls(array, pixel_scale, origin)
Creates an instance of Array and fills it with a single value

Parameters
----------
value: float
    The value with which the array should be filled
shape: (int, int)
    The shape of the array
pixel_scale: float
    The scale of a pixel in arc seconds
origin: (float, float)
    The origin of the array's coordinate system (defaults to (0.0, 0.0))

Returns
-------
array: ScaledSquarePixelArray
    An array filled with a single value
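The fill step itself is plain numpy; only the wrapping class (whose constructor is not shown in this record) adds the pixel-scale bookkeeping:

import numpy as np

arr = np.ones((2, 3)) * 0.5
print(arr)
# [[0.5 0.5 0.5]
#  [0.5 0.5 0.5]]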
def minimum_katcp_version(major, minor=0): """Decorator; exclude handler if server's protocol version is too low Useful for including default handler implementations for KATCP features that are only present in certain KATCP protocol versions Examples -------- >>> class MyDevice(DeviceServer): ... '''This device server will expose ?myreq''' ... PROTOCOL_INFO = katcp.core.ProtocolFlags(5, 1) ... ... @minimum_katcp_version(5, 1) ... def request_myreq(self, req, msg): ... '''A request that should only be present for KATCP >v5.1''' ... # Request handler implementation here. ... >>> class MyOldDevice(MyDevice): ... '''This device server will not expose ?myreq''' ... ... PROTOCOL_INFO = katcp.core.ProtocolFlags(5, 0) ... """ version_tuple = (major, minor) def decorator(handler): handler._minimum_katcp_version = version_tuple return handler return decorator
Decorator; exclude handler if server's protocol version is too low Useful for including default handler implementations for KATCP features that are only present in certain KATCP protocol versions Examples -------- >>> class MyDevice(DeviceServer): ... '''This device server will expose ?myreq''' ... PROTOCOL_INFO = katcp.core.ProtocolFlags(5, 1) ... ... @minimum_katcp_version(5, 1) ... def request_myreq(self, req, msg): ... '''A request that should only be present for KATCP >v5.1''' ... # Request handler implementation here. ... >>> class MyOldDevice(MyDevice): ... '''This device server will not expose ?myreq''' ... ... PROTOCOL_INFO = katcp.core.ProtocolFlags(5, 0) ...
def focusd(task): """ Forks the current process as a daemon to run a task. `task` ``Task`` instance for the task to run. """ # determine if command server should be started if registration.get_registered(event_hooks=True, root_access=True): # root event plugins available start_cmd_srv = (os.getuid() == 0) # must be root else: start_cmd_srv = False # daemonize our current process _run = lambda: Focusd(task).run(start_cmd_srv) daemonize(get_daemon_pidfile(task), task.task_dir, _run)
Forks the current process as a daemon to run a task. `task` ``Task`` instance for the task to run.
def get_task(self, task=0, timeout=None, block=True):
    """
    Returns an iterator whose results are limited to one **task**. The
    default iterator, i.e. the one which will be used in a for loop, is
    the iterator for the first task (task=0). The returned iterator is a
    ``_NuMapTask`` instance.

    Compare::

        for result_from_task_0 in imap_instance:
            pass

    with::

        for result_from_task_1 in imap_instance.get_task(task=1):
            pass

    a typical use case is::

        task_0_iterator = imap_instance.get_task(task=0)
        task_1_iterator = imap_instance.get_task(task=1)

        for (task_1_res, task_0_res) in izip(task_0_iterator, task_1_iterator):
            pass
    """
    return _NuMapTask(self, task=task, timeout=timeout, block=block)
Returns an iterator whose results are limited to one **task**. The default iterator, i.e. the one which will be used in a for loop, is the iterator for the first task (task=0). The returned iterator is a ``_NuMapTask`` instance.

Compare::

    for result_from_task_0 in imap_instance:
        pass

with::

    for result_from_task_1 in imap_instance.get_task(task=1):
        pass

a typical use case is::

    task_0_iterator = imap_instance.get_task(task=0)
    task_1_iterator = imap_instance.get_task(task=1)

    for (task_1_res, task_0_res) in izip(task_0_iterator, task_1_iterator):
        pass
def getTotalw(self): """Returns the cumulative w for all the fields in the dataset""" w = sum([field.w for field in self.fields]) return w
Returns the cumulative w for all the fields in the dataset
def is_training_modified(self):
    """
    Returns `True` if training data was modified since last training.
    Returns `False` otherwise, or if using builtin training data.
    """
    last_modified = self.trainer.get_last_modified()
    return last_modified > self.training_timestamp
Returns `True` if training data was modified since last training. Returns `False` otherwise, or if using builtin training data.
def list(self, **params):
    """
    Retrieve visit outcomes

    Returns Visit Outcomes, according to the parameters provided

    :calls: ``get /visit_outcomes``
    :param dict params: (optional) Search options.
    :return: List of dictionaries that support attribute-style access,
        representing a collection of VisitOutcomes.
    :rtype: list
    """
    _, _, visit_outcomes = self.http_client.get("/visit_outcomes", params=params)
    return visit_outcomes
Retrieve visit outcomes

Returns Visit Outcomes, according to the parameters provided

:calls: ``get /visit_outcomes``
:param dict params: (optional) Search options.
:return: List of dictionaries that support attribute-style access, representing a collection of VisitOutcomes.
:rtype: list
def addGlobalServices(self):
    """
    This is where we put services that we don't want duplicated on
    worker subprocesses
    """
    if self.options.get('global_cache') and self.options.get('cache'):
        # only add the cache service here if the global_cache and cache
        # options were set to True
        _cache = self.getCacheService()
        _cache.startService()
This is where we put services that we don't want duplicated on worker subprocesses
def untranslateName(s): """Undo Python conversion of CL parameter or variable name.""" s = s.replace('DOT', '.') s = s.replace('DOLLAR', '$') # delete 'PY' at start of name components if s[:2] == 'PY': s = s[2:] s = s.replace('.PY', '.') return s
Undo Python conversion of CL parameter or variable name.
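Two round-trip examples (the function above must be in scope):

print(untranslateName('PYimport.PYlambda'))  # 'import.lambda'
print(untranslateName('aDOTbDOLLARc'))       # 'a.b$c'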
def make_matrix(version, reserve_regions=True, add_timing=True):
    """\
    Creates a square matrix for the provided (Micro) QR Code `version`,
    initialized with the (illegal) value 0x2.

    The "timing pattern" is already added to the matrix and the version
    and format areas are initialized with 0x0.

    :param int version: The (Micro) QR Code version
    :rtype: tuple of bytearrays
    """
    size = calc_matrix_size(version)
    row = [0x2] * size
    matrix = tuple([bytearray(row) for i in range(size)])
    if reserve_regions:
        if version > 6:
            # Reserve version pattern areas
            for i in range(6):
                # Upper right
                matrix[i][-11] = 0x0
                matrix[i][-10] = 0x0
                matrix[i][-9] = 0x0
                # Lower left
                matrix[-11][i] = 0x0
                matrix[-10][i] = 0x0
                matrix[-9][i] = 0x0
        # Reserve format pattern areas
        for i in range(9):
            matrix[i][8] = 0x0  # Upper left
            matrix[8][i] = 0x0  # Upper bottom
            if version > 0:
                matrix[-i][8] = 0x0  # Bottom left
                matrix[8][-i] = 0x0  # Upper right
    if add_timing:
        # ISO/IEC 18004:2015 -- 6.3.5 Timing pattern (page 17)
        add_timing_pattern(matrix, version < 1)
    return matrix
Creates a square matrix for the provided (Micro) QR Code `version`, initialized with the (illegal) value 0x2. The "timing pattern" is already added to the matrix and the version and format areas are initialized with 0x0.

:param int version: The (Micro) QR Code version
:rtype: tuple of bytearrays
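For a regular QR code, version v is (17 + 4*v) modules per side; assuming `calc_matrix_size` implements that rule for v >= 1 (the Micro QR sizes are not reproduced here), a version-2 skeleton looks like:

def calc_matrix_size(version):
    # Stand-in for the real helper; regular QR versions only.
    return 17 + 4 * version

matrix = make_matrix(2, reserve_regions=True, add_timing=False)
print(len(matrix), len(matrix[0]))  # 25 25
print(matrix[0][8], matrix[8][0])   # 0 0 -- format areas reserved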
def Datetime(null=True, **kwargs): """A datetime property.""" return Property( types=datetime.datetime, convert=util.local_timezone, load=dateutil.parser.parse, null=null, **kwargs )
A datetime property.
def defaults(d1, d2): """ Update a copy of d1 with the contents of d2 that are not in d1. d1 and d2 are dictionary like objects. Parameters ---------- d1 : dict | dataframe dict with the preferred values d2 : dict | dataframe dict with the default values Returns ------- out : dict | dataframe Result of adding default values type of d1 """ d1 = d1.copy() tolist = isinstance(d2, pd.DataFrame) keys = (k for k in d2 if k not in d1) for k in keys: if tolist: d1[k] = d2[k].tolist() else: d1[k] = d2[k] return d1
Update a copy of d1 with the contents of d2 that are not in d1. d1 and d2 are dictionary like objects. Parameters ---------- d1 : dict | dataframe dict with the preferred values d2 : dict | dataframe dict with the default values Returns ------- out : dict | dataframe Result of adding default values type of d1
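With two plain dicts (`pd` must be importable since the function checks for DataFrames):

d1 = {'color': 'red'}
d2 = {'color': 'blue', 'size': 10}
print(defaults(d1, d2))  # {'color': 'red', 'size': 10} -- d1 wins on conflicts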
def find_and_filter_sgf_files(base_dir, min_year=None, komi=None):
    """Finds all sgf files in base_dir with year >= min_year and komi"""
    sgf_files = []
    for dirpath, dirnames, filenames in os.walk(base_dir):
        for filename in filenames:
            if filename.endswith('.sgf'):
                path = os.path.join(dirpath, filename)
                sgf_files.append(path)
    if min_year is None and komi is None:
        print("Found {} sgf_files".format(len(sgf_files)))
        return sgf_files
    f = filter_year_komi(min_year, komi)
    filtered_sgf_files = [sgf for sgf in tqdm(sgf_files) if f(sgf)]
    print("{} of {} .sgf files matched (min_year >= {}, komi = {})".format(
        len(filtered_sgf_files), len(sgf_files), min_year, komi))
    return filtered_sgf_files
Finds all sgf files in base_dir with year >= min_year and komi
def Points(plist, r=5, c="gray", alpha=1): """ Build a point ``Actor`` for a list of points. :param float r: point radius. :param c: color name, number, or list of [R,G,B] colors of same length as plist. :type c: int, str, list :param float alpha: transparency in range [0,1]. .. hint:: |lorenz| |lorenz.py|_ """ def _colorPoints(plist, cols, r, alpha): n = len(plist) if n > len(cols): colors.printc("~times Error: mismatch in colorPoints()", n, len(cols), c=1) exit() if n != len(cols): colors.printc("~lightning Warning: mismatch in colorPoints()", n, len(cols)) src = vtk.vtkPointSource() src.SetNumberOfPoints(n) src.Update() vgf = vtk.vtkVertexGlyphFilter() vgf.SetInputData(src.GetOutput()) vgf.Update() pd = vgf.GetOutput() ucols = vtk.vtkUnsignedCharArray() ucols.SetNumberOfComponents(3) ucols.SetName("pointsRGB") for i in range(len(plist)): c = np.array(colors.getColor(cols[i])) * 255 ucols.InsertNextTuple3(c[0], c[1], c[2]) pd.GetPoints().SetData(numpy_to_vtk(plist, deep=True)) pd.GetPointData().SetScalars(ucols) actor = Actor(pd, c, alpha) actor.mapper.ScalarVisibilityOn() actor.GetProperty().SetInterpolationToFlat() actor.GetProperty().SetPointSize(r) settings.collectable_actors.append(actor) return actor n = len(plist) if n == 0: return None elif n == 3: # assume plist is in the format [all_x, all_y, all_z] if utils.isSequence(plist[0]) and len(plist[0]) > 3: plist = list(zip(plist[0], plist[1], plist[2])) elif n == 2: # assume plist is in the format [all_x, all_y, 0] if utils.isSequence(plist[0]) and len(plist[0]) > 3: plist = list(zip(plist[0], plist[1], [0] * len(plist[0]))) if utils.isSequence(c) and len(c) > 3: actor = _colorPoints(plist, c, r, alpha) settings.collectable_actors.append(actor) return actor ################ n = len(plist) # refresh sourcePoints = vtk.vtkPoints() sourceVertices = vtk.vtkCellArray() is3d = len(plist[0]) > 2 if is3d: # its faster for pt in plist: aid = sourcePoints.InsertNextPoint(pt) sourceVertices.InsertNextCell(1) sourceVertices.InsertCellPoint(aid) else: for pt in plist: aid = sourcePoints.InsertNextPoint(pt[0], pt[1], 0) sourceVertices.InsertNextCell(1) sourceVertices.InsertCellPoint(aid) pd = vtk.vtkPolyData() pd.SetPoints(sourcePoints) pd.SetVerts(sourceVertices) if n == 1: # passing just one point pd.GetPoints().SetPoint(0, [0, 0, 0]) else: pd.GetPoints().SetData(numpy_to_vtk(plist, deep=True)) actor = Actor(pd, c, alpha) actor.GetProperty().SetPointSize(r) if n == 1: actor.SetPosition(plist[0]) settings.collectable_actors.append(actor) return actor
Build a point ``Actor`` for a list of points. :param float r: point radius. :param c: color name, number, or list of [R,G,B] colors of same length as plist. :type c: int, str, list :param float alpha: transparency in range [0,1]. .. hint:: |lorenz| |lorenz.py|_
def vcs_upload(): """ Uploads the project with the selected VCS tool. """ if env.deploy_tool == "git": remote_path = "ssh://%s@%s%s" % (env.user, env.host_string, env.repo_path) if not exists(env.repo_path): run("mkdir -p %s" % env.repo_path) with cd(env.repo_path): run("git init --bare") local("git push -f %s master" % remote_path) with cd(env.repo_path): run("GIT_WORK_TREE=%s git checkout -f master" % env.proj_path) run("GIT_WORK_TREE=%s git reset --hard" % env.proj_path) elif env.deploy_tool == "hg": remote_path = "ssh://%s@%s/%s" % (env.user, env.host_string, env.repo_path) with cd(env.repo_path): if not exists("%s/.hg" % env.repo_path): run("hg init") print(env.repo_path) with fab_settings(warn_only=True): push = local("hg push -f %s" % remote_path) if push.return_code == 255: abort() run("hg update")
Uploads the project with the selected VCS tool.
def inFootprint(footprint, ra, dec):
    """
    Check if set of ra,dec combinations are in footprint.
    Careful, input files must be in celestial coordinates.

    footprint : Healpix map, healpix filename, or mangle polygon file
    ra,dec    : Celestial coordinates

    Returns:
    inside    : boolean array of coordinates in footprint
    """
    if footprint is None:
        return np.ones(len(ra), dtype=bool)
    try:
        if isinstance(footprint, str) and os.path.exists(footprint):
            filename = footprint
            footprint = read_map(filename)
        nside = hp.npix2nside(len(footprint))
        pix = ang2pix(nside, ra, dec)
        inside = (footprint[pix] > 0)
    except IOError:
        logger.warning("Failed to load healpix footprint; trying to use mangle...")
        inside = inMangle(filename, ra, dec)
    return inside
Check if set of ra,dec combinations are in footprint. Careful, input files must be in celestial coordinates.

footprint : Healpix map, healpix filename, or mangle polygon file
ra,dec : Celestial coordinates

Returns:
inside : boolean array of coordinates in footprint
def values(self): """return a list of all state values""" values = [] for __, data in self.items(): values.append(data) return values
return a list of all state values
def remove(self, branch, turn, tick): """Delete data on or after this tick On the assumption that the future has been invalidated. """ for parent, entitys in list(self.parents.items()): for entity, keys in list(entitys.items()): for key, branchs in list(keys.items()): if branch in branchs: branhc = branchs[branch] if turn in branhc: trun = branhc[turn] if tick in trun: del trun[tick] trun.truncate(tick) if not trun: del branhc[turn] branhc.truncate(turn) if not branhc: del branchs[branch] if not branchs: del keys[key] if not keys: del entitys[entity] if not entitys: del self.parents[parent] for branchkey, branches in list(self.branches.items()): if branch in branches: branhc = branches[branch] if turn in branhc: trun = branhc[turn] if tick in trun: del trun[tick] trun.truncate(tick) if not trun: del branhc[turn] branhc.truncate(turn) if not branhc: del branches[branch] if not branches: del self.branches[branchkey] for keykey, keys in list(self.keys.items()): for key, branchs in list(keys.items()): if branch in branchs: branhc = branchs[branch] if turn in branhc: trun = branhc[turn] if tick in trun: del trun[tick] trun.truncate(tick) if not trun: del branhc[turn] branhc.truncate(turn) if not branhc: del branches[branch] if not branchs: del keys[key] if not keys: del self.keys[keykey] sets = self.settings[branch] if turn in sets: setsturn = sets[turn] if tick in setsturn: del setsturn[tick] setsturn.truncate(tick) if not setsturn: del sets[turn] sets.truncate(turn) if not sets: del self.settings[branch] presets = self.presettings[branch] if turn in presets: presetsturn = presets[turn] if tick in presetsturn: del presetsturn[tick] presetsturn.truncate(tick) if not presetsturn: del presets[turn] presets.truncate(turn) if not presets: del self.presettings[branch] for entity, brnch in list(self.keycache): if brnch == branch: kc = self.keycache[entity, brnch] if turn in kc: kcturn = kc[turn] if tick in kcturn: del kcturn[tick] kcturn.truncate(tick) if not kcturn: del kc[turn] kc.truncate(turn) if not kc: del self.keycache[entity, brnch] self.shallowest = OrderedDict() self.send(self, branch=branch, turn=turn, tick=tick, action='remove')
Delete data on or after this tick On the assumption that the future has been invalidated.
def hmget(self, *args):
    """
    This command on the model allows getting many InstanceHashField
    values with only one redis call. You must pass the names of the
    hash fields to retrieve as arguments.
    """
    if args and not any(arg in self._instancehash_fields for arg in args):
        raise ValueError("Only InstanceHashField can be used here.")

    return self._call_command('hmget', args)
This command on the model allows getting many InstanceHashField values with only one redis call. You must pass the names of the hash fields to retrieve as arguments.
def add(self, data):
    """
    Add a dict of items into the container

    :data: dict of items & values per hostname
    """
    for host in data:
        for key in data[host]:
            if data[host][key] != []:
                self.add_item(host, key, data[host][key])
Add a dict of items into the container :data: dict of items & values per hostname
def _validate_num_units(num_units, service_name, add_error):
    """Check that the given num_units is valid.

    Use the given service name to describe possible errors.
    Use the given add_error callable to register validation errors.

    If no errors are encountered, return the number of units as an integer.
    Return None otherwise.
    """
    if num_units is None:
        # This should be a subordinate charm.
        return 0
    try:
        num_units = int(num_units)
    except (TypeError, ValueError):
        add_error(
            'num_units for service {} must be a digit'.format(service_name))
        return
    if num_units < 0:
        add_error(
            'num_units {} for service {} must not be negative'
            ''.format(num_units, service_name))
        return
    return num_units
Check that the given num_units is valid. Use the given service name to describe possible errors. Use the given add_error callable to register validation errors. If no errors are encountered, return the number of units as an integer. Return None otherwise.
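A small harness showing the three outcomes, using a plain list as the error sink:

errors = []
add_error = errors.append

print(_validate_num_units('3', 'wordpress', add_error))  # 3
print(_validate_num_units(None, 'nrpe', add_error))      # 0 (subordinate charm)
print(_validate_num_units('-1', 'bad', add_error))       # None
print(errors)  # one message about the negative unit count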
def color(self): """Line color in IDA View""" color = idc.GetColor(self.ea, idc.CIC_ITEM) if color == 0xFFFFFFFF: return None return color
Line color in IDA View
def search(self, word, limit=30): """ Search for a word within the wordgatherer collection. :param word: Word to search for. :param limit: Maximum number of results to return. """ search = Search(PrefixQuery("word", word), sort={"count": "desc"}) for doc in self.connection.search( search, indexes=[self.index], count=limit): yield (doc["word"], doc["count"])
Search for a word within the wordgatherer collection. :param word: Word to search for. :param limit: Maximum number of results to return.
def put(self, key, value): '''Stores the object `value` named by `key` in `service`. Args: key: Key naming `value`. value: the object to store. ''' key = self._service_key(key) self._service_ops['put'](key, value)
Stores the object `value` named by `key` in `service`. Args: key: Key naming `value`. value: the object to store.
def meanprecision(a): '''Mean and precision of Dirichlet distribution. Parameters ---------- a : array Parameters of Dirichlet distribution. Returns ------- mean : array Numbers [0,1] of the means of the Dirichlet distribution. precision : float Precision or concentration parameter of the Dirichlet distribution.''' s = a.sum() m = a / s return (m,s)
Mean and precision of Dirichlet distribution. Parameters ---------- a : array Parameters of Dirichlet distribution. Returns ------- mean : array Numbers [0,1] of the means of the Dirichlet distribution. precision : float Precision or concentration parameter of the Dirichlet distribution.
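For example:

import numpy as np

a = np.array([2.0, 3.0, 5.0])
m, s = meanprecision(a)
print(m)  # [0.2 0.3 0.5]
print(s)  # 10.0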
def t_RP(self, t): r'[])]' if t.value != ']' and OPTIONS.bracket.value: t.type = 'RPP' return t
r'[])]
def initialize_layers(self, layers=None):
    """Sets up the Lasagne layers

    :param layers: The dictionary of layers, or a
        :class:`lasagne.Layers` instance, describing the underlying network

    :return: the output layer of the underlying lasagne network.

    :seealso: :ref:`layer-def`
    """
    if layers is not None:
        self.layers = layers
    self.layers_ = Layers()

    # If a Layer, or a list of Layers was passed in
    if isinstance(self.layers[0], Layer):
        for out_layer in self.layers:
            for i, layer in enumerate(get_all_layers(out_layer)):
                if layer not in self.layers_.values():
                    name = layer.name or self._layer_name(layer.__class__, i)
                    self.layers_[name] = layer
                    if self._get_params_for(name) != {}:
                        raise ValueError(
                            "You can't use keyword params when passing a Lasagne "
                            "instance object as the 'layers' parameter of "
                            "'NeuralNet'."
                            )
        self._output_layers = self.layers
        return self.layers

    # 'self.layers' are a list of '(Layer class, kwargs)', so
    # we'll have to actually instantiate the layers given the
    # arguments:
    layer = None
    for i, layer_def in enumerate(self.layers):
        if isinstance(layer_def[1], dict):
            # Newer format: (Layer, {'layer': 'kwargs'})
            layer_factory, layer_kw = layer_def
            layer_kw = layer_kw.copy()
        else:
            # The legacy format: ('name', Layer)
            layer_name, layer_factory = layer_def
            layer_kw = {'name': layer_name}

        if isinstance(layer_factory, str):
            layer_factory = locate(layer_factory)
            assert layer_factory is not None

        if 'name' not in layer_kw:
            layer_kw['name'] = self._layer_name(layer_factory, i)

        more_params = self._get_params_for(layer_kw['name'])
        layer_kw.update(more_params)

        if layer_kw['name'] in self.layers_:
            raise ValueError(
                "Two layers with name {}.".format(layer_kw['name']))

        # Any layers that aren't subclasses of InputLayer are
        # assumed to require an 'incoming' parameter.  By default,
        # we'll use the previous layer as input:
        try:
            is_input_layer = issubclass(layer_factory, InputLayer)
        except TypeError:
            is_input_layer = False
        if not is_input_layer:
            if 'incoming' in layer_kw:
                layer_kw['incoming'] = self.layers_[
                    layer_kw['incoming']]
            elif 'incomings' in layer_kw:
                layer_kw['incomings'] = [
                    self.layers_[name] for name in layer_kw['incomings']]
            else:
                layer_kw['incoming'] = layer

        # Deal with additional string parameters that may
        # reference other layers; currently only 'mask_input'.
        for param in self.layer_reference_params:
            if param in layer_kw:
                val = layer_kw[param]
                if isinstance(val, basestring):
                    layer_kw[param] = self.layers_[val]

        for attr in ('W', 'b'):
            if isinstance(layer_kw.get(attr), str):
                name = layer_kw[attr]
                layer_kw[attr] = getattr(self.layers_[name], attr, None)
        try:
            layer_wrapper = layer_kw.pop('layer_wrapper', None)
            layer = layer_factory(**layer_kw)
        except TypeError as e:
            msg = ("Failed to instantiate {} with args {}.\n"
                   "Maybe parameter names have changed?".format(
                       layer_factory, layer_kw))
            chain_exception(TypeError(msg), e)
        self.layers_[layer_kw['name']] = layer
        if layer_wrapper is not None:
            layer = layer_wrapper(layer)
            self.layers_["LW_%s" % layer_kw['name']] = layer

    self._output_layers = [layer]
    return [layer]
Sets up the Lasagne layers :param layers: The dictionary of layers, or a :class:`lasagne.Layers` instance, describing the underlying network :return: the output layer of the underlying lasagne network. :seealso: :ref:`layer-def`