code: string (lengths 75 to 104k)
docstring: string (lengths 1 to 46.9k)
def __parse_names():
    '''Gets and parses file'''
    filename = get_file('names.tsv.gz')

    with io.open(filename, 'r', encoding='cp1252') as textfile:
        next(textfile)

        for line in textfile:
            tokens = line.strip().split('\t')
            chebi_id = int(tokens[1])

            if chebi_id not in __ALL_NAMES:
                __ALL_NAMES[chebi_id] = []

            # Append Name:
            nme = Name(tokens[4],
                       tokens[2],
                       tokens[3],
                       tokens[5] == 'T',
                       tokens[6])

            __ALL_NAMES[chebi_id].append(nme)
Gets and parses file
def _get_libvirt_enum_string(prefix, value):
    '''
    Convert the libvirt enum integer value into a human readable string.

    :param prefix: start of the libvirt attribute to look for.
    :param value: integer to convert to string
    '''
    attributes = [attr[len(prefix):] for attr in libvirt.__dict__ if attr.startswith(prefix)]

    # Filter out the values starting with a common base as they match another enum
    prefixes = [_compute_subprefix(p) for p in attributes]
    counts = {p: prefixes.count(p) for p in prefixes}
    sub_prefixes = [p for p, count in counts.items()
                    if count > 1 or (p.endswith('_') and p[:-1] in prefixes)]
    filtered = [attr for attr in attributes if _compute_subprefix(attr) not in sub_prefixes]

    for candidate in filtered:
        if value == getattr(libvirt, ''.join((prefix, candidate))):
            name = candidate.lower().replace('_', ' ')
            return name
    return 'unknown'
Convert the libvirt enum integer value into a human readable string. :param prefix: start of the libvirt attribute to look for. :param value: integer to convert to string
def delay(self, secs):
    """Delay some seconds

    Args:
        secs: float seconds

    Returns:
        self
    """
    secs = int(secs)
    for i in reversed(range(secs)):
        sys.stdout.write('\r')
        sys.stdout.write("sleep %ds, left %2ds" % (secs, i + 1))
        sys.stdout.flush()
        time.sleep(1)
    sys.stdout.write("\n")
    return self
Delay some seconds Args: secs: float seconds Returns: self
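As an illustration of the countdown pattern used by delay above, here is a minimal, self-contained sketch (not the library's implementation) that overwrites a single terminal line with the remaining time:

import sys
import time

def countdown(secs):
    # Round down to whole seconds, mirroring the int() cast above.
    secs = int(secs)
    for remaining in range(secs, 0, -1):
        # '\r' returns the cursor to the start of the line so the
        # message is overwritten in place instead of scrolling.
        sys.stdout.write("\rsleep %ds, left %2ds" % (secs, remaining))
        sys.stdout.flush()
        time.sleep(1)
    sys.stdout.write("\n")

countdown(3.5)  # prints "sleep 3s, left  3s" and counts down to 1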
def _create_validate_config(vrn_file, rm_file, rm_interval_file, base_dir, data):
    """Create a bcbio.variation configuration input for validation.
    """
    ref_call = {"file": str(rm_file), "name": "ref", "type": "grading-ref",
                "fix-sample-header": True, "remove-refcalls": True}
    a_intervals = get_analysis_intervals(data, vrn_file, base_dir)
    if a_intervals:
        a_intervals = shared.remove_lcr_regions(a_intervals, [data])
    if rm_interval_file:
        ref_call["intervals"] = rm_interval_file
    eval_call = {"file": vrn_file, "name": "eval", "remove-refcalls": True}
    exp = {"sample": data["name"][-1],
           "ref": dd.get_ref_file(data),
           "approach": "grade",
           "calls": [ref_call, eval_call]}
    if a_intervals:
        exp["intervals"] = os.path.abspath(a_intervals)
    if data.get("align_bam"):
        exp["align"] = data["align_bam"]
    elif data.get("work_bam"):
        exp["align"] = data["work_bam"]
    return {"dir": {"base": base_dir, "out": "work", "prep": "work/prep"},
            "experiments": [exp]}
Create a bcbio.variation configuration input for validation.
def sendmail(self, msg_from, msg_to, msg):
    """Remember the recipients."""
    SMTP_dummy.msg_from = msg_from
    SMTP_dummy.msg_to = msg_to
    SMTP_dummy.msg = msg
Remember the recipients.
def cmd_partition(opts): """Partition the network between containers Replaces any existing partitions outright. Any containers NOT specified in arguments will be globbed into a single implicit partition. For example if you have three containers: c1, c2, and c3 and you run: blockade partition c1 The result will be a partition with just c1 and another partition with c2 and c3. Alternatively, --random may be specified, and zero or more random partitions will be generated by blockade. """ config = load_config(opts.config) b = get_blockade(config, opts) if opts.random: if opts.partitions: raise BlockadeError("Either specify individual partitions " "or --random, but not both") b.random_partition() else: partitions = [] for partition in opts.partitions: names = [] for name in partition.split(","): name = name.strip() if name: names.append(name) partitions.append(names) if not partitions: raise BlockadeError("Either specify individual partitions " "or random") b.partition(partitions)
Partition the network between containers Replaces any existing partitions outright. Any containers NOT specified in arguments will be globbed into a single implicit partition. For example if you have three containers: c1, c2, and c3 and you run: blockade partition c1 The result will be a partition with just c1 and another partition with c2 and c3. Alternatively, --random may be specified, and zero or more random partitions will be generated by blockade.
def new_cells(self, name=None, formula=None):
    """Create a cells in the space.

    Args:
        name: If omitted, the model is named automatically ``CellsN``,
            where ``N`` is an available number.
        formula: The function to define the formula of the cells.

    Returns:
        The new cells.
    """
    # Outside formulas only
    return self._impl.new_cells(name, formula).interface
Create a cells in the space. Args: name: If omitted, the model is named automatically ``CellsN``, where ``N`` is an available number. formula: The function to define the formula of the cells. Returns: The new cells.
def _compute_ll_matrix(self, idx, param_vals, num_pts): """Recursive helper function for compute_ll_matrix. Parameters ---------- idx : int The index of the parameter for this layer of the recursion to work on. `idx` == len(`num_pts`) is the base case that terminates the recursion. param_vals : List of :py:class:`Array` List of arrays of parameter values. Entries in the slots 0:`idx` are set to scalars by the previous levels of recursion. num_pts : :py:class:`Array` The numbers of points for each parameter. Returns ------- vals : :py:class:`Array` The log likelihood for each of the parameter possibilities at lower levels. """ if idx >= len(num_pts): # Base case: All entries in param_vals should be scalars: return -1.0 * self.update_hyperparameters( scipy.asarray(param_vals, dtype=float) ) else: # Recursive case: call _compute_ll_matrix for each entry in param_vals[idx]: vals = scipy.zeros(num_pts[idx:], dtype=float) for k in xrange(0, len(param_vals[idx])): specific_param_vals = list(param_vals) specific_param_vals[idx] = param_vals[idx][k] vals[k] = self._compute_ll_matrix( idx + 1, specific_param_vals, num_pts ) return vals
Recursive helper function for compute_ll_matrix. Parameters ---------- idx : int The index of the parameter for this layer of the recursion to work on. `idx` == len(`num_pts`) is the base case that terminates the recursion. param_vals : List of :py:class:`Array` List of arrays of parameter values. Entries in the slots 0:`idx` are set to scalars by the previous levels of recursion. num_pts : :py:class:`Array` The numbers of points for each parameter. Returns ------- vals : :py:class:`Array` The log likelihood for each of the parameter possibilities at lower levels.
def build(self, X, Y, w=None, edges=None): """ Assigns data to this object and builds the Morse-Smale Complex @ In, X, an m-by-n array of values specifying m n-dimensional samples @ In, Y, a m vector of values specifying the output responses corresponding to the m samples specified by X @ In, w, an optional m vector of values specifying the weights associated to each of the m samples used. Default of None means all points will be equally weighted @ In, edges, an optional list of custom edges to use as a starting point for pruning, or in place of a computed graph. """ super(MorseComplex, self).build(X, Y, w, edges) if self.debug: sys.stdout.write("Decomposition: ") start = time.clock() morse_complex = MorseComplexFloat( vectorFloat(self.Xnorm.flatten()), vectorFloat(self.Y), str(self.gradient), str(self.simplification), vectorFloat(self.w), self.graph_rep.full_graph(), self.debug, ) self.__amc = morse_complex self.persistences = [] self.merge_sequence = {} morse_complex_json = json.loads(morse_complex.to_json()) hierarchy = morse_complex_json["Hierarchy"] for merge in hierarchy: self.persistences.append(merge["Persistence"]) self.merge_sequence[merge["Dying"]] = ( merge["Persistence"], merge["Surviving"], merge["Saddle"], ) self.persistences = sorted(list(set(self.persistences))) partitions = morse_complex_json["Partitions"] self.base_partitions = {} for i, label in enumerate(partitions): if label not in self.base_partitions: self.base_partitions[label] = [] self.base_partitions[label].append(i) self.max_indices = list(self.base_partitions.keys()) if self.debug: end = time.clock() sys.stdout.write("%f s\n" % (end - start))
Assigns data to this object and builds the Morse-Smale Complex @ In, X, an m-by-n array of values specifying m n-dimensional samples @ In, Y, an m vector of values specifying the output responses corresponding to the m samples specified by X @ In, w, an optional m vector of values specifying the weights associated to each of the m samples used. Default of None means all points will be equally weighted @ In, edges, an optional list of custom edges to use as a starting point for pruning, or in place of a computed graph.
def create_release_id(short, version, type, bp_short=None, bp_version=None, bp_type=None):
    """
    Create release_id from given parts.

    :param short: Release short name
    :type short: str
    :param version: Release version
    :type version: str
    :param type: Release type
    :type type: str
    :param bp_short: Base Product short name
    :type bp_short: str
    :param bp_version: Base Product version
    :type bp_version: str
    :param bp_type: Base Product type
    :type bp_type: str
    :rtype: str
    """
    if not is_valid_release_short(short):
        raise ValueError("Release short name is not valid: %s" % short)

    if not is_valid_release_version(version):
        raise ValueError("Release version is not valid: %s" % version)

    if not is_valid_release_type(type):
        raise ValueError("Release type is not valid: %s" % type)

    if type == "ga":
        result = "%s-%s" % (short, version)
    else:
        result = "%s-%s-%s" % (short, version, type)

    if bp_short:
        result += "@%s" % create_release_id(bp_short, bp_version, bp_type)

    return result
Create release_id from given parts. :param short: Release short name :type short: str :param version: Release version :type version: str :param type: Release type :type type: str :param bp_short: Base Product short name :type bp_short: str :param bp_version: Base Product version :type bp_version: str :param bp_type: Base Product type :type bp_type: str :rtype: str
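For orientation, here is a small, self-contained sketch of the same composition rule ("ga" releases omit the type suffix, a base product is appended after "@"). The validation helpers used by the real function are left out, so this is illustrative only and the function name is hypothetical:

def make_release_id(short, version, type_, bp=None):
    # "ga" releases are written as SHORT-VERSION, everything else as SHORT-VERSION-TYPE.
    rid = "%s-%s" % (short, version) if type_ == "ga" else "%s-%s-%s" % (short, version, type_)
    # A base product, given as another (short, version, type) triple, is appended after "@".
    if bp:
        rid += "@" + make_release_id(*bp)
    return rid

print(make_release_id("fedora", "30", "updates"))                      # fedora-30-updates
print(make_release_id("rhel", "8.1.0", "ga", bp=("rhel", "8", "ga")))  # rhel-8.1.0@rhel-8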
def graph_from_seeds(seeds, cell_source): """ This creates/updates a networkx graph from a list of cells. The graph is created when the cell_source is an instance of ExcelCompiler The graph is updated when the cell_source is an instance of Spreadsheet """ # when called from Spreadsheet instance, use the Spreadsheet cellmap and graph if hasattr(cell_source, 'G'): # ~ cell_source is a Spreadsheet cellmap = cell_source.cellmap cells = cellmap G = cell_source.G for c in seeds: G.add_node(c) cellmap[c.address()] = c # when called from ExcelCompiler instance, construct cellmap and graph from seeds else: # ~ cell_source is a ExcelCompiler cellmap = dict([(x.address(),x) for x in seeds]) cells = cell_source.cells # directed graph G = networkx.DiGraph() # match the info in cellmap for c in cellmap.values(): G.add_node(c) # cells to analyze: only formulas todo = [s for s in seeds if s.formula] steps = [i for i,s in enumerate(todo)] names = cell_source.named_ranges while todo: c1 = todo.pop() step = steps.pop() cursheet = c1.sheet ###### 1) looking for cell c1 dependencies #################### # print 'C1', c1.address() # in case a formula, get all cells that are arguments pystr, ast = cell2code(c1, names) # set the code & compile it (will flag problems sooner rather than later) c1.python_expression = pystr.replace('"', "'") # compilation is done later if 'OFFSET' in c1.formula or 'INDEX' in c1.formula: if c1.address() not in cell_source.named_ranges: # pointers names already treated in ExcelCompiler cell_source.pointers.add(c1.address()) # get all the cells/ranges this formula refers to deps = [x for x in ast.nodes() if isinstance(x,RangeNode)] # remove dupes deps = uniqueify(deps) ###### 2) connect dependencies in cells in graph #################### # ### LOG # tmp = [] # for dep in deps: # if dep not in names: # if "!" not in dep and cursheet != None: # dep = cursheet + "!" + dep # if dep not in cellmap: # tmp.append(dep) # #deps = tmp # logStep = "%s %s = %s " % ('|'*step, c1.address(), '',) # print logStep # if len(deps) > 1 and 'L' in deps[0] and deps[0] == deps[-1].replace('DG','L'): # print logStep, "[%s...%s]" % (deps[0], deps[-1]) # elif len(deps) > 0: # print logStep, "->", deps # else: # print logStep, "done" for dep in deps: dep_name = dep.tvalue.replace('$','') # this is to avoid :A1 or A1: dep due to clean_pointers() returning an ExcelError if dep_name.startswith(':') or dep_name.endswith(':'): dep_name = dep_name.replace(':', '') # if not pointer, we need an absolute address if dep.tsubtype != 'pointer' and dep_name not in names and "!" not in dep_name and cursheet != None: dep_name = cursheet + "!" 
+ dep_name # Named_ranges + ranges already parsed (previous iterations) if dep_name in cellmap: origins = [cellmap[dep_name]] target = cellmap[c1.address()] # if the dep_name is a multi-cell range, create a range object elif is_range(dep_name) or (dep_name in names and is_range(names[dep_name])): if dep_name in names: reference = names[dep_name] else: reference = dep_name if 'OFFSET' in reference or 'INDEX' in reference: start_end = prepare_pointer(reference, names, ref_cell = c1) rng = cell_source.Range(start_end) if dep_name in names: # dep is a pointer range address = dep_name else: if c1.address() in names: # c1 holds is a pointer range address = c1.address() else: # a pointer range with no name, its address will be its name address = '%s:%s' % (start_end["start"], start_end["end"]) cell_source.pointers.add(address) else: address = dep_name # get a list of the addresses in this range that are not yet in the graph range_addresses = list(resolve_range(reference, should_flatten=True)[0]) cellmap_add_addresses = [addr for addr in range_addresses if addr not in cellmap.keys()] if len(cellmap_add_addresses) > 0: # this means there are cells to be added # get row and col dimensions for the sheet, assuming the whole range is in one sheet sheet_initial = split_address(cellmap_add_addresses[0])[0] max_rows, max_cols = max_dimension(cellmap, sheet_initial) # create empty cells that aren't in the cellmap for addr in cellmap_add_addresses: sheet_new, col_new, row_new = split_address(addr) # if somehow a new sheet comes up in the range, get the new dimensions if sheet_new != sheet_initial: sheet_initial = sheet_new max_rows, max_cols = max_dimension(cellmap, sheet_new) # add the empty cells if int(row_new) <= max_rows and int(col2num(col_new)) <= max_cols: # only add cells within the maximum bounds of the sheet to avoid too many evaluations # for A:A or 1:1 ranges cell_new = Cell(addr, sheet_new, value="", should_eval='False') # create new cell object cellmap[addr] = cell_new # add it to the cellmap G.add_node(cell_new) # add it to the graph cell_source.cells[addr] = cell_new # add it to the cell_source, used in this function rng = cell_source.Range(reference) if address in cellmap: virtual_cell = cellmap[address] else: virtual_cell = Cell(address, None, value = rng, formula = reference, is_range = True, is_named_range = True ) # save the range cellmap[address] = virtual_cell # add an edge from the range to the parent G.add_node(virtual_cell) # Cell(A1:A10) -> c1 or Cell(ExampleName) -> c1 G.add_edge(virtual_cell, c1) # cells in the range should point to the range as their parent target = virtual_cell origins = [] if len(list(rng.keys())) != 0: # could be better, but can't check on Exception types here... 
for child in rng.addresses: if child not in cellmap: origins.append(cells[child]) else: origins.append(cellmap[child]) else: # not a range if dep_name in names: reference = names[dep_name] else: reference = dep_name if reference in cells: if dep_name in names: virtual_cell = Cell(dep_name, None, value = cells[reference].value, formula = reference, is_range = False, is_named_range = True ) G.add_node(virtual_cell) G.add_edge(cells[reference], virtual_cell) origins = [virtual_cell] else: cell = cells[reference] origins = [cell] cell = origins[0] if cell.formula is not None and ('OFFSET' in cell.formula or 'INDEX' in cell.formula): cell_source.pointers.add(cell.address()) else: virtual_cell = Cell(dep_name, None, value = None, formula = None, is_range = False, is_named_range = True ) origins = [virtual_cell] target = c1 # process each cell for c2 in flatten(origins): # if we havent treated this cell allready if c2.address() not in cellmap: if c2.formula: # cell with a formula, needs to be added to the todo list todo.append(c2) steps.append(step+1) else: # constant cell, no need for further processing, just remember to set the code pystr,ast = cell2code(c2, names) c2.python_expression = pystr c2.compile() # save in the cellmap cellmap[c2.address()] = c2 # add to the graph G.add_node(c2) # add an edge from the cell to the parent (range or cell) if(target != []): # print "Adding edge %s --> %s" % (c2.address(), target.address()) G.add_edge(c2,target) c1.compile() # cell compilation is done here because pointer ranges might update python_expressions return (cellmap, G)
This creates/updates a networkx graph from a list of cells. The graph is created when the cell_source is an instance of ExcelCompiler The graph is updated when the cell_source is an instance of Spreadsheet
def economic_svd(G, epsilon=sqrt(finfo(float).eps)):
    r"""Economic Singular Value Decomposition.

    Args:
        G (array_like): Matrix to be factorized.
        epsilon (float): Threshold on the square root of the eigen values.
                         Default is ``sqrt(finfo(float).eps)``.

    Returns:
        :class:`numpy.ndarray`: Unitary matrix.
        :class:`numpy.ndarray`: Singular values.
        :class:`numpy.ndarray`: Unitary matrix.

    See Also
    --------
    numpy.linalg.svd : Singular value decomposition.
    scipy.linalg.svd : Singular value decomposition.
    """
    from scipy.linalg import svd

    G = asarray(G, float)
    (U, S, V) = svd(G, full_matrices=False, check_finite=False)
    ok = S >= epsilon
    S = S[ok]
    U = U[:, ok]
    V = V[ok, :]
    return (U, S, V)
Economic Singular Value Decomposition. Args: G (array_like): Matrix to be factorized. epsilon (float): Threshold on the square root of the eigen values. Default is ``sqrt(finfo(float).eps)``. Returns: :class:`numpy.ndarray`: Unitary matrix. :class:`numpy.ndarray`: Singular values. :class:`numpy.ndarray`: Unitary matrix. See Also -------- numpy.linalg.svd : Singular value decomposition. scipy.linalg.svd : Singular value decomposition.
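A quick, self-contained way to see the effect of the threshold: factorize a rank-deficient matrix and keep only the singular values above epsilon. This is a plain numpy/scipy usage sketch, not part of the library above:

import numpy as np
from scipy.linalg import svd

def economic_svd_demo(G, epsilon=np.sqrt(np.finfo(float).eps)):
    G = np.asarray(G, float)
    U, S, V = svd(G, full_matrices=False, check_finite=False)
    keep = S >= epsilon          # drop numerically-zero singular values
    return U[:, keep], S[keep], V[keep, :]

# A 4x3 matrix of rank 2: the third column is the sum of the first two.
A = np.random.RandomState(0).randn(4, 2)
G = np.column_stack([A, A.sum(axis=1)])

U, S, V = economic_svd_demo(G)
print(S.shape)                                # (2,) -- only two significant singular values
print(np.allclose(U @ np.diag(S) @ V, G))     # True -- the truncated factors still reproduce G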
def invoke(self, headers, body):
    """ Invokes the soap service """
    xml = Service._create_request(headers, body)
    try:
        response = self.session.post(self.endpoint, verify=False, data=xml)
        logging.debug(response.content)
    except Exception as e:
        traceback.print_exc()
        raise WSManException(e)

    if response.status_code == 200:
        return Service._parse_response(response.content)
    if response.status_code == 401:
        raise WSManAuthenticationException('the remote host rejected authentication')
    raise WSManException('the remote host returned an unexpected http status code: %s' % response.status_code)
Invokes the soap service
def autohook(ui, repo, hooktype, **kwargs): """Look for hooks inside the repository to run.""" cmd = hooktype.replace("-", "_") if not repo or not cmd.replace("_", "").isalpha(): return False result = False trusted = ui.configlist("autohooks", "trusted") if "" not in trusted: default_path = ui.config("paths", "default") if not default_path: return False for match in trusted: if default_path.startswith(match): break else: return False for hookdir in ("hg-autohooks", ".hg-autohooks"): dirname = os.path.join(repo.root, hookdir) if not os.path.exists(dirname): continue for leafname in os.listdir(dirname): if not leafname.startswith(cmd + "."): continue filename = os.path.join(dirname, leafname) result = _runhook(ui, repo, hooktype, filename, kwargs) or result return result
Look for hooks inside the repository to run.
def qscan(self, cursor=0, count=None, busyloop=None, minlen=None,
          maxlen=None, importrate=None):
    """
    Iterate all the existing queues in the local node.

    :param count: A hint about how much work to do per iteration.
    :param busyloop: Block and return all the elements in a busy loop.
    :param minlen: Don't return elements with less than count jobs queued.
    :param maxlen: Don't return elements with more than count jobs queued.
    :param importrate: Only return elements with a job import rate
                       (from other nodes) >= rate.
    """
    command = ["QSCAN", cursor]
    if count:
        command += ["COUNT", count]
    if busyloop:
        command += ["BUSYLOOP"]
    if minlen:
        command += ["MINLEN", minlen]
    if maxlen:
        command += ["MAXLEN", maxlen]
    if importrate:
        command += ["IMPORTRATE", importrate]
    return self.execute_command(*command)
Iterate all the existing queues in the local node. :param count: A hint about how much work to do per iteration. :param busyloop: Block and return all the elements in a busy loop. :param minlen: Don't return elements with less than count jobs queued. :param maxlen: Don't return elements with more than count jobs queued. :param importrate: Only return elements with a job import rate (from other nodes) >= rate.
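The method above is mostly about assembling the QSCAN command arguments from optional keyword parameters. A self-contained sketch of that pattern (no Disque connection involved; the command list is simply returned, and the function name is hypothetical):

def build_qscan_command(cursor=0, count=None, busyloop=None,
                        minlen=None, maxlen=None, importrate=None):
    # Start with the command name and cursor, then append only the
    # options that were actually supplied.
    command = ["QSCAN", cursor]
    if count:
        command += ["COUNT", count]
    if busyloop:
        command += ["BUSYLOOP"]
    if minlen:
        command += ["MINLEN", minlen]
    if maxlen:
        command += ["MAXLEN", maxlen]
    if importrate:
        command += ["IMPORTRATE", importrate]
    return command

print(build_qscan_command())                      # ['QSCAN', 0]
print(build_qscan_command(count=128, minlen=5))   # ['QSCAN', 0, 'COUNT', 128, 'MINLEN', 5]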
def filter(self, filter_func, reverse=False): """Filter current log lines by a given filter function. This allows to drill down data out of the log file by filtering the relevant log lines to analyze. For example, filter by a given IP so only log lines for that IP are further processed with commands (top paths, http status counter...). :param filter_func: [required] Filter method, see filters.py for all available filters. :type filter_func: function :param reverse: negate the filter (so accept all log lines that return ``False``). :type reverse: boolean :returns: a new instance of Log containing only log lines that passed the filter function. :rtype: :class:`Log` """ new_log_file = Log() new_log_file.logfile = self.logfile new_log_file.total_lines = 0 new_log_file._valid_lines = [] new_log_file._invalid_lines = self._invalid_lines[:] # add the reverse conditional outside the loop to keep the loop as # straightforward as possible if not reverse: for i in self._valid_lines: if filter_func(i): new_log_file.total_lines += 1 new_log_file._valid_lines.append(i) else: for i in self._valid_lines: if not filter_func(i): new_log_file.total_lines += 1 new_log_file._valid_lines.append(i) return new_log_file
Filter current log lines by a given filter function. This allows to drill down data out of the log file by filtering the relevant log lines to analyze. For example, filter by a given IP so only log lines for that IP are further processed with commands (top paths, http status counter...). :param filter_func: [required] Filter method, see filters.py for all available filters. :type filter_func: function :param reverse: negate the filter (so accept all log lines that return ``False``). :type reverse: boolean :returns: a new instance of Log containing only log lines that passed the filter function. :rtype: :class:`Log`
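The filtering logic above boils down to copying the lines for which a predicate holds (or does not hold, when reversed) into a fresh container. A minimal stand-alone sketch over plain strings, with hypothetical sample data:

def filter_lines(lines, filter_func, reverse=False):
    # Keep lines the predicate accepts; with reverse=True keep the rejected ones.
    if reverse:
        return [line for line in lines if not filter_func(line)]
    return [line for line in lines if filter_func(line)]

log_lines = [
    "10.0.0.1 GET /index.html 200",
    "10.0.0.2 GET /missing 404",
    "10.0.0.1 POST /login 200",
]

only_first_ip = filter_lines(log_lines, lambda l: l.startswith("10.0.0.1"))
errors_only = filter_lines(log_lines, lambda l: " 200" in l, reverse=True)
print(len(only_first_ip), len(errors_only))  # 2 1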
def _store_outputs_in_object_store(self, object_ids, outputs):
    """Store the outputs of a remote function in the local object store.

    This stores the values that were returned by a remote function in the
    local object store. If any of the return values are object IDs, then
    these object IDs are aliased with the object IDs that the scheduler
    assigned for the return values. This is called by the worker that
    executes the remote function.

    Note:
        The arguments object_ids and outputs should have the same length.

    Args:
        object_ids (List[ObjectID]): The object IDs that were assigned to the
            outputs of the remote function call.
        outputs (Tuple): The value returned by the remote function. If the
            remote function was supposed to only return one value, then its
            output was wrapped in a tuple with one element prior to being
            passed into this function.
    """
    for i in range(len(object_ids)):
        if isinstance(outputs[i], ray.actor.ActorHandle):
            raise Exception("Returning an actor handle from a remote "
                            "function is not allowed.")
        if outputs[i] is ray.experimental.no_return.NoReturn:
            if not self.plasma_client.contains(
                    pyarrow.plasma.ObjectID(object_ids[i].binary())):
                raise RuntimeError(
                    "Attempting to return 'ray.experimental.NoReturn' "
                    "from a remote function, but the corresponding "
                    "ObjectID does not exist in the local object store.")
        else:
            self.put_object(object_ids[i], outputs[i])
Store the outputs of a remote function in the local object store. This stores the values that were returned by a remote function in the local object store. If any of the return values are object IDs, then these object IDs are aliased with the object IDs that the scheduler assigned for the return values. This is called by the worker that executes the remote function. Note: The arguments object_ids and outputs should have the same length. Args: object_ids (List[ObjectID]): The object IDs that were assigned to the outputs of the remote function call. outputs (Tuple): The value returned by the remote function. If the remote function was supposed to only return one value, then its output was wrapped in a tuple with one element prior to being passed into this function.
def _fix_attribute_names(attrs, change_map):
    """
    Change attribute names as per values in change_map dictionary.

    Parameters
    ----------
    :param attrs : dict Dict of operator attributes
    :param change_map : dict Dict of onnx attribute name to mxnet attribute names.

    Returns
    -------
    :return new_attr : dict Converted dict of operator attributes.
    """
    new_attr = {}
    for k in attrs.keys():
        if k in change_map:
            new_attr[change_map[k]] = attrs[k]
        else:
            new_attr[k] = attrs[k]
    return new_attr
Change attribute names as per values in change_map dictionary. Parameters ---------- :param attrs : dict Dict of operator attributes :param change_map : dict Dict of onnx attribute name to mxnet attribute names. Returns ------- :return new_attr : dict Converted dict of operator attributes.
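In other words, keys present in the mapping are renamed and everything else passes through unchanged. A self-contained example of the same key-renaming idiom (the attribute names and mapping below are made up for illustration):

def rename_keys(attrs, change_map):
    # Rename keys found in change_map; keep all other keys as-is.
    return {change_map.get(k, k): v for k, v in attrs.items()}

onnx_attrs = {"axis": 1, "keepdims": 0, "select_last_index": 0}
change_map = {"keepdims": "keepdims_mx"}  # hypothetical mapping

print(rename_keys(onnx_attrs, change_map))
# {'axis': 1, 'keepdims_mx': 0, 'select_last_index': 0}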
def walkWords(self, showPadding: bool=False): """ Walk enumerated words in this frame :attention: not all indexes has to be present, only words with items will be generated when not showPadding :param showPadding: padding TransParts are also present :return: generator of tuples (wordIndex, list of TransParts in this word) """ wIndex = 0 lastEnd = self.startBitAddr parts = [] for p in self.parts: end = p.startOfPart if showPadding and end != lastEnd: # insert padding while end != lastEnd: assert end >= lastEnd, (end, lastEnd) endOfWord = ceil( (lastEnd + 1) / self.wordWidth) * self.wordWidth endOfPadding = min(endOfWord, end) _p = TransPart(self, None, lastEnd, endOfPadding, 0) parts.append(_p) if endOfPadding >= endOfWord: yield (wIndex, parts) wIndex += 1 parts = [] lastEnd = endOfPadding if self._wordIndx(lastEnd) != self._wordIndx(p.startOfPart): yield (wIndex, parts) wIndex += 1 parts = [] lastEnd = p.endOfPart parts.append(p) lastEnd = p.endOfPart if lastEnd % self.wordWidth == 0: yield (wIndex, parts) wIndex += 1 parts = [] if showPadding and (parts or lastEnd != self.endBitAddr or lastEnd % self.wordWidth != 0): # align end to end of last word end = ceil(self.endBitAddr / self.wordWidth) * self.wordWidth while end != lastEnd: assert end >= lastEnd, (end, lastEnd) endOfWord = ((lastEnd // self.wordWidth) + 1) * self.wordWidth endOfPadding = min(endOfWord, end) _p = TransPart(self, None, lastEnd, endOfPadding, 0) _p.parent = self parts.append(_p) if endOfPadding >= endOfWord: yield (wIndex, parts) wIndex += 1 parts = [] lastEnd = endOfPadding if parts: # in the case end of frame is not aligned to end of word yield (wIndex, parts)
Walk enumerated words in this frame :attention: not all indexes have to be present; only words with items will be generated when showPadding is False :param showPadding: padding TransParts are also present :return: generator of tuples (wordIndex, list of TransParts in this word)
def get_base_path() -> Path:
    """
    Get the base data path for EFB. This can be defined by the
    environment variable ``EFB_DATA_PATH``.

    If ``EFB_DATA_PATH`` is not defined, this gives
    ``~/.ehforwarderbot``.

    This method creates the queried path if not existing.

    Returns:
        The base path.
    """
    env_data_path = os.environ.get("EFB_DATA_PATH", None)
    if env_data_path:
        base_path = Path(env_data_path).resolve()
    else:
        base_path = Path.home() / ".ehforwarderbot"
    if not base_path.exists():
        base_path.mkdir(parents=True)
    return base_path
Get the base data path for EFB. This can be defined by the environment variable ``EFB_DATA_PATH``. If ``EFB_DATA_PATH`` is not defined, this gives ``~/.ehforwarderbot``. This method creates the queried path if not existing. Returns: The base path.
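The same environment-variable-with-fallback pattern, written as a generic, runnable sketch; the variable name and default directory here are placeholders, not the ones used above:

import os
from pathlib import Path

def data_dir(env_var="MYAPP_DATA_PATH", default=".myapp"):
    # Prefer an explicitly configured path, otherwise fall back to a
    # dot-directory in the user's home, creating it on first use.
    configured = os.environ.get(env_var)
    base = Path(configured).resolve() if configured else Path.home() / default
    base.mkdir(parents=True, exist_ok=True)
    return base

print(data_dir())  # e.g. /home/user/.myapp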
def commit_confirmed(name): ''' .. versionadded:: 2019.2.0 Confirm a commit scheduled to be reverted via the ``revert_in`` and ``revert_at`` arguments from the :mod:`net.load_template <salt.modules.napalm_network.load_template>` or :mod:`net.load_config <salt.modules.napalm_network.load_config>` execution functions. The commit ID is displayed when the commit confirmed is scheduled via the functions named above. State SLS Example: .. code-block:: yaml '20180726083540640360': netconfig.commit_confirmed ''' confirmed = { 'name': name, 'result': None, 'changes': {}, 'comment': '' } if __opts__['test']: confirmed['comment'] = 'It would confirm commit #{}'.format(name) return confirmed ret = __salt__['net.confirm_commit'](name) confirmed.update(ret) return confirmed
.. versionadded:: 2019.2.0 Confirm a commit scheduled to be reverted via the ``revert_in`` and ``revert_at`` arguments from the :mod:`net.load_template <salt.modules.napalm_network.load_template>` or :mod:`net.load_config <salt.modules.napalm_network.load_config>` execution functions. The commit ID is displayed when the commit confirmed is scheduled via the functions named above. State SLS Example: .. code-block:: yaml '20180726083540640360': netconfig.commit_confirmed
def describe_ring(self, keyspace):
    """
    get the token ring: a map of ranges to host addresses,
    represented as a set of TokenRange instead of a map from range
    to list of endpoints, because you can't use Thrift structs as
    map keys:
    https://issues.apache.org/jira/browse/THRIFT-162

    for the same reason, we can't return a set here, even though
    order is neither important nor predictable.

    Parameters:
     - keyspace
    """
    self._seqid += 1
    d = self._reqs[self._seqid] = defer.Deferred()
    self.send_describe_ring(keyspace)
    return d
get the token ring: a map of ranges to host addresses, represented as a set of TokenRange instead of a map from range to list of endpoints, because you can't use Thrift structs as map keys: https://issues.apache.org/jira/browse/THRIFT-162 for the same reason, we can't return a set here, even though order is neither important nor predictable. Parameters: - keyspace
def trainTopicOnTweets(self, twitterQuery, useTweetText=True, useIdfNormalization=True, normalization="linear", maxTweets=2000, maxUsedLinks=500, ignoreConceptTypes=[], maxConcepts = 20, maxCategories = 10, notifyEmailAddress = None): """ create a new topic and train it using the tweets that match the twitterQuery @param twitterQuery: string containing the content to search for. It can be a Twitter user account (using "@" prefix or user's Twitter url), a hash tag (using "#" prefix) or a regular keyword. @param useTweetText: do you want to analyze the content of the tweets and extract the concepts mentioned in them? If False, only content shared in the articles in the user's tweets will be analyzed @param useIdfNormalization: normalize identified concepts by their IDF in the news (punish very common concepts) @param normalization: way to normalize the concept weights ("none", "linear") @param maxTweets: maximum number of tweets to collect (default 2000, max 5000) @param maxUsedLinks: maximum number of article links in the tweets to analyze (default 500, max 2000) @param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those @param maxConcepts: the number of concepts to save in the final topic @param maxCategories: the number of categories to save in the final topic @param maxTweets: the maximum number of tweets to collect for the user to analyze @param notifyEmailAddress: when finished, should we send a notification email to this address? """ assert maxTweets < 5000, "we can analyze at most 5000 tweets" params = {"twitterQuery": twitterQuery, "useTweetText": useTweetText, "useIdfNormalization": useIdfNormalization, "normalization": normalization, "maxTweets": maxTweets, "maxUsedLinks": maxUsedLinks, "maxConcepts": maxConcepts, "maxCategories": maxCategories } if notifyEmailAddress: params["notifyEmailAddress"] = notifyEmailAddress if len(ignoreConceptTypes) > 0: params["ignoreConceptTypes"] = ignoreConceptTypes return self._er.jsonRequestAnalytics("/api/v1/trainTopicOnTwitter", params)
create a new topic and train it using the tweets that match the twitterQuery @param twitterQuery: string containing the content to search for. It can be a Twitter user account (using "@" prefix or user's Twitter url), a hash tag (using "#" prefix) or a regular keyword. @param useTweetText: do you want to analyze the content of the tweets and extract the concepts mentioned in them? If False, only content shared in the articles in the user's tweets will be analyzed @param useIdfNormalization: normalize identified concepts by their IDF in the news (punish very common concepts) @param normalization: way to normalize the concept weights ("none", "linear") @param maxTweets: maximum number of tweets to collect (default 2000, max 5000) @param maxUsedLinks: maximum number of article links in the tweets to analyze (default 500, max 2000) @param ignoreConceptTypes: what types of concepts you would like to ignore in the profile. options: person, org, loc, wiki or an array with those @param maxConcepts: the number of concepts to save in the final topic @param maxCategories: the number of categories to save in the final topic @param notifyEmailAddress: when finished, should we send a notification email to this address?
def get_slab_stats(self):
    """Retrieve slab stats from memcached."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((self.host, self.port))
    s.send("stats slabs\n")
    try:
        data = ""
        while True:
            data += s.recv(4096)
            if data.endswith('END\r\n'):
                break
        return data
    finally:
        s.close()
Retrieve slab stats from memcached.
def makeB(self, buses=None, branches=None, method="XB"): """ Based on makeB.m from MATPOWER by Ray Zimmerman, developed at PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more information. @param method: Specify "XB" or "BX" method. @type method: string @rtype: tuple @return: Two matrices, B prime and B double prime, used in the fast decoupled power flow solver. """ buses = self.connected_buses if buses is None else buses branches = self.online_branches if branches is None else branches B_buses = copy.deepcopy(buses) # modify bus copies Bp_branches = copy.deepcopy(branches) # modify branch copies Bpp_branches = copy.deepcopy(branches) for bus in B_buses: bus.b_shunt = 0.0 for branch in Bp_branches: branch.b = 0.0 branch.ratio = 1.0 if method == "XB": branch.r = 0.0 Yp, _, _ = self.getYbus(B_buses, Bp_branches) for branch in Bpp_branches: branch.phase_shift = 0.0 if method == "BX": branch.r = 0.0 Ypp, _, _ = self.getYbus(B_buses, Bpp_branches) del B_buses del Bp_branches return -Yp.imag, -Ypp.imag
Based on makeB.m from MATPOWER by Ray Zimmerman, developed at PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more information. @param method: Specify "XB" or "BX" method. @type method: string @rtype: tuple @return: Two matrices, B prime and B double prime, used in the fast decoupled power flow solver.
def user(context, mail):
    """Delete a user from the database"""
    LOG.info("Running scout delete user")
    adapter = context.obj['adapter']
    user_obj = adapter.user(mail)
    if not user_obj:
        LOG.warning("User {0} could not be found in database".format(mail))
    else:
        adapter.delete_user(mail)
Delete a user from the database
def newTextChild(self, ns, name, content): """Creation of a new child element, added at the end of @parent children list. @ns and @content parameters are optional (None). If @ns is None, the newly created element inherits the namespace of @parent. If @content is non None, a child TEXT node will be created containing the string @content. NOTE: Use xmlNewChild() if @content will contain entities that need to be preserved. Use this function, xmlNewTextChild(), if you need to ensure that reserved XML chars that might appear in @content, such as the ampersand, greater-than or less-than signs, are automatically replaced by their XML escaped entity representations. """ if ns is None: ns__o = None else: ns__o = ns._o ret = libxml2mod.xmlNewTextChild(self._o, ns__o, name, content) if ret is None:raise treeError('xmlNewTextChild() failed') __tmp = xmlNode(_obj=ret) return __tmp
Creation of a new child element, added at the end of @parent children list. @ns and @content parameters are optional (None). If @ns is None, the newly created element inherits the namespace of @parent. If @content is non None, a child TEXT node will be created containing the string @content. NOTE: Use xmlNewChild() if @content will contain entities that need to be preserved. Use this function, xmlNewTextChild(), if you need to ensure that reserved XML chars that might appear in @content, such as the ampersand, greater-than or less-than signs, are automatically replaced by their XML escaped entity representations.
def color_grid(data, palette, denom=9.0, mask_zeros=True): """ Convert the given data (2d array of numbers or binary strings) to a 2d array of RGB or RGBA values which can then be visualized as a heat map. Arguments: data - 2d array of numbers or binary strings palette - a seaborn palette (list of RGB values) indicating how to convert data to colors. Will be converted to a continuous colormap if necessary. This should generally be the length of the longest binary string or the highest possible number denom - if the data is composed of numbers rather than binary strings, this number will indicate how to normalize the data to [0, 1] should it be neccessary. mask_zeros - Boolean indicating whether 0s should be colored white rather than the color specified by the palette. -1s always yield -1 so that missing data can be handled appropriately. """ grid = [] try: # If this isn't numeric, don't bother with this block float(data[0][0]) # This is continuous data - we need a colormap rather than palette palette = matplotlib.colors.LinearSegmentedColormap.from_list( "color_grid", palette) palette.set_bad(alpha=0) except: pass for row in range(len(data)): grid.append([]) for col in range(len(data[row])): try: rgb = color_array_by_value(data[row][col], palette, denom, mask_zeros) except: rgb = color_array_by_hue_mix(data[row][col], palette) grid[row].append(rgb) return grid
Convert the given data (2d array of numbers or binary strings) to a 2d array of RGB or RGBA values which can then be visualized as a heat map. Arguments: data - 2d array of numbers or binary strings palette - a seaborn palette (list of RGB values) indicating how to convert data to colors. Will be converted to a continuous colormap if necessary. This should generally be the length of the longest binary string or the highest possible number denom - if the data is composed of numbers rather than binary strings, this number will indicate how to normalize the data to [0, 1] should it be necessary. mask_zeros - Boolean indicating whether 0s should be colored white rather than the color specified by the palette. -1s always yield -1 so that missing data can be handled appropriately.
def _filter_namespaces_by_route_whitelist(self): """ Given a parsed API in IR form, filter the user-defined datatypes so that they include only the route datatypes and their direct dependencies. """ assert self._routes is not None, "Missing route whitelist" assert 'route_whitelist' in self._routes assert 'datatype_whitelist' in self._routes # Get route whitelist in canonical form route_whitelist = {} for namespace_name, route_reprs in self._routes['route_whitelist'].items(): new_route_reprs = [] if route_reprs == ['*']: namespace = self.api.namespaces[namespace_name] new_route_reprs = [route.name_with_version() for route in namespace.routes] else: for route_repr in route_reprs: route_name, version = parse_route_name_and_version(route_repr) if version > 1: new_route_reprs.append('{}:{}'.format(route_name, version)) else: new_route_reprs.append(route_name) route_whitelist[namespace_name] = new_route_reprs # Parse the route whitelist and populate any starting data types route_data_types = [] for namespace_name, route_reprs in route_whitelist.items(): # Error out if user supplied nonexistent namespace if namespace_name not in self.api.namespaces: raise AssertionError('Namespace %s is not defined!' % namespace_name) namespace = self.api.namespaces[namespace_name] # Parse namespace doc refs and add them to the starting data types if namespace.doc is not None: route_data_types.extend( parse_data_types_from_doc_ref(self.api, namespace.doc, namespace_name)) # Parse user-specified routes and add them to the starting data types # Note that this may add duplicates, but that's okay, as the recursion # keeps track of visited data types. assert '*' not in route_reprs for routes_repr in route_reprs: route_name, version = parse_route_name_and_version(routes_repr) if route_name not in namespace.routes_by_name or \ version not in namespace.routes_by_name[route_name].at_version: raise AssertionError('Route %s at version %d is not defined!' % (route_name, version)) route = namespace.routes_by_name[route_name].at_version[version] route_data_types.extend(namespace.get_route_io_data_types_for_route(route)) if route.doc is not None: route_data_types.extend( parse_data_types_from_doc_ref(self.api, route.doc, namespace_name)) # Parse the datatype whitelist and populate any starting data types for namespace_name, datatype_names in self._routes['datatype_whitelist'].items(): if namespace_name not in self.api.namespaces: raise AssertionError('Namespace %s is not defined!' % namespace_name) # Parse namespace doc refs and add them to the starting data types namespace = self.api.namespaces[namespace_name] if namespace.doc is not None: route_data_types.extend( parse_data_types_from_doc_ref(self.api, namespace.doc, namespace_name)) for datatype_name in datatype_names: if datatype_name not in self.api.namespaces[namespace_name].data_type_by_name: raise AssertionError('Datatype %s is not defined!' % datatype_name) data_type = self.api.namespaces[namespace_name].data_type_by_name[datatype_name] route_data_types.append(data_type) # Recurse on dependencies output_types_by_ns, output_routes_by_ns = self._find_dependencies(route_data_types) # Update the IR representation. This involves editing the data types and # routes for each namespace. 
for namespace in self.api.namespaces.values(): data_types = list(set(output_types_by_ns[namespace.name])) # defaults to empty list namespace.data_types = data_types namespace.data_type_by_name = {d.name: d for d in data_types} output_route_reprs = [output_route.name_with_version() for output_route in output_routes_by_ns[namespace.name]] if namespace.name in route_whitelist: whitelisted_route_reprs = route_whitelist[namespace.name] route_reprs = list(set(whitelisted_route_reprs + output_route_reprs)) else: route_reprs = output_route_reprs routes = [] for route_repr in route_reprs: route_name, version = parse_route_name_and_version(route_repr) route = namespace.routes_by_name[route_name].at_version[version] routes.append(route) namespace.routes = [] namespace.route_by_name = {} namespace.routes_by_name = {} for route in routes: namespace.add_route(route)
Given a parsed API in IR form, filter the user-defined datatypes so that they include only the route datatypes and their direct dependencies.
def onchange(self, new_value):
    """Called when the user changes the TextInput content.
    With single_line=True it fires in case of focus lost and Enter key pressed.
    With single_line=False it fires at each key released.

    Args:
        new_value (str): the new string content of the TextInput.
    """
    self.disable_refresh()
    self.set_value(new_value)
    self.enable_refresh()
    return (new_value, )
Called when the user changes the TextInput content. With single_line=True it fires in case of focus lost and Enter key pressed. With single_line=False it fires at each key released. Args: new_value (str): the new string content of the TextInput.
def _RemoveForemanRule(self):
    """Removes the foreman rule corresponding to this hunt."""
    if data_store.RelationalDBEnabled():
        data_store.REL_DB.RemoveForemanRule(hunt_id=self.session_id.Basename())
        return

    with aff4.FACTORY.Open(
            "aff4:/foreman", mode="rw", token=self.token) as foreman:
        aff4_rules = foreman.Get(foreman.Schema.RULES)
        aff4_rules = foreman.Schema.RULES(
            # Remove those rules which fire off this hunt id.
            [r for r in aff4_rules if r.hunt_id != self.session_id])
        foreman.Set(aff4_rules)
Removes the foreman rule corresponding to this hunt.
def get_editor_nodes(self, editor, node=None):
    """
    Returns the :class:`umbra.components.factory.script_editor.nodes.EditorNode` class Nodes with given editor.

    :param node: Node to start walking from.
    :type node: AbstractNode or AbstractCompositeNode or Object
    :param editor: Editor.
    :type editor: Editor
    :return: EditorNode nodes.
    :rtype: list
    """
    return [editor_node for editor_node in self.list_editor_nodes(node)
            if editor_node.editor == editor]
Returns the :class:`umbra.components.factory.script_editor.nodes.EditorNode` class Nodes with given editor. :param node: Node to start walking from. :type node: AbstractNode or AbstractCompositeNode or Object :param editor: Editor. :type editor: Editor :return: EditorNode nodes. :rtype: list
def send_sms(message, from_number, recipient_list, fail_silently=False, auth_user=None, auth_password=None, connection=None): """ Easy wrapper for sending a single message to a recipient list. All members of the recipient list will see the other recipients in the 'To' field. If auth_user is None, the EMAIL_HOST_USER setting is used. If auth_password is None, the EMAIL_HOST_PASSWORD setting is used. Note: The API for this method is frozen. New code wanting to extend the functionality should use the EmailMessage class directly. https://github.com/django/django/blob/master/django/core/mail/__init__.py#L40 """ connection = connection or get_sms_connection(username=auth_user, password=auth_password, fail_silently=fail_silently) mail = SMSMessage(message, from_number, recipient_list, connection=connection) return mail.send()
Easy wrapper for sending a single message to a recipient list. All members of the recipient list will see the other recipients in the 'To' field. If auth_user is None, the EMAIL_HOST_USER setting is used. If auth_password is None, the EMAIL_HOST_PASSWORD setting is used. Note: The API for this method is frozen. New code wanting to extend the functionality should use the EmailMessage class directly. https://github.com/django/django/blob/master/django/core/mail/__init__.py#L40
def require_Gtk(min_version=2): """ Make sure Gtk is properly initialized. :raises RuntimeError: if Gtk can not be properly initialized """ if not _in_X: raise RuntimeError('Not in X session.') if _has_Gtk < min_version: raise RuntimeError('Module gi.repository.Gtk not available!') if _has_Gtk == 2: logging.getLogger(__name__).warn( _("Missing runtime dependency GTK 3. Falling back to GTK 2 " "for password prompt")) from gi.repository import Gtk # if we attempt to create any GUI elements with no X server running the # program will just crash, so let's make a way to catch this case: if not Gtk.init_check(None)[0]: raise RuntimeError(_("X server not connected!")) return Gtk
Make sure Gtk is properly initialized. :raises RuntimeError: if Gtk can not be properly initialized
def walk(self, where="/"): """ Walk the pytables group hierarchy for pandas objects This generator will yield the group path, subgroups and pandas object names for each group. Any non-pandas PyTables objects that are not a group will be ignored. The `where` group itself is listed first (preorder), then each of its child groups (following an alphanumerical order) is also traversed, following the same procedure. .. versionadded:: 0.24.0 Parameters ---------- where : str, optional Group where to start walking. If not supplied, the root group is used. Yields ------ path : str Full path to a group (without trailing '/') groups : list of str names of the groups contained in `path` leaves : list of str names of the pandas objects contained in `path` """ _tables() self._check_if_open() for g in self._handle.walk_groups(where): if getattr(g._v_attrs, 'pandas_type', None) is not None: continue groups = [] leaves = [] for child in g._v_children.values(): pandas_type = getattr(child._v_attrs, 'pandas_type', None) if pandas_type is None: if isinstance(child, _table_mod.group.Group): groups.append(child._v_name) else: leaves.append(child._v_name) yield (g._v_pathname.rstrip('/'), groups, leaves)
Walk the pytables group hierarchy for pandas objects This generator will yield the group path, subgroups and pandas object names for each group. Any non-pandas PyTables objects that are not a group will be ignored. The `where` group itself is listed first (preorder), then each of its child groups (following an alphanumerical order) is also traversed, following the same procedure. .. versionadded:: 0.24.0 Parameters ---------- where : str, optional Group where to start walking. If not supplied, the root group is used. Yields ------ path : str Full path to a group (without trailing '/') groups : list of str names of the groups contained in `path` leaves : list of str names of the pandas objects contained in `path`
def rename_table(dbconn, original, new):
    """
    Rename a table in the database

    :param dbconn: database connection
    :param original: original table name
    :param new: new table name
    """
    cur = dbconn.cursor()
    cur.execute("ALTER TABLE '{original}' RENAME TO '{new}'".format(original=original, new=new))
Rename a table in the database :param dbconn: database connection :param original: original table name :param new: new table name
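A runnable end-to-end example of the same statement against an in-memory SQLite database (stdlib only). Note that interpolating table names like this is only safe for trusted input, since identifiers cannot be bound as query parameters:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)")
conn.execute("INSERT INTO users (name) VALUES ('alice')")

# Identifiers can't be passed as bound parameters, so the name is formatted in.
conn.execute('ALTER TABLE "{original}" RENAME TO "{new}"'.format(original="users", new="accounts"))

rows = conn.execute("SELECT name FROM accounts").fetchall()
print(rows)  # [('alice',)]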
def _get_reference(self):
    """
    Sets up references to important components. A reference is typically an
    index or a list of indices that point to the corresponding elements
    in a flatten array, which is how MuJoCo stores physical simulation data.
    """
    super()._get_reference()
    self.cubeA_body_id = self.sim.model.body_name2id("cubeA")
    self.cubeB_body_id = self.sim.model.body_name2id("cubeB")
    self.l_finger_geom_ids = [
        self.sim.model.geom_name2id(x) for x in self.gripper.left_finger_geoms
    ]
    self.r_finger_geom_ids = [
        self.sim.model.geom_name2id(x) for x in self.gripper.right_finger_geoms
    ]
    self.cubeA_geom_id = self.sim.model.geom_name2id("cubeA")
    self.cubeB_geom_id = self.sim.model.geom_name2id("cubeB")
Sets up references to important components. A reference is typically an index or a list of indices that point to the corresponding elements in a flatten array, which is how MuJoCo stores physical simulation data.
def _post(self, url, data=None): """ Handle authenticated POST requests :param url: The url for the endpoint including path parameters :type url: :py:class:`str` :param data: The request body parameters :type data: :py:data:`none` or :py:class:`dict` :returns: The JSON output from the API or an error message """ url = urljoin(self.base_url, url) try: r = self._make_request(**dict( method='POST', url=url, json=data, auth=self.auth, timeout=self.timeout, hooks=self.request_hooks, headers=self.request_headers )) except requests.exceptions.RequestException as e: raise e else: if r.status_code >= 400: # in case of a 500 error, the response might not be a JSON try: error_data = r.json() except ValueError: error_data = { "response": r } raise MailChimpError(error_data) if r.status_code == 204: return None return r.json()
Handle authenticated POST requests :param url: The url for the endpoint including path parameters :type url: :py:class:`str` :param data: The request body parameters :type data: :py:data:`none` or :py:class:`dict` :returns: The JSON output from the API or an error message
def get_last_day_of_month(t: datetime) -> int:
    """
    Returns day number of the last day of the month
    :param t: datetime
    :return: int
    """
    # Snap to the first of the month before jumping ahead, so that inputs
    # late in a 31-day month cannot overshoot into the month after next.
    tn = datetime(year=t.year, month=t.month, day=1) + timedelta(days=32)
    tn = datetime(year=tn.year, month=tn.month, day=1)
    tt = tn - timedelta(hours=1)
    return tt.day
Returns day number of the last day of the month :param t: datetime :return: int
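The underlying trick: start from the first of the month, jump 32 days (guaranteed to land in the next month), snap back to the 1st of that month, then step back one day into the original month. A runnable check of that idea, independent of the function above:

from datetime import datetime, timedelta

def last_day_of_month(t: datetime) -> int:
    first = datetime(t.year, t.month, 1)
    first_of_next = (first + timedelta(days=32)).replace(day=1)
    return (first_of_next - timedelta(days=1)).day

print(last_day_of_month(datetime(2020, 2, 10)))  # 29 (leap year)
print(last_day_of_month(datetime(2021, 1, 31)))  # 31
print(last_day_of_month(datetime(2021, 12, 1)))  # 31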
def forward(self, input_tensor):
    # pylint: disable=arguments-differ
    """
    Apply dropout to input tensor.

    Parameters
    ----------
    input_tensor: ``torch.FloatTensor``
        A tensor of shape ``(batch_size, num_timesteps, embedding_dim)``

    Returns
    -------
    output: ``torch.FloatTensor``
        A tensor of shape ``(batch_size, num_timesteps, embedding_dim)`` with dropout applied.
    """
    ones = input_tensor.data.new_ones(input_tensor.shape[0], input_tensor.shape[-1])
    dropout_mask = torch.nn.functional.dropout(ones, self.p, self.training, inplace=False)
    if self.inplace:
        input_tensor *= dropout_mask.unsqueeze(1)
        return None
    else:
        return dropout_mask.unsqueeze(1) * input_tensor
Apply dropout to input tensor. Parameters ---------- input_tensor: ``torch.FloatTensor`` A tensor of shape ``(batch_size, num_timesteps, embedding_dim)`` Returns ------- output: ``torch.FloatTensor`` A tensor of shape ``(batch_size, num_timesteps, embedding_dim)`` with dropout applied.
def path_shift(self, count=1): ''' Shift some levels of PATH_INFO into SCRIPT_NAME and return the moved part. count defaults to 1''' #/a/b/ /c/d --> 'a','b' 'c','d' if count == 0: return '' pathlist = self.path.strip('/').split('/') scriptlist = self.environ.get('SCRIPT_NAME','/').strip('/').split('/') if pathlist and pathlist[0] == '': pathlist = [] if scriptlist and scriptlist[0] == '': scriptlist = [] if count > 0 and count <= len(pathlist): moved = pathlist[:count] scriptlist = scriptlist + moved pathlist = pathlist[count:] elif count < 0 and count >= -len(scriptlist): moved = scriptlist[count:] pathlist = moved + pathlist scriptlist = scriptlist[:count] else: empty = 'SCRIPT_NAME' if count < 0 else 'PATH_INFO' raise AssertionError("Cannot shift. Nothing left from %s" % empty) self['PATH_INFO'] = self.path = '/' + '/'.join(pathlist) \ + ('/' if self.path.endswith('/') and pathlist else '') self['SCRIPT_NAME'] = '/' + '/'.join(scriptlist) return '/'.join(moved)
Shift some levels of PATH_INFO into SCRIPT_NAME and return the moved part. count defaults to 1
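Stripped of the WSGI environ bookkeeping, path shifting just moves leading segments of one path onto the end of another. A small, self-contained sketch of the positive-count case (the negative case moves segments back the other way); the function name is made up:

def shift_path(script_name, path_info, count=1):
    # Move the first `count` segments of path_info onto script_name.
    path_parts = [p for p in path_info.strip('/').split('/') if p]
    script_parts = [p for p in script_name.strip('/').split('/') if p]
    if count > len(path_parts):
        raise AssertionError("Cannot shift. Nothing left from PATH_INFO")
    moved, remaining = path_parts[:count], path_parts[count:]
    new_script = '/' + '/'.join(script_parts + moved)
    new_path = '/' + '/'.join(remaining)
    return new_script, new_path, '/'.join(moved)

print(shift_path('/a/b', '/c/d', 1))  # ('/a/b/c', '/d', 'c')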
def show(ctx):
    """ Show migrations list """
    for app_name, app in ctx.obj['config']['apps'].items():
        click.echo(click.style(app_name, fg='green', bold=True))
        for migration in app['migrations']:
            applied = ctx.obj['db'].is_migration_applied(app_name, migration)
            click.echo(' {0} {1}'.format(
                migration,
                click.style('(applied)', bold=True) if applied else ''))
Show migrations list
def average(self, projection=None):
    """
    Takes the average of elements in the sequence

    >>> seq([1, 2]).average()
    1.5

    >>> seq([('a', 1), ('b', 2)]).average(lambda x: x[1])

    :param projection: function to project on the sequence before taking the average
    :return: average of elements in the sequence
    """
    length = self.size()
    if projection:
        return sum(self.map(projection)) / length
    else:
        return sum(self) / length
Takes the average of elements in the sequence >>> seq([1, 2]).average() 1.5 >>> seq([('a', 1), ('b', 2)]).average(lambda x: x[1]) :param projection: function to project on the sequence before taking the average :return: average of elements in the sequence
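The same idea over a plain Python list: optionally project each element, then divide the sum by the length. A runnable stand-in for the sequence method above, not the library's own API:

def average(items, projection=None):
    # Apply the projection (if any) before summing, then divide by the count.
    values = [projection(x) for x in items] if projection else list(items)
    return sum(values) / len(values)

print(average([1, 2]))                                # 1.5
print(average([('a', 1), ('b', 2)], lambda x: x[1]))  # 1.5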
def capture_ratio(self, benchmark, threshold=0.0, compare_op=("ge", "lt")):
    """Capture ratio--ratio of upside to downside capture.

    Upside capture ratio divided by the downside capture ratio.

    Parameters
    ----------
    benchmark : {pd.Series, TSeries, 1d np.ndarray}
        The benchmark security to which `self` is compared.
    threshold : float, default 0.
        The threshold at which the comparison should be done.  `self` and
        `benchmark` are "filtered" to periods where `benchmark` is greater
        than/less than `threshold`.
    compare_op : {tuple, str, list}, default ('ge', 'lt')
        Comparison operator used to compare to `threshold`.  If a sequence,
        the two elements are passed to `self.up_capture()` and
        `self.down_capture()`, respectively.  If `str`, indicates the
        comparison operator used in both method calls.

    Returns
    -------
    float
    """
    if isinstance(compare_op, (tuple, list)):
        op1, op2 = compare_op
    else:
        op1, op2 = compare_op, compare_op
    uc = self.up_capture(
        benchmark=benchmark, threshold=threshold, compare_op=op1
    )
    dc = self.down_capture(
        benchmark=benchmark, threshold=threshold, compare_op=op2
    )
    return uc / dc
Capture ratio--ratio of upside to downside capture. Upside capture ratio divided by the downside capture ratio. Parameters ---------- benchmark : {pd.Series, TSeries, 1d np.ndarray} The benchmark security to which `self` is compared. threshold : float, default 0. The threshold at which the comparison should be done. `self` and `benchmark` are "filtered" to periods where `benchmark` is greater than/less than `threshold`. compare_op : {tuple, str, list}, default ('ge', 'lt') Comparison operator used to compare to `threshold`. If a sequence, the two elements are passed to `self.up_capture()` and `self.down_capture()`, respectively. If `str`, indicates the comparison operator used in both method calls. Returns ------- float
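For orientation, a simplified NumPy sketch of the arithmetic behind up/down capture (simple means over the filtered periods; this is a generic illustration under the stated threshold rule, not the TSeries implementation, which may compound or annualize):

import numpy as np

def capture(port, bench, threshold=0.0, op=np.greater_equal):
    # Ratio of average portfolio return to average benchmark return over the
    # periods where the benchmark satisfies the comparison against the threshold.
    mask = op(bench, threshold)
    return port[mask].mean() / bench[mask].mean()

port = np.array([0.04, -0.02, 0.03, -0.05])
bench = np.array([0.03, -0.01, 0.02, -0.04])
up = capture(port, bench, op=np.greater_equal)   # upside capture ('ge')
down = capture(port, bench, op=np.less)          # downside capture ('lt')
print(up / down)                                  # capture ratio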
def _extract_info_from_package(dependency, extract_type=None, debug=False, include_build_requirements=False ): """ Internal function to extract metainfo from a package. Currently supported info types: - name - dependencies (a list of dependencies) """ output_folder = tempfile.mkdtemp(prefix="pythonpackage-metafolder-") try: extract_metainfo_files_from_package( dependency, output_folder, debug=debug ) with open(os.path.join(output_folder, "METADATA"), "r", encoding="utf-8" ) as f: # Get metadata and cut away description (is after 2 linebreaks) metadata_entries = f.read().partition("\n\n")[0].splitlines() if extract_type == "name": name = None for meta_entry in metadata_entries: if meta_entry.lower().startswith("name:"): return meta_entry.partition(":")[2].strip() if name is None: raise ValueError("failed to obtain package name") return name elif extract_type == "dependencies": requirements = [] if os.path.exists(os.path.join(output_folder, 'pyproject.toml') ) and include_build_requirements: with open(os.path.join(output_folder, 'pyproject.toml')) as f: build_sys = pytoml.load(f)['build-system'] if "requires" in build_sys: requirements += build_sys["requires"] # Add requirements from metadata: requirements += [ entry.rpartition("Requires-Dist:")[2].strip() for entry in metadata_entries if entry.startswith("Requires-Dist") ] return list(set(requirements)) # remove duplicates finally: shutil.rmtree(output_folder)
Internal function to extract metainfo from a package. Currently supported info types: - name - dependencies (a list of dependencies)
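To make the METADATA parsing step concrete, a tiny self-contained example of cutting the header block at the first blank line and pulling out the Name: and Requires-Dist: entries (the sample text is invented for illustration):

metadata = (
    "Metadata-Version: 2.1\n"
    "Name: examplepkg\n"
    "Requires-Dist: requests (>=2.0)\n"
    "Requires-Dist: six\n"
    "\n"
    "Long description starts here and is ignored.\n"
)

# Everything before the first blank line is the header block.
entries = metadata.partition("\n\n")[0].splitlines()
name = next(e.partition(":")[2].strip() for e in entries if e.lower().startswith("name:"))
requires = [e.rpartition("Requires-Dist:")[2].strip()
            for e in entries if e.startswith("Requires-Dist")]
print(name)      # examplepkg
print(requires)  # ['requests (>=2.0)', 'six']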
def peek(self): """Peek at the oldest reading in this virtual stream.""" if self.reading is None: raise StreamEmptyError("peek called on virtual stream walker without any data", selector=self.selector) return self.reading
Peek at the oldest reading in this virtual stream.
def create_db_entry(self, tfi): """Create a db entry for the given task file info :param tfi: the info for a TaskFile entry in the db :type tfi: :class:`jukeboxcore.filesys.TaskFileInfo` :returns: the created taskfile and note :rtype: tuple :raises: ValidationError """ if tfi.task.department.assetflag: comment = self.asset_comment_pte.toPlainText() else: comment = self.shot_comment_pte.toPlainText() return tfi.create_db_entry(comment)
Create a db entry for the given task file info :param tfi: the info for a TaskFile entry in the db :type tfi: :class:`jukeboxcore.filesys.TaskFileInfo` :returns: the created taskfile and note :rtype: tuple :raises: ValidationError
def parseAndSave(option, urlOrPaths, outDir=None, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar, responseMimeType='application/json', metaExtension='_meta.json', services={'meta': '/meta', 'text': '/tika', 'all': '/rmeta'}): ''' Parse the objects and write extracted metadata and/or text in JSON format to matching filename with an extension of '_meta.json'. :param option: :param urlOrPaths: :param outDir: :param serverEndpoint: :param verbose: :param tikaServerJar: :param responseMimeType: :param metaExtension: :param services: :return: ''' metaPaths = [] paths = getPaths(urlOrPaths) for path in paths: if outDir is None: metaPath = path + metaExtension else: metaPath = os.path.join(outDir, os.path.split(path)[1] + metaExtension) log.info('Writing %s' % metaPath) with open(metaPath, 'w', encoding='utf-8') as f: f.write(parse1(option, path, serverEndpoint, verbose, tikaServerJar, \ responseMimeType, services)[1] + u"\n") metaPaths.append(metaPath) return metaPaths
Parse the objects and write extracted metadata and/or text in JSON format to matching filename with an extension of '_meta.json'. :param option: :param urlOrPaths: :param outDir: :param serverEndpoint: :param verbose: :param tikaServerJar: :param responseMimeType: :param metaExtension: :param services: :return:
def response_hook(self, response, **kwargs) -> HTMLResponse: """ Change the response encoding and replace it with an HTMLResponse. """ if not response.encoding: response.encoding = DEFAULT_ENCODING return HTMLResponse._from_response(response, self)
Change the response encoding and replace it with an HTMLResponse.
def graceful_stop(self, signal_number=None, stack_frame=None): """ This function will be called when a graceful-stop is initiated. """ stop_msg = "Hard" if self.shutdown else "Graceful" if signal_number is None: self.log.info("%s stop called manually. " "Shutting down.", stop_msg) else: self.log.info("%s stop called by signal #%s. Shutting down. " "Stack Frame: %s", stop_msg, signal_number, stack_frame) self.shutdown = True self.crawler_list.stop() self.daemon_list.stop() self.thread_event.set() return True
This function will be called when a graceful-stop is initiated.
def _decode_attributes(self): """Decode attributes of the stanza XML element and put them into the stanza properties.""" try: from_jid = self._element.get('from') if from_jid: self._from_jid = JID(from_jid) to_jid = self._element.get('to') if to_jid: self._to_jid = JID(to_jid) except ValueError: raise JIDMalformedProtocolError self._stanza_type = self._element.get('type') self._stanza_id = self._element.get('id') lang = self._element.get(XML_LANG_QNAME) if lang: self._language = lang
Decode attributes of the stanza XML element and put them into the stanza properties.
def get_language_from_json(language, key): """Finds the given language in a json file.""" file_name = os.path.join( os.path.dirname(__file__), 'languages', '{0}.json').format(key.lower()) if os.path.exists(file_name): try: with open(file_name, 'r', encoding='utf-8') as fh: languages = json.loads(fh.read()) if languages.get(language.lower()): return languages[language.lower()] except: log.traceback(logging.DEBUG) return None
Finds the given language in a json file.
def submit_import(cls, volume, location, project=None, name=None, overwrite=False, properties=None, parent=None, preserve_folder_structure=True, api=None): """ Submits new import job. :param volume: Volume identifier. :param location: Volume location. :param project: Project identifier. :param name: Optional file name. :param overwrite: If true it will overwrite file if exists. :param properties: Properties dictionary. :param parent: The ID of the target folder to which the item should be imported. Should not be used together with project. :param preserve_folder_structure: Whether to keep the exact source folder structure. The default value is true if the item being imported is a folder. Should not be used if you are importing a file. :param api: Api instance. :return: Import object. """ data = {} volume = Transform.to_volume(volume) if project and parent: raise SbgError( 'Project and parent identifiers are mutually exclusive' ) elif project: project = Transform.to_project(project) destination = { 'project': project } elif parent: parent = Transform.to_file(parent) destination = { 'parent': parent } else: raise SbgError('Project or parent identifier is required.') source = { 'volume': volume, 'location': location } if name: destination['name'] = name data['source'] = source data['destination'] = destination data['overwrite'] = overwrite if not preserve_folder_structure: data['preserve_folder_structure'] = preserve_folder_structure if properties: data['properties'] = properties api = api if api else cls._API extra = { 'resource': cls.__name__, 'query': data } logger.info('Submitting import', extra=extra) _import = api.post(cls._URL['query'], data=data).json() return Import(api=api, **_import)
Submits new import job. :param volume: Volume identifier. :param location: Volume location. :param project: Project identifier. :param name: Optional file name. :param overwrite: If true it will overwrite file if exists. :param properties: Properties dictionary. :param parent: The ID of the target folder to which the item should be imported. Should not be used together with project. :param preserve_folder_structure: Whether to keep the exact source folder structure. The default value is true if the item being imported is a folder. Should not be used if you are importing a file. :param api: Api instance. :return: Import object.
def _control_transfer(self, data): """ Send device a control request with standard parameters and <data> as payload. """ LOGGER.debug('Ctrl transfer: %r', data) self._device.ctrl_transfer(bmRequestType=0x21, bRequest=0x09, wValue=0x0200, wIndex=0x01, data_or_wLength=data, timeout=TIMEOUT)
Send device a control request with standard parameters and <data> as payload.
def process(self, request_adu): """ Process request ADU and return response. :param request_adu: A bytearray containing the ADU request. :return: A bytearray containing the response of the ADU request. """ validate_crc(request_adu) return super(RTUServer, self).process(request_adu)
Process request ADU and return response. :param request_adu: A bytearray containing the ADU request. :return: A bytearray containing the response of the ADU request.
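The CRC check referenced above is, in standard Modbus RTU, a CRC-16 (polynomial 0xA001, reflected) appended to the frame low byte first. A hedged sketch of what such a validate_crc typically does; this is the generic algorithm, not necessarily the library's exact code:

def crc16_modbus(data: bytes) -> bytes:
    crc = 0xFFFF
    for byte in data:
        crc ^= byte
        for _ in range(8):
            crc = (crc >> 1) ^ 0xA001 if crc & 1 else crc >> 1
    return crc.to_bytes(2, 'little')   # Modbus transmits the CRC low byte first

def check_crc(request_adu: bytearray) -> None:
    if crc16_modbus(bytes(request_adu[:-2])) != bytes(request_adu[-2:]):
        raise ValueError('CRC mismatch')

# Example frame: slave 1, function 3, start 0x0000, quantity 1, plus its own CRC.
frame = bytearray(b'\x01\x03\x00\x00\x00\x01')
frame += crc16_modbus(bytes(frame))
check_crc(frame)   # passes silently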
def __onclick(self, event): """ respond to mouse clicks in the plot. This function responds to clicks on the first (horizontal slice) plot and updates the vertical profile and slice plots Parameters ---------- event: matplotlib.backend_bases.MouseEvent the click event object containing image coordinates """ # only do something if the first plot has been clicked on if event.inaxes == self.ax1: # retrieve the click coordinates self.x_coord = event.xdata self.y_coord = event.ydata # redraw the cross-hair self.__reset_crosshair() x, y = self.__map2img(self.x_coord, self.y_coord) subset_vertical = self.__read_timeseries(x, y) # redraw/clear the vertical profile plot in case stacking is disabled if not self.checkbox.value: self.__init_vertical_plot() # plot the vertical profile label = 'x: {0:03}; y: {1:03}'.format(x, y) self.ax2.plot(self.timestamps, subset_vertical, label=label) self.ax2_legend = self.ax2.legend(loc=0, prop={'size': 7}, markerscale=1)
respond to mouse clicks in the plot. This function responds to clicks on the first (horizontal slice) plot and updates the vertical profile and slice plots Parameters ---------- event: matplotlib.backend_bases.MouseEvent the click event object containing image coordinates
def create_portable_topology(topol, struct, **kwargs): """Create a processed topology. The processed (or portable) topology file does not contain any ``#include`` statements and hence can be easily copied around. It also makes it possible to re-grompp without having any special itp files available. :Arguments: *topol* topology file *struct* coordinate (structure) file :Keywords: *processed* name of the new topology file; if not set then it is named like *topol* but with ``pp_`` prepended *includes* path or list of paths of directories in which itp files are searched for *grompp_kwargs* other options for :program:`grompp` such as ``maxwarn=2`` can also be supplied :Returns: full path to the processed topology """ _topoldir, _topol = os.path.split(topol) processed = kwargs.pop('processed', os.path.join(_topoldir, 'pp_'+_topol)) grompp_kwargs, mdp_kwargs = filter_grompp_options(**kwargs) mdp_kwargs = add_mdp_includes(topol, mdp_kwargs) with tempfile.NamedTemporaryFile(suffix='.mdp') as mdp: mdp.write('; empty mdp file\ninclude = {include!s}\n'.format(**mdp_kwargs)) mdp.flush() grompp_kwargs['p'] = topol grompp_kwargs['pp'] = processed grompp_kwargs['f'] = mdp.name grompp_kwargs['c'] = struct grompp_kwargs['v'] = False try: gromacs.grompp(**grompp_kwargs) finally: utilities.unlink_gmx('topol.tpr', 'mdout.mdp') return utilities.realpath(processed)
Create a processed topology. The processed (or portable) topology file does not contain any ``#include`` statements and hence can be easily copied around. It also makes it possible to re-grompp without having any special itp files available. :Arguments: *topol* topology file *struct* coordinate (structure) file :Keywords: *processed* name of the new topology file; if not set then it is named like *topol* but with ``pp_`` prepended *includes* path or list of paths of directories in which itp files are searched for *grompp_kwargs* other options for :program:`grompp` such as ``maxwarn=2`` can also be supplied :Returns: full path to the processed topology
def _validate_timeout(seconds: float): """Creates an int from 60000 to 4294967294 that represents a valid millisecond wireless LAN timeout""" val = int(seconds * 1000) assert 60000 <= val <= 4294967294, "Bad value: {}".format(val) return val
Creates an int from 60000 to 4294967294 that represents a valid millisecond wireless LAN timeout
def isa_to_graph(isa: ISA) -> nx.Graph: """ Construct a NetworkX qubit topology from an ISA object. This discards information about supported gates. :param isa: The ISA. """ return nx.from_edgelist(e.targets for e in isa.edges if not e.dead)
Construct a NetworkX qubit topology from an ISA object. This discards information about supported gates. :param isa: The ISA.
def _markup(p_todo, p_focus): """ Returns an attribute spec for the colors that correspond to the given todo item. """ pri = p_todo.priority() pri = 'pri_' + pri if pri else PaletteItem.DEFAULT if not p_focus: attr_dict = {None: pri} else: # use '_focus' palette entries instead of standard ones attr_dict = {None: pri + '_focus'} attr_dict[PaletteItem.PROJECT] = PaletteItem.PROJECT_FOCUS attr_dict[PaletteItem.CONTEXT] = PaletteItem.CONTEXT_FOCUS attr_dict[PaletteItem.METADATA] = PaletteItem.METADATA_FOCUS attr_dict[PaletteItem.LINK] = PaletteItem.LINK_FOCUS return attr_dict
Returns an attribute spec for the colors that correspond to the given todo item.
def build_request(self, path, query_parameters): """ Build the HTTP request by adding query parameters to the path. :param path: API endpoint/path to be used. :param query_parameters: Query parameters to be added to the request. :return: string """ url = 'https://api.uber.com/v1' + self.sanitise_path(path) url += '?' + urlencode(query_parameters) return url
Build the HTTP request by adding query parameters to the path. :param path: API endpoint/path to be used. :param query_parameters: Query parameters to be added to the request. :return: string
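A quick illustration of the URL this produces (assuming sanitise_path just normalizes the leading slash; the path and values here are made up):

from urllib.parse import urlencode

path = '/estimates/price'
query_parameters = {'start_latitude': 37.77, 'start_longitude': -122.41}
url = 'https://api.uber.com/v1' + path + '?' + urlencode(query_parameters)
print(url)
# https://api.uber.com/v1/estimates/price?start_latitude=37.77&start_longitude=-122.41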
def refine_rectwv_coeff(input_image, rectwv_coeff, refine_wavecalib_mode, minimum_slitlet_width_mm, maximum_slitlet_width_mm, save_intermediate_results=False, debugplot=0): """Refine RectWaveCoeff object using a catalogue of lines One and only one among refine_with_oh_lines_mode and refine_with_arc_lines must be different from zero. Parameters ---------- input_image : HDUList object Input 2D image. rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. refine_wavecalib_mode : int Integer, indicating the type of refinement: 0 : no refinement 1 : apply the same global offset to all the slitlets (using ARC lines) 2 : apply individual offset to each slitlet (using ARC lines) 11 : apply the same global offset to all the slitlets (using OH lines) 12 : apply individual offset to each slitlet (using OH lines) minimum_slitlet_width_mm : float Minimum slitlet width (mm) for a valid slitlet. maximum_slitlet_width_mm : float Maximum slitlet width (mm) for a valid slitlet. save_intermediate_results : bool If True, save plots in PDF files debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- refined_rectwv_coeff : RectWaveCoeff instance Refined rectification and wavelength calibration coefficients for the particular CSU configuration. expected_cat_image : HDUList object Output 2D image with the expected catalogued lines. """ logger = logging.getLogger(__name__) if save_intermediate_results: from matplotlib.backends.backend_pdf import PdfPages pdf = PdfPages('crosscorrelation.pdf') else: pdf = None # image header main_header = input_image[0].header filter_name = main_header['filter'] grism_name = main_header['grism'] # protections if refine_wavecalib_mode not in [1, 2, 11, 12]: logger.error('Wavelength calibration refinemente mode={}'. 
format( refine_wavecalib_mode )) raise ValueError("Invalid wavelength calibration refinement mode") # read tabulated lines if refine_wavecalib_mode in [1, 2]: # ARC lines if grism_name == 'LR': catlines_file = 'lines_argon_neon_xenon_empirical_LR.dat' else: catlines_file = 'lines_argon_neon_xenon_empirical.dat' dumdata = pkgutil.get_data('emirdrp.instrument.configs', catlines_file) arc_lines_tmpfile = StringIO(dumdata.decode('utf8')) catlines = np.genfromtxt(arc_lines_tmpfile) # define wavelength and flux as separate arrays catlines_all_wave = catlines[:, 0] catlines_all_flux = catlines[:, 1] mode = refine_wavecalib_mode elif refine_wavecalib_mode in [11, 12]: # OH lines dumdata = pkgutil.get_data( 'emirdrp.instrument.configs', 'Oliva_etal_2013.dat' ) oh_lines_tmpfile = StringIO(dumdata.decode('utf8')) catlines = np.genfromtxt(oh_lines_tmpfile) # define wavelength and flux as separate arrays catlines_all_wave = np.concatenate((catlines[:, 1], catlines[:, 0])) catlines_all_flux = np.concatenate((catlines[:, 2], catlines[:, 2])) mode = refine_wavecalib_mode - 10 else: raise ValueError('Unexpected mode={}'.format(refine_wavecalib_mode)) # initialize output refined_rectwv_coeff = deepcopy(rectwv_coeff) logger.info('Computing median spectrum') # compute median spectrum and normalize it sp_median = median_slitlets_rectified( input_image, mode=2, minimum_slitlet_width_mm=minimum_slitlet_width_mm, maximum_slitlet_width_mm=maximum_slitlet_width_mm )[0].data sp_median /= sp_median.max() # determine minimum and maximum useful wavelength jmin, jmax = find_pix_borders(sp_median, 0) naxis1 = main_header['naxis1'] naxis2 = main_header['naxis2'] crpix1 = main_header['crpix1'] crval1 = main_header['crval1'] cdelt1 = main_header['cdelt1'] xwave = crval1 + (np.arange(naxis1) + 1.0 - crpix1) * cdelt1 if grism_name == 'LR': wv_parameters = set_wv_parameters(filter_name, grism_name) wave_min = wv_parameters['wvmin_useful'] wave_max = wv_parameters['wvmax_useful'] else: wave_min = crval1 + (jmin + 1 - crpix1) * cdelt1 wave_max = crval1 + (jmax + 1 - crpix1) * cdelt1 logger.info('Setting wave_min to {}'.format(wave_min)) logger.info('Setting wave_max to {}'.format(wave_max)) # extract subset of catalogue lines within current wavelength range lok1 = catlines_all_wave >= wave_min lok2 = catlines_all_wave <= wave_max catlines_reference_wave = catlines_all_wave[lok1*lok2] catlines_reference_flux = catlines_all_flux[lok1*lok2] catlines_reference_flux /= catlines_reference_flux.max() # estimate sigma to broaden catalogue lines csu_config = CsuConfiguration.define_from_header(main_header) # segregate slitlets list_useful_slitlets = csu_config.widths_in_range_mm( minwidth=minimum_slitlet_width_mm, maxwidth=maximum_slitlet_width_mm ) # remove missing slitlets if len(refined_rectwv_coeff.missing_slitlets) > 0: for iremove in refined_rectwv_coeff.missing_slitlets: if iremove in list_useful_slitlets: list_useful_slitlets.remove(iremove) list_not_useful_slitlets = [i for i in list(range(1, EMIR_NBARS + 1)) if i not in list_useful_slitlets] logger.info('list of useful slitlets: {}'.format( list_useful_slitlets)) logger.info('list of not useful slitlets: {}'.format( list_not_useful_slitlets)) tempwidths = np.array([csu_config.csu_bar_slit_width(islitlet) for islitlet in list_useful_slitlets]) widths_summary = summary(tempwidths) logger.info('Statistics of useful slitlet widths (mm):') logger.info('- npoints....: {0:d}'.format(widths_summary['npoints'])) logger.info('- mean.......: {0:7.3f}'.format(widths_summary['mean'])) 
logger.info('- median.....: {0:7.3f}'.format(widths_summary['median'])) logger.info('- std........: {0:7.3f}'.format(widths_summary['std'])) logger.info('- robust_std.: {0:7.3f}'.format(widths_summary['robust_std'])) # empirical transformation of slit width (mm) to pixels sigma_broadening = cdelt1 * widths_summary['median'] # convolve location of catalogue lines to generate expected spectrum xwave_reference, sp_reference = convolve_comb_lines( catlines_reference_wave, catlines_reference_flux, sigma_broadening, crpix1, crval1, cdelt1, naxis1 ) sp_reference /= sp_reference.max() # generate image2d with expected lines image2d_expected_lines = np.tile(sp_reference, (naxis2, 1)) hdu = fits.PrimaryHDU(data=image2d_expected_lines, header=main_header) expected_cat_image = fits.HDUList([hdu]) if (abs(debugplot) % 10 != 0) or (pdf is not None): ax = ximplotxy(xwave, sp_median, 'C1-', xlabel='Wavelength (Angstroms, in vacuum)', ylabel='Normalized number of counts', title='Median spectrum', label='observed spectrum', show=False) # overplot reference catalogue lines ax.stem(catlines_reference_wave, catlines_reference_flux, 'C4-', markerfmt=' ', basefmt='C4-', label='tabulated lines') # overplot convolved reference lines ax.plot(xwave_reference, sp_reference, 'C0-', label='expected spectrum') ax.legend() if pdf is not None: pdf.savefig() else: pause_debugplot(debugplot=debugplot, pltshow=True) # compute baseline signal in sp_median baseline = np.percentile(sp_median[sp_median > 0], q=10) if (abs(debugplot) % 10 != 0) or (pdf is not None): fig = plt.figure() ax = fig.add_subplot(111) ax.hist(sp_median, bins=1000, log=True) ax.set_xlabel('Normalized number of counts') ax.set_ylabel('Number of pixels') ax.set_title('Median spectrum') ax.axvline(float(baseline), linestyle='--', color='grey') if pdf is not None: pdf.savefig() else: geometry = (0, 0, 640, 480) set_window_geometry(geometry) plt.show() # subtract baseline to sp_median (only pixels with signal above zero) lok = np.where(sp_median > 0) sp_median[lok] -= baseline # compute global offset through periodic correlation logger.info('Computing global offset') global_offset, fpeak = periodic_corr1d( sp_reference=sp_reference, sp_offset=sp_median, fminmax=None, naround_zero=50, plottitle='Median spectrum (cross-correlation)', pdf=pdf, debugplot=debugplot ) logger.info('Global offset: {} pixels'.format(-global_offset)) missing_slitlets = rectwv_coeff.missing_slitlets if mode == 1: # apply computed offset to obtain refined_rectwv_coeff_global for islitlet in range(1, EMIR_NBARS + 1): if islitlet not in missing_slitlets: i = islitlet - 1 dumdict = refined_rectwv_coeff.contents[i] dumdict['wpoly_coeff'][0] -= global_offset*cdelt1 elif mode == 2: # compute individual offset for each slitlet logger.info('Computing individual offsets') median_55sp = median_slitlets_rectified(input_image, mode=1) offset_array = np.zeros(EMIR_NBARS) xplot = [] yplot = [] xplot_skipped = [] yplot_skipped = [] cout = '0' for islitlet in range(1, EMIR_NBARS + 1): if islitlet in list_useful_slitlets: i = islitlet - 1 sp_median = median_55sp[0].data[i, :] lok = np.where(sp_median > 0) if np.any(lok): baseline = np.percentile(sp_median[lok], q=10) sp_median[lok] -= baseline sp_median /= sp_median.max() offset_array[i], fpeak = periodic_corr1d( sp_reference=sp_reference, sp_offset=median_55sp[0].data[i, :], fminmax=None, naround_zero=50, plottitle='slitlet #{0} (cross-correlation)'.format( islitlet), pdf=pdf, debugplot=debugplot ) else: offset_array[i] = 0.0 dumdict = 
refined_rectwv_coeff.contents[i] dumdict['wpoly_coeff'][0] -= offset_array[i]*cdelt1 xplot.append(islitlet) yplot.append(-offset_array[i]) # second correction wpoly_coeff_refined = check_wlcalib_sp( sp=median_55sp[0].data[i, :], crpix1=crpix1, crval1=crval1-offset_array[i]*cdelt1, cdelt1=cdelt1, wv_master=catlines_reference_wave, coeff_ini=dumdict['wpoly_coeff'], naxis1_ini=EMIR_NAXIS1, title='slitlet #{0} (after applying offset)'.format( islitlet), ylogscale=False, pdf=pdf, debugplot=debugplot ) dumdict['wpoly_coeff'] = wpoly_coeff_refined cout += '.' else: xplot_skipped.append(islitlet) yplot_skipped.append(0) cout += 'i' if islitlet % 10 == 0: if cout != 'i': cout = str(islitlet // 10) logger.info(cout) # show offsets with opposite sign stat_summary = summary(np.array(yplot)) logger.info('Statistics of individual slitlet offsets (pixels):') logger.info('- npoints....: {0:d}'.format(stat_summary['npoints'])) logger.info('- mean.......: {0:7.3f}'.format(stat_summary['mean'])) logger.info('- median.....: {0:7.3f}'.format(stat_summary['median'])) logger.info('- std........: {0:7.3f}'.format(stat_summary['std'])) logger.info('- robust_std.: {0:7.3f}'.format(stat_summary[ 'robust_std'])) if (abs(debugplot) % 10 != 0) or (pdf is not None): ax = ximplotxy(xplot, yplot, linestyle='', marker='o', color='C0', xlabel='slitlet number', ylabel='-offset (pixels) = offset to be applied', title='cross-correlation result', show=False, **{'label': 'individual slitlets'}) if len(xplot_skipped) > 0: ax.plot(xplot_skipped, yplot_skipped, 'mx') ax.axhline(-global_offset, linestyle='--', color='C1', label='global offset') ax.legend() if pdf is not None: pdf.savefig() else: pause_debugplot(debugplot=debugplot, pltshow=True) else: raise ValueError('Unexpected mode={}'.format(mode)) # close output PDF file if pdf is not None: pdf.close() # return result return refined_rectwv_coeff, expected_cat_image
Refine RectWaveCoeff object using a catalogue of lines One and only one among refine_with_oh_lines_mode and refine_with_arc_lines must be different from zero. Parameters ---------- input_image : HDUList object Input 2D image. rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. refine_wavecalib_mode : int Integer, indicating the type of refinement: 0 : no refinement 1 : apply the same global offset to all the slitlets (using ARC lines) 2 : apply individual offset to each slitlet (using ARC lines) 11 : apply the same global offset to all the slitlets (using OH lines) 12 : apply individual offset to each slitlet (using OH lines) minimum_slitlet_width_mm : float Minimum slitlet width (mm) for a valid slitlet. maximum_slitlet_width_mm : float Maximum slitlet width (mm) for a valid slitlet. save_intermediate_results : bool If True, save plots in PDF files debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- refined_rectwv_coeff : RectWaveCoeff instance Refined rectification and wavelength calibration coefficients for the particular CSU configuration. expected_cat_image : HDUList object Output 2D image with the expected catalogued lines.
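The core of the refinement above is estimating a pixel offset by cross-correlating the observed median spectrum against a synthetic spectrum built from the catalogued lines. A minimal, self-contained sketch of that offset estimation using FFT-based circular cross-correlation (the real periodic_corr1d additionally restricts the search to offsets near zero and refines the peak):

import numpy as np

def naive_global_offset(sp_reference, sp_offset):
    # Circular cross-correlation via FFT; the argmax is the integer shift that
    # best aligns sp_offset with sp_reference.
    corr = np.fft.ifft(np.conj(np.fft.fft(sp_reference)) * np.fft.fft(sp_offset)).real
    shift = int(np.argmax(corr))
    n = len(sp_reference)
    if shift > n // 2:           # map to a signed offset around zero
        shift -= n
    return shift

x = np.linspace(0.0, 1.0, 512)
reference = (np.exp(-0.5 * ((x - 0.30) / 0.01) ** 2)
             + np.exp(-0.5 * ((x - 0.70) / 0.01) ** 2))
observed = np.roll(reference, 7)                  # wavelength solution off by 7 pixels
print(naive_global_offset(reference, observed))   # 7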
def _register_socket(s): ''' Internal method used by socket emulation layer to create a new "upward" queue for an app-layer socket and to register the socket object. Returns two queues: "downward" (fromapp) and "upward" (toapp). ''' queue_to_app = Queue() with _lock: ApplicationLayer._to_app[s._sockid()] = queue_to_app return ApplicationLayer._from_app, queue_to_app
Internal method used by socket emulation layer to create a new "upward" queue for an app-layer socket and to register the socket object. Returns two queues: "downward" (fromapp) and "upward" (toapp).
def add_bgp_speaker_to_dragent(self, bgp_dragent, body): """Adds a BGP speaker to Dynamic Routing agent.""" return self.post((self.agent_path + self.BGP_DRINSTANCES) % bgp_dragent, body=body)
Adds a BGP speaker to Dynamic Routing agent.
def _handle_blacklisted_tag(self): """Handle the body of an HTML tag that is parser-blacklisted.""" strip = lambda text: text.rstrip().lower() while True: this, next = self._read(), self._read(1) if this is self.END: self._fail_route() elif this == "<" and next == "/": self._head += 3 if self._read() != ">" or (strip(self._read(-1)) != strip(self._stack[1].text)): self._head -= 1 self._emit_text("</") continue self._emit(tokens.TagOpenClose()) self._emit_text(self._read(-1)) self._emit(tokens.TagCloseClose()) return self._pop() elif this == "&": self._parse_entity() else: self._emit_text(this) self._head += 1
Handle the body of an HTML tag that is parser-blacklisted.
def open_file(link, session=None, stream=True): """ Open local or remote file for reading. :type link: pip._internal.index.Link or str :type session: requests.Session :param bool stream: Try to stream if remote, default True :raises ValueError: If link points to a local directory. :return: a context manager to the opened file-like object """ if not isinstance(link, six.string_types): try: link = link.url_without_fragment except AttributeError: raise ValueError("Cannot parse url from unknown type: {0!r}".format(link)) if not is_valid_url(link) and os.path.exists(link): link = path_to_url(link) if is_file_url(link): # Local URL local_path = url_to_path(link) if os.path.isdir(local_path): raise ValueError("Cannot open directory for read: {}".format(link)) else: with io.open(local_path, "rb") as local_file: yield local_file else: # Remote URL headers = {"Accept-Encoding": "identity"} if not session: from requests import Session session = Session() with session.get(link, headers=headers, stream=stream) as resp: try: raw = getattr(resp, "raw", None) result = raw if raw else resp yield result finally: if raw: conn = getattr(raw, "_connection") if conn is not None: conn.close() result.close()
Open local or remote file for reading. :type link: pip._internal.index.Link or str :type session: requests.Session :param bool stream: Try to stream if remote, default True :raises ValueError: If link points to a local directory. :return: a context manager to the opened file-like object
def iter_sections(self, order=Tree.ipreorder, neurite_order=NeuriteIter.FileOrder): '''iteration over section nodes Parameters: order: section iteration order within a given neurite. Must be one of: Tree.ipreorder: Depth-first pre-order iteration of tree nodes Tree.ipostorder: Depth-first post-order iteration of tree nodes Tree.iupstream: Iterate from a tree node to the root nodes Tree.ibifurcation_point: Iterator to bifurcation points Tree.ileaf: Iterator to all leaves of a tree neurite_order: order upon which neurites should be iterated. Values: - NeuriteIter.FileOrder: order of appearance in the file - NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical ''' return iter_sections(self, iterator_type=order, neurite_order=neurite_order)
iteration over section nodes Parameters: order: section iteration order within a given neurite. Must be one of: Tree.ipreorder: Depth-first pre-order iteration of tree nodes Tree.ipostorder: Depth-first post-order iteration of tree nodes Tree.iupstream: Iterate from a tree node to the root nodes Tree.ibifurcation_point: Iterator to bifurcation points Tree.ileaf: Iterator to all leaves of a tree neurite_order: order upon which neurites should be iterated. Values: - NeuriteIter.FileOrder: order of appearance in the file - NeuriteIter.NRN: NRN simulator order: soma -> axon -> basal -> apical
def if_else(self, pred, likely=None): """ A context manager which sets up two conditional basic blocks based on the given predicate (a i1 value). A tuple of context managers is yield'ed. Each context manager acts as a if_then() block. *likely* has the same meaning as in if_then(). Typical use:: with builder.if_else(pred) as (then, otherwise): with then: # emit instructions for when the predicate is true with otherwise: # emit instructions for when the predicate is false """ bb = self.basic_block bbif = self.append_basic_block(name=_label_suffix(bb.name, '.if')) bbelse = self.append_basic_block(name=_label_suffix(bb.name, '.else')) bbend = self.append_basic_block(name=_label_suffix(bb.name, '.endif')) br = self.cbranch(pred, bbif, bbelse) if likely is not None: br.set_weights([99, 1] if likely else [1, 99]) then = self._branch_helper(bbif, bbend) otherwise = self._branch_helper(bbelse, bbend) yield then, otherwise self.position_at_end(bbend)
A context manager which sets up two conditional basic blocks based on the given predicate (a i1 value). A tuple of context managers is yield'ed. Each context manager acts as a if_then() block. *likely* has the same meaning as in if_then(). Typical use:: with builder.if_else(pred) as (then, otherwise): with then: # emit instructions for when the predicate is true with otherwise: # emit instructions for when the predicate is false
def sha256(content): """Finds the sha256 hash of the content.""" if isinstance(content, str): content = content.encode('utf-8') return hashlib.sha256(content).hexdigest()
Finds the sha256 hash of the content.
def internal_only(view_func): """ A view decorator which blocks access for requests coming through the load balancer. """ @functools.wraps(view_func) def wrapper(request, *args, **kwargs): forwards = request.META.get("HTTP_X_FORWARDED_FOR", "").split(",") # The nginx in the docker container adds the loadbalancer IP to the list inside # X-Forwarded-For, so if the list contains more than a single item, we know # that it went through our loadbalancer if len(forwards) > 1: raise PermissionDenied() return view_func(request, *args, **kwargs) return wrapper
A view decorator which blocks access for requests coming through the load balancer.
def time_relaxations_direct(P, p0, obs, times=[1]): r"""Compute time-relaxations of obs with respect of given initial distribution. relaxation(k) = p0 P^k obs Parameters ---------- P : ndarray, shape=(n, n) or scipy.sparse matrix Transition matrix p0 : ndarray, shape=(n) initial distribution obs : ndarray, shape=(n) Vector representing observable on discrete states. times : array-like, shape(n_t) Vector of time points at which the (auto)correlation will be evaluated Returns ------- relaxations : ndarray, shape(n_t) """ n_t = len(times) times = np.sort(times) # maximum time > number of rows? if times[-1] > P.shape[0]: use_diagonalization = True R, D, L = rdl_decomposition(P) # discard imaginary part, if all elements i=0 if not np.any(np.iscomplex(R)): R = np.real(R) if not np.any(np.iscomplex(D)): D = np.real(D) if not np.any(np.iscomplex(L)): L = np.real(L) rdl = (R, D, L) f = np.empty(n_t, dtype=D.dtype) if use_diagonalization: for i in range(n_t): f[i] = time_relaxation_direct_by_diagonalization(P, p0, obs, times[i], rdl) else: start_values = None for i in range(n_t): f[i], start_values = time_relaxation_direct_by_mtx_vec_prod(P, p0, obs, times[i], start_values, True) return f
r"""Compute time-relaxations of obs with respect of given initial distribution. relaxation(k) = p0 P^k obs Parameters ---------- P : ndarray, shape=(n, n) or scipy.sparse matrix Transition matrix p0 : ndarray, shape=(n) initial distribution obs : ndarray, shape=(n) Vector representing observable on discrete states. times : array-like, shape(n_t) Vector of time points at which the (auto)correlation will be evaluated Returns ------- relaxations : ndarray, shape(n_t)
def _get_timit(directory): """Extract TIMIT datasets to directory unless directory/timit exists.""" if os.path.exists(os.path.join(directory, "timit")): return assert FLAGS.timit_paths for path in FLAGS.timit_paths.split(","): with tf.gfile.GFile(path) as f: with tarfile.open(fileobj=f, mode="r:gz") as timit_compressed: timit_compressed.extractall(directory)
Extract TIMIT datasets to directory unless directory/timit exists.
def run_command(self, args): """ Parse command line arguments and run function registered for the appropriate command. :param args: [str] command line arguments """ parsed_args = self.parser.parse_args(args) if hasattr(parsed_args, 'func'): parsed_args.func(parsed_args) else: self.parser.print_help()
Parse command line arguments and run function registered for the appropriate command. :param args: [str] command line arguments
def get_list_database(self): """Get the list of databases.""" url = "db" response = self.request( url=url, method='GET', expected_response_code=200 ) return response.json()
Get the list of databases.
def _descendants(self): """ Scans full list of node descendants. :return: Generator of nodes. """ children = self._children if children is not None: for child in children.values(): yield from child._descendants yield child
Scans full list of node descendants. :return: Generator of nodes.
def update(self): """Compute the next element in the stream, and update the plot data""" # Update the simulated pricing data self.t += 1000 / INTERVAL self.average *= np.random.lognormal(0, 0.04) high = self.average * np.exp(np.abs(np.random.gamma(1, 0.03))) low = self.average / np.exp(np.abs(np.random.gamma(1, 0.03))) delta = high - low open = low + delta * np.random.uniform(0.05, 0.95) close = low + delta * np.random.uniform(0.05, 0.95) color = "darkgreen" if open < close else "darkred" for k, point in [('time', self.t), ('average', self.average), ('open', open), ('high', high), ('low', low), ('close', close), ('color', color)]: self.data[k].append(point) ema12 = self._ema(self.data['close'], self.kernel12) ema26 = self._ema(self.data['close'], self.kernel26) macd = ema12 - ema26 self.data['ma'].append(ema12) self.data['macd'].append(macd) macd9 = self._ema(self.data['macd'], self.kernel9) self.data['macd9'].append(macd9) self.data['macdh'].append(macd - macd9)
Compute the next element in the stream, and update the plot data
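One plausible shape for the _ema helper used above, purely illustrative (the kernels here are truncated exponential-weight vectors; the actual implementation may differ):

import numpy as np

def ema_kernel(span, length=60):
    # Exponentially decaying weights, newest sample first.
    alpha = 2.0 / (span + 1.0)
    return alpha * (1.0 - alpha) ** np.arange(length)

def ema(values, kernel):
    recent = np.asarray(values[-len(kernel):], dtype=float)[::-1]
    k = kernel[:len(recent)]
    return float(np.dot(recent, k) / k.sum())

closes = [100.0, 101.0, 102.0, 103.0, 105.0]
macd = ema(closes, ema_kernel(12)) - ema(closes, ema_kernel(26))
print(macd)   # short EMA minus long EMA, as in the update above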
def UpdateUser(self, user, ssh_keys): """Update a Linux user with authorized SSH keys. Args: user: string, the name of the Linux user account. ssh_keys: list, the SSH key strings associated with the user. Returns: bool, True if the user account updated successfully. """ if not bool(USER_REGEX.match(user)): self.logger.warning('Invalid user account name %s.', user) return False if not self._GetUser(user): # User does not exist. Attempt to create the user and add them to the # appropriate user groups. if not (self._AddUser(user) and self._UpdateUserGroups(user, self.groups)): return False # Add the user to the google sudoers group. if not self._UpdateSudoer(user, sudoer=True): return False # Don't try to manage account SSH keys with a shell set to disable # logins. This helps avoid problems caused by operator and root sharing # a home directory in CentOS and RHEL. pw_entry = self._GetUser(user) if pw_entry and os.path.basename(pw_entry.pw_shell) == 'nologin': message = 'Not updating user %s. User set `nologin` as login shell.' self.logger.debug(message, user) return True try: self._UpdateAuthorizedKeys(user, ssh_keys) except (IOError, OSError) as e: message = 'Could not update the authorized keys file for user %s. %s.' self.logger.warning(message, user, str(e)) return False else: return True
Update a Linux user with authorized SSH keys. Args: user: string, the name of the Linux user account. ssh_keys: list, the SSH key strings associated with the user. Returns: bool, True if the user account updated successfully.
def remove_point(self, time): """Remove a point, if no point is found nothing happens. :param int time: Time of the point. :raises TierTypeException: If the tier is not a TextTier. """ if self.tier_type != 'TextTier': raise Exception('Tiertype must be TextTier.') self.intervals = [i for i in self.intervals if i[0] != time]
Remove a point, if no point is found nothing happens. :param int time: Time of the point. :raises TierTypeException: If the tier is not a TextTier.
def shutdown(self, reason = ConnectionClosed()): """Shutdown the socket server. The socket server will stop accepting incoming connections. All connections will be dropped. """ if self._shutdown: raise ShutdownError() self.stop() self._closing = True for connection in self.connections: connection.close() self.connections = set() self._shutdown = True if isinstance(reason, ConnectionClosed): logger.info("server shutdown") else: logger.warn("server shutdown, reason %s" % str(reason))
Shutdown the socket server. The socket server will stop accepting incoming connections. All connections will be dropped.
def connect(args): """ %prog connect assembly.fasta read_mapping.blast Connect contigs using long reads. """ p = OptionParser(connect.__doc__) p.add_option("--clip", default=2000, type="int", help="Only consider end of contigs [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastafile, blastfile = args clip = opts.clip sizes = Sizes(fastafile).mapping blast = Blast(blastfile) blasts = [] for b in blast: seqid = b.subject size = sizes[seqid] start, end = b.sstart, b.sstop cstart, cend = min(size, clip), max(0, size - clip) if start > cstart and end < cend: continue blasts.append(b) key = lambda x: x.query blasts.sort(key=key) g = BiGraph() for query, bb in groupby(blasts, key=key): bb = sorted(bb, key=lambda x: x.qstart) nsubjects = len(set(x.subject for x in bb)) if nsubjects == 1: continue print("\n".join(str(x) for x in bb)) for a, b in pairwise(bb): astart, astop = a.qstart, a.qstop bstart, bstop = b.qstart, b.qstop if a.subject == b.subject: continue arange = astart, astop brange = bstart, bstop ov = range_intersect(arange, brange) alen = astop - astart + 1 blen = bstop - bstart + 1 if ov: ostart, ostop = ov ov = ostop - ostart + 1 print(ov, alen, blen) if ov and (ov > alen / 2 or ov > blen / 2): print("Too much overlap ({0})".format(ov)) continue asub = a.subject bsub = b.subject atag = ">" if a.orientation == "+" else "<" btag = ">" if b.orientation == "+" else "<" g.add_edge(asub, bsub, atag, btag) graph_to_agp(g, blastfile, fastafile, verbose=False)
%prog connect assembly.fasta read_mapping.blast Connect contigs using long reads.
def competition_view_leaderboard(self, id, **kwargs): # noqa: E501 """View competition leaderboard # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.competition_view_leaderboard(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: Competition name (required) :return: Result If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.competition_view_leaderboard_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.competition_view_leaderboard_with_http_info(id, **kwargs) # noqa: E501 return data
View competition leaderboard # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.competition_view_leaderboard(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: Competition name (required) :return: Result If the method is called asynchronously, returns the request thread.
def matchmaker_delete(institute_id, case_name): """Remove a case from MatchMaker""" # check that only authorized users can delete patients from MME user_obj = store.user(current_user.email) if 'mme_submitter' not in user_obj['roles']: flash('unauthorized request', 'warning') return redirect(request.referrer) institute_obj, case_obj = institute_and_case(store, institute_id, case_name) # Required params for sending a delete request to MME: mme_base_url = current_app.config.get('MME_URL') mme_token = current_app.config.get('MME_TOKEN') if not mme_base_url or not mme_token: flash('An error occurred reading matchmaker connection parameters. Please check config file!', 'danger') return redirect(request.referrer) delete_result = controllers.mme_delete(case_obj, mme_base_url, mme_token) n_deleted = 0 category = 'warning' for resp in delete_result: if resp['status_code'] == 200: n_deleted += 1 else: flash(resp['message'], category) if n_deleted: category = 'success' # update case by removing mme submission # and create events for patients deletion from MME user_obj = store.user(current_user.email) store.case_mme_delete(case_obj=case_obj, user_obj=user_obj) flash('Number of patients deleted from Matchmaker: {} out of {}'.format(n_deleted, len(delete_result)), category) return redirect(request.referrer)
Remove a case from MatchMaker
def plot_periodicvar_recovery_results( precvar_results, aliases_count_as_recovered=None, magbins=None, periodbins=None, amplitudebins=None, ndetbins=None, minbinsize=1, plotfile_ext='png', ): '''This plots the results of periodic var recovery. This function makes plots for periodicvar recovered fraction as a function of: - magbin - periodbin - amplitude of variability - ndet with plot lines broken down by: - magcol - periodfinder - vartype - recovery status The kwargs `magbins`, `periodbins`, `amplitudebins`, and `ndetbins` can be used to set the bin lists as needed. The kwarg `minbinsize` controls how many elements per bin are required to accept a bin in processing its recovery characteristics for mags, periods, amplitudes, and ndets. Parameters ---------- precvar_results : dict or str This is either a dict returned by parallel_periodicvar_recovery or the pickle created by that function. aliases_count_as_recovered : list of str or 'all' This is used to set which kinds of aliases this function considers as 'recovered' objects. Normally, we require that recovered objects have a recovery status of 'actual' to indicate the actual period was recovered. To change this default behavior, aliases_count_as_recovered can be set to a list of alias status strings that should be considered as 'recovered' objects as well. Choose from the following alias types:: 'twice' recovered_p = 2.0*actual_p 'half' recovered_p = 0.5*actual_p 'ratio_over_1plus' recovered_p = actual_p/(1.0+actual_p) 'ratio_over_1minus' recovered_p = actual_p/(1.0-actual_p) 'ratio_over_1plus_twice' recovered_p = actual_p/(1.0+2.0*actual_p) 'ratio_over_1minus_twice' recovered_p = actual_p/(1.0-2.0*actual_p) 'ratio_over_1plus_thrice' recovered_p = actual_p/(1.0+3.0*actual_p) 'ratio_over_1minus_thrice' recovered_p = actual_p/(1.0-3.0*actual_p) 'ratio_over_minus1' recovered_p = actual_p/(actual_p - 1.0) 'ratio_over_twice_minus1' recovered_p = actual_p/(2.0*actual_p - 1.0) or set `aliases_count_as_recovered='all'` to include all of the above in the 'recovered' periodic var list. magbins : np.array The magnitude bins to plot the recovery rate results over. If None, the default mag bins will be used: `np.arange(8.0,16.25,0.25)`. periodbins : np.array The period bins to plot the recovery rate results over. If None, the default period bins will be used: `np.arange(0.0,500.0,0.5)`. amplitudebins : np.array The variability amplitude bins to plot the recovery rate results over. If None, the default amplitude bins will be used: `np.arange(0.0,2.0,0.05)`. ndetbins : np.array The ndet bins to plot the recovery rate results over. If None, the default ndet bins will be used: `np.arange(0.0,60000.0,1000.0)`. minbinsize : int The minimum number of objects per bin required to plot a bin and its recovery fraction on the plot. plotfile_ext : {'png','pdf'} Sets the plot output files' extension. Returns ------- dict A dict containing recovery fraction statistics and the paths to each of the plots made. ''' # get the result pickle/dict if isinstance(precvar_results, str) and os.path.exists(precvar_results): with open(precvar_results,'rb') as infd: precvar = pickle.load(infd) elif isinstance(precvar_results, dict): precvar = precvar_results else: LOGERROR('could not understand the input ' 'periodic var recovery dict/pickle') return None # get the simbasedir and open the fakelc-info.pkl. we'll need the magbins # definition from here. 
simbasedir = precvar['simbasedir'] lcinfof = os.path.join(simbasedir,'fakelcs-info.pkl') if not os.path.exists(lcinfof): LOGERROR('fakelcs-info.pkl does not exist in %s, can\'t continue' % simbasedir) return None with open(lcinfof,'rb') as infd: lcinfo = pickle.load(infd) # get the magcols, vartypes, sdssr, isvariable flags magcols = lcinfo['magcols'] objectid = lcinfo['objectid'] ndet = lcinfo['ndet'] sdssr = lcinfo['sdssr'] # get the actual periodic vars actual_periodicvars = precvar['actual_periodicvars'] # generate lists of objects binned by magbins and periodbins LOGINFO('getting sdssr and ndet for actual periodic vars...') # get the sdssr and ndet for all periodic vars periodicvar_sdssr = [] periodicvar_ndet = [] periodicvar_objectids = [] for pobj in actual_periodicvars: pobjind = objectid == pobj periodicvar_objectids.append(pobj) periodicvar_sdssr.append(sdssr[pobjind]) periodicvar_ndet.append(ndet[pobjind]) periodicvar_sdssr = np.array(periodicvar_sdssr) periodicvar_objectids = np.array(periodicvar_objectids) periodicvar_ndet = np.array(periodicvar_ndet) LOGINFO('getting periods, vartypes, ' 'amplitudes, ndet for actual periodic vars...') # get the periods, vartypes, amplitudes for the actual periodic vars periodicvar_periods = [ np.asscalar(precvar['details'][x]['actual_varperiod']) for x in periodicvar_objectids ] periodicvar_amplitudes = [ np.asscalar(precvar['details'][x]['actual_varamplitude']) for x in periodicvar_objectids ] periodicvar_vartypes = [ precvar['details'][x]['actual_vartype'] for x in periodicvar_objectids ] # # do the binning # # bin by mag LOGINFO('binning actual periodic vars by magnitude...') magbinned_sdssr = [] magbinned_periodicvars = [] if not magbins: magbins = PERIODREC_DEFAULT_MAGBINS magbininds = np.digitize(np.ravel(periodicvar_sdssr), magbins) for mbinind, magi in zip(np.unique(magbininds), range(len(magbins)-1)): thisbin_periodicvars = periodicvar_objectids[magbininds == mbinind] if (thisbin_periodicvars.size > (minbinsize-1)): magbinned_sdssr.append((magbins[magi] + magbins[magi+1])/2.0) magbinned_periodicvars.append(thisbin_periodicvars) # bin by period LOGINFO('binning actual periodic vars by period...') periodbinned_periods = [] periodbinned_periodicvars = [] if not periodbins: periodbins = PERIODREC_DEFAULT_PERIODBINS periodbininds = np.digitize(np.ravel(periodicvar_periods), periodbins) for pbinind, peri in zip(np.unique(periodbininds), range(len(periodbins)-1)): thisbin_periodicvars = periodicvar_objectids[periodbininds == pbinind] if (thisbin_periodicvars.size > (minbinsize-1)): periodbinned_periods.append((periodbins[peri] + periodbins[peri+1])/2.0) periodbinned_periodicvars.append(thisbin_periodicvars) # bin by amplitude of variability LOGINFO('binning actual periodic vars by variability amplitude...') amplitudebinned_amplitudes = [] amplitudebinned_periodicvars = [] if not amplitudebins: amplitudebins = PERIODREC_DEFAULT_AMPBINS amplitudebininds = np.digitize(np.ravel(np.abs(periodicvar_amplitudes)), amplitudebins) for abinind, ampi in zip(np.unique(amplitudebininds), range(len(amplitudebins)-1)): thisbin_periodicvars = periodicvar_objectids[ amplitudebininds == abinind ] if (thisbin_periodicvars.size > (minbinsize-1)): amplitudebinned_amplitudes.append( (amplitudebins[ampi] + amplitudebins[ampi+1])/2.0 ) amplitudebinned_periodicvars.append(thisbin_periodicvars) # bin by ndet LOGINFO('binning actual periodic vars by ndet...') ndetbinned_ndets = [] ndetbinned_periodicvars = [] if not ndetbins: ndetbins = PERIODREC_DEFAULT_NDETBINS 
ndetbininds = np.digitize(np.ravel(periodicvar_ndet), ndetbins) for nbinind, ndeti in zip(np.unique(ndetbininds), range(len(ndetbins)-1)): thisbin_periodicvars = periodicvar_objectids[ndetbininds == nbinind] if (thisbin_periodicvars.size > (minbinsize-1)): ndetbinned_ndets.append( (ndetbins[ndeti] + ndetbins[ndeti+1])/2.0 ) ndetbinned_periodicvars.append(thisbin_periodicvars) # now figure out what 'recovered' means using the provided # aliases_count_as_recovered kwarg recovered_status = ['actual'] if isinstance(aliases_count_as_recovered, list): for atype in aliases_count_as_recovered: if atype in ALIAS_TYPES: recovered_status.append(atype) else: LOGWARNING('unknown alias type: %s, skipping' % atype) elif aliases_count_as_recovered and aliases_count_as_recovered == 'all': for atype in ALIAS_TYPES[1:]: recovered_status.append(atype) # find all the matching objects for these recovered statuses recovered_periodicvars = np.array( [precvar['details'][x]['objectid'] for x in precvar['details'] if (precvar['details'][x] is not None and precvar['details'][x]['best_recovered_status'] in recovered_status)], dtype=np.unicode_ ) LOGINFO('recovered %s/%s periodic variables (frac: %.3f) with ' 'period recovery status: %s' % (recovered_periodicvars.size, actual_periodicvars.size, float(recovered_periodicvars.size/actual_periodicvars.size), ', '.join(recovered_status))) # get the objects recovered per bin and overall recovery fractions per bin magbinned_recovered_objects = [ np.intersect1d(x,recovered_periodicvars) for x in magbinned_periodicvars ] magbinned_recfrac = np.array([float(x.size/y.size) for x,y in zip(magbinned_recovered_objects, magbinned_periodicvars)]) periodbinned_recovered_objects = [ np.intersect1d(x,recovered_periodicvars) for x in periodbinned_periodicvars ] periodbinned_recfrac = np.array([float(x.size/y.size) for x,y in zip(periodbinned_recovered_objects, periodbinned_periodicvars)]) amplitudebinned_recovered_objects = [ np.intersect1d(x,recovered_periodicvars) for x in amplitudebinned_periodicvars ] amplitudebinned_recfrac = np.array( [float(x.size/y.size) for x,y in zip(amplitudebinned_recovered_objects, amplitudebinned_periodicvars)] ) ndetbinned_recovered_objects = [ np.intersect1d(x,recovered_periodicvars) for x in ndetbinned_periodicvars ] ndetbinned_recfrac = np.array([float(x.size/y.size) for x,y in zip(ndetbinned_recovered_objects, ndetbinned_periodicvars)]) # convert the bin medians to arrays magbinned_sdssr = np.array(magbinned_sdssr) periodbinned_periods = np.array(periodbinned_periods) amplitudebinned_amplitudes = np.array(amplitudebinned_amplitudes) ndetbinned_ndets = np.array(ndetbinned_ndets) # this is the initial output dict outdict = { 'simbasedir':simbasedir, 'precvar_results':precvar, 'magcols':magcols, 'objectids':objectid, 'ndet':ndet, 'sdssr':sdssr, 'actual_periodicvars':actual_periodicvars, 'recovered_periodicvars':recovered_periodicvars, 'recovery_definition':recovered_status, # mag binned actual periodicvars # note that only bins with nobjects > minbinsize are included 'magbins':magbins, 'magbinned_mags':magbinned_sdssr, 'magbinned_periodicvars':magbinned_periodicvars, 'magbinned_recoveredvars':magbinned_recovered_objects, 'magbinned_recfrac':magbinned_recfrac, # period binned actual periodicvars # note that only bins with nobjects > minbinsize are included 'periodbins':periodbins, 'periodbinned_periods':periodbinned_periods, 'periodbinned_periodicvars':periodbinned_periodicvars, 'periodbinned_recoveredvars':periodbinned_recovered_objects, 
'periodbinned_recfrac':periodbinned_recfrac, # amplitude binned actual periodicvars # note that only bins with nobjects > minbinsize are included 'amplitudebins':amplitudebins, 'amplitudebinned_amplitudes':amplitudebinned_amplitudes, 'amplitudebinned_periodicvars':amplitudebinned_periodicvars, 'amplitudebinned_recoveredvars':amplitudebinned_recovered_objects, 'amplitudebinned_recfrac':amplitudebinned_recfrac, # ndet binned actual periodicvars # note that only bins with nobjects > minbinsize are included 'ndetbins':ndetbins, 'ndetbinned_ndets':ndetbinned_ndets, 'ndetbinned_periodicvars':ndetbinned_periodicvars, 'ndetbinned_recoveredvars':ndetbinned_recovered_objects, 'ndetbinned_recfrac':ndetbinned_recfrac, } # figure out which pfmethods were used all_pfmethods = np.unique( np.concatenate( [np.unique(precvar['details'][x]['recovery_pfmethods']) for x in precvar['details']] ) ) # figure out all vartypes all_vartypes = np.unique( [(precvar['details'][x]['actual_vartype']) for x in precvar['details'] if (precvar['details'][x]['actual_vartype'] is not None)] ) # figure out all alias types all_aliastypes = recovered_status # add these to the outdict outdict['aliastypes'] = all_aliastypes outdict['pfmethods'] = all_pfmethods outdict['vartypes'] = all_vartypes # these are recfracs per-magcol, -vartype, -periodfinder, -aliastype # binned appropriately by mags, periods, amplitudes, and ndet # all of these have the shape as the magcols, aliastypes, pfmethods, and # vartypes lists above. magbinned_per_magcol_recfracs = [] magbinned_per_vartype_recfracs = [] magbinned_per_pfmethod_recfracs = [] magbinned_per_aliastype_recfracs = [] periodbinned_per_magcol_recfracs = [] periodbinned_per_vartype_recfracs = [] periodbinned_per_pfmethod_recfracs = [] periodbinned_per_aliastype_recfracs = [] amplitudebinned_per_magcol_recfracs = [] amplitudebinned_per_vartype_recfracs = [] amplitudebinned_per_pfmethod_recfracs = [] amplitudebinned_per_aliastype_recfracs = [] ndetbinned_per_magcol_recfracs = [] ndetbinned_per_vartype_recfracs = [] ndetbinned_per_pfmethod_recfracs = [] ndetbinned_per_aliastype_recfracs = [] # # finally, we do stuff for the plots! # recplotdir = os.path.join(simbasedir, 'periodic-variable-recovery-plots') if not os.path.exists(recplotdir): os.mkdir(recplotdir) # 1. recovery-rate by magbin # 1a. plot of overall recovery rate per magbin fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) plt.plot(magbinned_sdssr, magbinned_recfrac,marker='.',ms=0.0) plt.xlabel(r'SDSS $r$ magnitude') plt.ylabel('recovered fraction of periodic variables') plt.title('overall recovery fraction by periodic var magnitudes') plt.ylim((0,1)) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-magnitudes-overall.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 1b. 
plot of recovery rate per magbin per magcol fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) for magcol in magcols: thismagcol_recfracs = [] for magbin_pv, magbin_rv in zip(magbinned_periodicvars, magbinned_recovered_objects): thisbin_thismagcol_recvars = [ x for x in magbin_rv if (precvar['details'][x]['best_recovered_magcol'] == magcol) ] thisbin_thismagcol_recfrac = ( np.array(thisbin_thismagcol_recvars).size / magbin_pv.size ) thismagcol_recfracs.append(thisbin_thismagcol_recfrac) # now that we have per magcol recfracs, plot them plt.plot(magbinned_sdssr, np.array(thismagcol_recfracs), marker='.', label='magcol: %s' % magcol, ms=0.0) # add this to the outdict array magbinned_per_magcol_recfracs.append(np.array(thismagcol_recfracs)) # finish up the plot plt.plot(magbinned_sdssr, magbinned_recfrac, marker='.',ms=0.0, label='overall', color='k') plt.xlabel(r'SDSS $r$ magnitude') plt.ylabel('recovered fraction of periodic variables') plt.title('per magcol recovery fraction by periodic var magnitudes') plt.ylim((0,1)) plt.legend(markerscale=10.0) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-magnitudes-magcols.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 1c. plot of recovery rate per magbin per periodfinder fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) # figure out which pfmethods were used all_pfmethods = np.unique( np.concatenate( [np.unique(precvar['details'][x]['recovery_pfmethods']) for x in precvar['details']] ) ) for pfm in all_pfmethods: thispf_recfracs = [] for magbin_pv, magbin_rv in zip(magbinned_periodicvars, magbinned_recovered_objects): thisbin_thispf_recvars = [ x for x in magbin_rv if (precvar['details'][x]['best_recovered_pfmethod'] == pfm) ] thisbin_thismagcol_recfrac = ( np.array(thisbin_thispf_recvars).size / magbin_pv.size ) thispf_recfracs.append(thisbin_thismagcol_recfrac) # now that we have per magcol recfracs, plot them plt.plot(magbinned_sdssr, np.array(thispf_recfracs), marker='.', label='%s' % pfm.upper(), ms=0.0) # add this to the outdict array magbinned_per_pfmethod_recfracs.append(np.array(thispf_recfracs)) # finish up the plot plt.plot(magbinned_sdssr, magbinned_recfrac, marker='.',ms=0.0, label='overall', color='k') plt.xlabel(r'SDSS $r$ magnitude') plt.ylabel('recovered fraction of periodic variables') plt.title('per period-finder recovery fraction by periodic var magnitudes') plt.ylim((0,1)) plt.legend(markerscale=10.0) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-magnitudes-pfmethod.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 1d. 
plot of recovery rate per magbin per variable type fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) # figure out all vartypes all_vartypes = np.unique( [(precvar['details'][x]['actual_vartype']) for x in precvar['details'] if (precvar['details'][x]['actual_vartype'] is not None)] ) for vt in all_vartypes: thisvt_recfracs = [] for magbin_pv, magbin_rv in zip(magbinned_periodicvars, magbinned_recovered_objects): thisbin_thisvt_recvars = [ x for x in magbin_rv if (precvar['details'][x]['actual_vartype'] == vt) ] thisbin_thismagcol_recfrac = ( np.array(thisbin_thisvt_recvars).size / magbin_pv.size ) thisvt_recfracs.append(thisbin_thismagcol_recfrac) # now that we have per magcol recfracs, plot them plt.plot(magbinned_sdssr, np.array(thisvt_recfracs), marker='.', label='%s' % vt, ms=0.0) # add this to the outdict array magbinned_per_vartype_recfracs.append(np.array(thisvt_recfracs)) # finish up the plot plt.plot(magbinned_sdssr, magbinned_recfrac, marker='.',ms=0.0, label='overall', color='k') plt.xlabel(r'SDSS $r$ magnitude') plt.ylabel('recovered fraction of periodic variables') plt.title('per vartype recovery fraction by periodic var magnitudes') plt.ylim((0,1)) plt.legend(markerscale=10.0) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-magnitudes-vartype.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 1e. plot of recovery rate per magbin per alias type fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) # figure out all alias types all_aliastypes = recovered_status for at in all_aliastypes: thisat_recfracs = [] for magbin_pv, magbin_rv in zip(magbinned_periodicvars, magbinned_recovered_objects): thisbin_thisat_recvars = [ x for x in magbin_rv if (precvar['details'][x]['best_recovered_status'][0] == at) ] thisbin_thismagcol_recfrac = ( np.array(thisbin_thisat_recvars).size / magbin_pv.size ) thisat_recfracs.append(thisbin_thismagcol_recfrac) # now that we have per magcol recfracs, plot them plt.plot(magbinned_sdssr, np.array(thisat_recfracs), marker='.', label='%s' % at, ms=0.0) # add this to the outdict array magbinned_per_aliastype_recfracs.append(np.array(thisat_recfracs)) # finish up the plot plt.plot(magbinned_sdssr, magbinned_recfrac, marker='.',ms=0.0, label='overall', color='k') plt.xlabel(r'SDSS $r$ magnitude') plt.ylabel('recovered fraction of periodic variables') plt.title('per alias-type recovery fraction by periodic var magnitudes') plt.ylim((0,1)) plt.legend(markerscale=10.0) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-magnitudes-aliastype.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 2. recovery-rate by periodbin # 2a. plot of overall recovery rate per periodbin fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) plt.plot(periodbinned_periods, periodbinned_recfrac, marker='.',ms=0.0) plt.xlabel('periodic variable period [days]') plt.ylabel('recovered fraction of periodic variables') plt.title('overall recovery fraction by periodic var periods') plt.ylim((0,1)) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-periods-overall.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 2b. 
plot of recovery rate per periodbin per magcol fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) for magcol in magcols: thismagcol_recfracs = [] for periodbin_pv, periodbin_rv in zip(periodbinned_periodicvars, periodbinned_recovered_objects): thisbin_thismagcol_recvars = [ x for x in periodbin_rv if (precvar['details'][x]['best_recovered_magcol'] == magcol) ] thisbin_thismagcol_recfrac = ( np.array(thisbin_thismagcol_recvars).size / periodbin_pv.size ) thismagcol_recfracs.append(thisbin_thismagcol_recfrac) # now that we have per magcol recfracs, plot them plt.plot(periodbinned_periods, np.array(thismagcol_recfracs), marker='.', label='magcol: %s' % magcol, ms=0.0) # add this to the outdict array periodbinned_per_magcol_recfracs.append(np.array(thismagcol_recfracs)) # finish up the plot plt.plot(periodbinned_periods, periodbinned_recfrac, marker='.',ms=0.0, label='overall', color='k') plt.xlabel(r'SDSS $r$ magnitude') plt.ylabel('recovered fraction of periodic variables') plt.title('per magcol recovery fraction by periodic var periods') plt.ylim((0,1)) plt.legend(markerscale=10.0) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-periods-magcols.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 2c. plot of recovery rate per periodbin per periodfinder fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) # figure out which pfmethods were used all_pfmethods = np.unique( np.concatenate( [np.unique(precvar['details'][x]['recovery_pfmethods']) for x in precvar['details']] ) ) for pfm in all_pfmethods: thispf_recfracs = [] for periodbin_pv, periodbin_rv in zip(periodbinned_periodicvars, periodbinned_recovered_objects): thisbin_thispf_recvars = [ x for x in periodbin_rv if (precvar['details'][x]['best_recovered_pfmethod'] == pfm) ] thisbin_thismagcol_recfrac = ( np.array(thisbin_thispf_recvars).size / periodbin_pv.size ) thispf_recfracs.append(thisbin_thismagcol_recfrac) # now that we have per magcol recfracs, plot them plt.plot(periodbinned_periods, np.array(thispf_recfracs), marker='.', label='%s' % pfm.upper(), ms=0.0) # add this to the outdict array periodbinned_per_pfmethod_recfracs.append(np.array(thispf_recfracs)) # finish up the plot plt.plot(periodbinned_periods, periodbinned_recfrac, marker='.',ms=0.0, label='overall', color='k') plt.xlabel(r'SDSS $r$ magnitude') plt.ylabel('recovered fraction of periodic variables') plt.title('per period-finder recovery fraction by periodic var periods') plt.ylim((0,1)) plt.legend(markerscale=10.0) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-periods-pfmethod.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 2d. 
plot of recovery rate per periodbin per variable type fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) # figure out all vartypes all_vartypes = np.unique( [(precvar['details'][x]['actual_vartype']) for x in precvar['details'] if (precvar['details'][x]['actual_vartype'] is not None)] ) for vt in all_vartypes: thisvt_recfracs = [] for periodbin_pv, periodbin_rv in zip(periodbinned_periodicvars, periodbinned_recovered_objects): thisbin_thisvt_recvars = [ x for x in periodbin_rv if (precvar['details'][x]['actual_vartype'] == vt) ] thisbin_thismagcol_recfrac = ( np.array(thisbin_thisvt_recvars).size / periodbin_pv.size ) thisvt_recfracs.append(thisbin_thismagcol_recfrac) # now that we have per magcol recfracs, plot them plt.plot(periodbinned_periods, np.array(thisvt_recfracs), marker='.', label='%s' % vt, ms=0.0) # add this to the outdict array periodbinned_per_vartype_recfracs.append(np.array(thisvt_recfracs)) # finish up the plot plt.plot(periodbinned_periods, periodbinned_recfrac, marker='.',ms=0.0, label='overall', color='k') plt.xlabel(r'SDSS $r$ magnitude') plt.ylabel('recovered fraction of periodic variables') plt.title('per vartype recovery fraction by periodic var magnitudes') plt.ylim((0,1)) plt.legend(markerscale=10.0) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-periods-vartype.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 2e. plot of recovery rate per periodbin per alias type fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) # figure out all vartypes all_aliastypes = recovered_status for at in all_aliastypes: thisat_recfracs = [] for periodbin_pv, periodbin_rv in zip( periodbinned_periodicvars, periodbinned_recovered_objects ): thisbin_thisat_recvars = [ x for x in periodbin_rv if (precvar['details'][x]['best_recovered_status'][0] == at) ] thisbin_thismagcol_recfrac = ( np.array(thisbin_thisat_recvars).size / periodbin_pv.size ) thisat_recfracs.append(thisbin_thismagcol_recfrac) # now that we have per magcol recfracs, plot them plt.plot(periodbinned_periods, np.array(thisat_recfracs), marker='.', label='%s' % at, ms=0.0) # add this to the outdict array periodbinned_per_aliastype_recfracs.append(np.array(thisat_recfracs)) # finish up the plot plt.plot(periodbinned_periods, periodbinned_recfrac, marker='.',ms=0.0, label='overall', color='k') plt.xlabel(r'SDSS $r$ magnitude') plt.ylabel('recovered fraction of periodic variables') plt.title('per alias-type recovery fraction by periodic var magnitudes') plt.ylim((0,1)) plt.legend(markerscale=10.0) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-periods-aliastype.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 3. recovery-rate by amplitude bin # 3a. plot of overall recovery rate per amplitude bin fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac, marker='.',ms=0.0) plt.xlabel('periodic variable amplitude [mag]') plt.ylabel('recovered fraction of periodic variables') plt.title('overall recovery fraction by periodic var amplitudes') plt.ylim((0,1)) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-amplitudes-overall.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 3b. 
plot of recovery rate per amplitude bin per magcol fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) for magcol in magcols: thismagcol_recfracs = [] for amplitudebin_pv, amplitudebin_rv in zip( amplitudebinned_periodicvars, amplitudebinned_recovered_objects ): thisbin_thismagcol_recvars = [ x for x in amplitudebin_rv if (precvar['details'][x]['best_recovered_magcol'] == magcol) ] thisbin_thismagcol_recfrac = ( np.array(thisbin_thismagcol_recvars).size / amplitudebin_pv.size ) thismagcol_recfracs.append(thisbin_thismagcol_recfrac) # now that we have per magcol recfracs, plot them plt.plot(amplitudebinned_amplitudes, np.array(thismagcol_recfracs), marker='.', label='magcol: %s' % magcol, ms=0.0) # add this to the outdict array amplitudebinned_per_magcol_recfracs.append( np.array(thismagcol_recfracs) ) # finish up the plot plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac, marker='.',ms=0.0, label='overall', color='k') plt.xlabel(r'SDSS $r$ magnitude') plt.ylabel('recovered fraction of periodic variables') plt.title('per magcol recovery fraction by periodic var amplitudes') plt.ylim((0,1)) plt.legend(markerscale=10.0) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-amplitudes-magcols.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 3c. plot of recovery rate per amplitude bin per periodfinder fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) # figure out which pfmethods were used all_pfmethods = np.unique( np.concatenate( [np.unique(precvar['details'][x]['recovery_pfmethods']) for x in precvar['details']] ) ) for pfm in all_pfmethods: thispf_recfracs = [] for amplitudebin_pv, amplitudebin_rv in zip( amplitudebinned_periodicvars, amplitudebinned_recovered_objects ): thisbin_thispf_recvars = [ x for x in amplitudebin_rv if (precvar['details'][x]['best_recovered_pfmethod'] == pfm) ] thisbin_thismagcol_recfrac = ( np.array(thisbin_thispf_recvars).size / amplitudebin_pv.size ) thispf_recfracs.append(thisbin_thismagcol_recfrac) # now that we have per magcol recfracs, plot them plt.plot(amplitudebinned_amplitudes, np.array(thispf_recfracs), marker='.', label='%s' % pfm.upper(), ms=0.0) # add this to the outdict array amplitudebinned_per_pfmethod_recfracs.append( np.array(thispf_recfracs) ) # finish up the plot plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac, marker='.',ms=0.0, label='overall', color='k') plt.xlabel(r'SDSS $r$ magnitude') plt.ylabel('recovered fraction of periodic variables') plt.title('per period-finder recovery fraction by periodic var amplitudes') plt.ylim((0,1)) plt.legend(markerscale=10.0) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-amplitudes-pfmethod.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 3d. 
plot of recovery rate per amplitude bin per variable type fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) # figure out all vartypes all_vartypes = np.unique( [(precvar['details'][x]['actual_vartype']) for x in precvar['details'] if (precvar['details'][x]['actual_vartype'] is not None)] ) for vt in all_vartypes: thisvt_recfracs = [] for amplitudebin_pv, amplitudebin_rv in zip( amplitudebinned_periodicvars, amplitudebinned_recovered_objects ): thisbin_thisvt_recvars = [ x for x in amplitudebin_rv if (precvar['details'][x]['actual_vartype'] == vt) ] thisbin_thismagcol_recfrac = ( np.array(thisbin_thisvt_recvars).size / amplitudebin_pv.size ) thisvt_recfracs.append(thisbin_thismagcol_recfrac) # now that we have per magcol recfracs, plot them plt.plot(amplitudebinned_amplitudes, np.array(thisvt_recfracs), marker='.', label='%s' % vt, ms=0.0) # add this to the outdict array amplitudebinned_per_vartype_recfracs.append( np.array(thisvt_recfracs) ) # finish up the plot plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac, marker='.',ms=0.0, label='overall', color='k') plt.xlabel(r'SDSS $r$ magnitude') plt.ylabel('recovered fraction of periodic variables') plt.title('per vartype recovery fraction by periodic var amplitudes') plt.ylim((0,1)) plt.legend(markerscale=10.0) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-amplitudes-vartype.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 3e. plot of recovery rate per amplitude bin per alias type fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) # figure out all vartypes all_aliastypes = recovered_status for at in all_aliastypes: thisat_recfracs = [] for amplitudebin_pv, amplitudebin_rv in zip( amplitudebinned_periodicvars, amplitudebinned_recovered_objects ): thisbin_thisat_recvars = [ x for x in amplitudebin_rv if (precvar['details'][x]['best_recovered_status'][0] == at) ] thisbin_thismagcol_recfrac = ( np.array(thisbin_thisat_recvars).size / amplitudebin_pv.size ) thisat_recfracs.append(thisbin_thismagcol_recfrac) # now that we have per magcol recfracs, plot them plt.plot(amplitudebinned_amplitudes, np.array(thisat_recfracs), marker='.', label='%s' % at, ms=0.0) # add this to the outdict array amplitudebinned_per_aliastype_recfracs.append( np.array(thisat_recfracs) ) # finish up the plot plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac, marker='.',ms=0.0, label='overall', color='k') plt.xlabel(r'SDSS $r$ magnitude') plt.ylabel('recovered fraction of periodic variables') plt.title('per alias-type recovery fraction by periodic var amplitudes') plt.ylim((0,1)) plt.legend(markerscale=10.0) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-amplitudes-aliastype.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 4. recovery-rate by ndet bin # 4a. plot of overall recovery rate per ndet bin fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) plt.plot(ndetbinned_ndets, ndetbinned_recfrac, marker='.',ms=0.0) plt.xlabel('periodic variable light curve points') plt.ylabel('recovered fraction of periodic variables') plt.title('overall recovery fraction by periodic var ndet') plt.ylim((0,1)) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-ndet-overall.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 4b. 
plot of recovery rate per ndet bin per magcol fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) for magcol in magcols: thismagcol_recfracs = [] for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars, ndetbinned_recovered_objects): thisbin_thismagcol_recvars = [ x for x in ndetbin_rv if (precvar['details'][x]['best_recovered_magcol'] == magcol) ] thisbin_thismagcol_recfrac = ( np.array(thisbin_thismagcol_recvars).size / ndetbin_pv.size ) thismagcol_recfracs.append(thisbin_thismagcol_recfrac) # now that we have per magcol recfracs, plot them plt.plot(ndetbinned_ndets, np.array(thismagcol_recfracs), marker='.', label='magcol: %s' % magcol, ms=0.0) # add this to the outdict array ndetbinned_per_magcol_recfracs.append( np.array(thismagcol_recfracs) ) # finish up the plot plt.plot(ndetbinned_ndets, ndetbinned_recfrac, marker='.',ms=0.0, label='overall', color='k') plt.xlabel(r'SDSS $r$ magnitude') plt.ylabel('recovered fraction of periodic variables') plt.title('per magcol recovery fraction by periodic var ndets') plt.ylim((0,1)) plt.legend(markerscale=10.0) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-ndet-magcols.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 4c. plot of recovery rate per ndet bin per periodfinder fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) # figure out which pfmethods were used all_pfmethods = np.unique( np.concatenate( [np.unique(precvar['details'][x]['recovery_pfmethods']) for x in precvar['details']] ) ) for pfm in all_pfmethods: thispf_recfracs = [] for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars, ndetbinned_recovered_objects): thisbin_thispf_recvars = [ x for x in ndetbin_rv if (precvar['details'][x]['best_recovered_pfmethod'] == pfm) ] thisbin_thismagcol_recfrac = ( np.array(thisbin_thispf_recvars).size / ndetbin_pv.size ) thispf_recfracs.append(thisbin_thismagcol_recfrac) # now that we have per magcol recfracs, plot them plt.plot(ndetbinned_ndets, np.array(thispf_recfracs), marker='.', label='%s' % pfm.upper(), ms=0.0) # add this to the outdict array ndetbinned_per_pfmethod_recfracs.append( np.array(thispf_recfracs) ) # finish up the plot plt.plot(ndetbinned_ndets, ndetbinned_recfrac, marker='.',ms=0.0, label='overall', color='k') plt.xlabel(r'SDSS $r$ magnitude') plt.ylabel('recovered fraction of periodic variables') plt.title('per period-finder recovery fraction by periodic var ndets') plt.ylim((0,1)) plt.legend(markerscale=10.0) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-ndet-pfmethod.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 4d. 
plot of recovery rate per ndet bin per variable type fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) # figure out all vartypes all_vartypes = np.unique( [(precvar['details'][x]['actual_vartype']) for x in precvar['details'] if (precvar['details'][x]['actual_vartype'] in PERIODIC_VARTYPES)] ) for vt in all_vartypes: thisvt_recfracs = [] for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars, ndetbinned_recovered_objects): thisbin_thisvt_recvars = [ x for x in ndetbin_rv if (precvar['details'][x]['actual_vartype'] == vt) ] thisbin_thismagcol_recfrac = ( np.array(thisbin_thisvt_recvars).size / ndetbin_pv.size ) thisvt_recfracs.append(thisbin_thismagcol_recfrac) # now that we have per magcol recfracs, plot them plt.plot(ndetbinned_ndets, np.array(thisvt_recfracs), marker='.', label='%s' % vt, ms=0.0) # add this to the outdict array ndetbinned_per_vartype_recfracs.append( np.array(thisvt_recfracs) ) # finish up the plot plt.plot(ndetbinned_ndets, ndetbinned_recfrac, marker='.',ms=0.0, label='overall', color='k') plt.xlabel(r'SDSS $r$ magnitude') plt.ylabel('recovered fraction of periodic variables') plt.title('per vartype recovery fraction by periodic var ndets') plt.ylim((0,1)) plt.legend(markerscale=10.0) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-ndet-vartype.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 4e. plot of recovery rate per ndet bin per alias type fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) # figure out all vartypes all_aliastypes = recovered_status for at in all_aliastypes: thisat_recfracs = [] for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars, ndetbinned_recovered_objects): thisbin_thisat_recvars = [ x for x in ndetbin_rv if (precvar['details'][x]['best_recovered_status'][0] == at) ] thisbin_thismagcol_recfrac = ( np.array(thisbin_thisat_recvars).size / ndetbin_pv.size ) thisat_recfracs.append(thisbin_thismagcol_recfrac) # now that we have per magcol recfracs, plot them plt.plot(ndetbinned_ndets, np.array(thisat_recfracs), marker='.', label='%s' % at, ms=0.0) # add this to the outdict array ndetbinned_per_aliastype_recfracs.append( np.array(thisat_recfracs) ) # finish up the plot plt.plot(ndetbinned_ndets, ndetbinned_recfrac, marker='.',ms=0.0, label='overall', color='k') plt.xlabel(r'SDSS $r$ magnitude') plt.ylabel('recovered fraction of periodic variables') plt.title('per alias-type recovery fraction by periodic var ndets') plt.ylim((0,1)) plt.legend(markerscale=10.0) plt.savefig( os.path.join(recplotdir, 'recfrac-binned-ndet-aliastype.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # update the lists in the outdict outdict['magbinned_per_magcol_recfracs'] = ( magbinned_per_magcol_recfracs ) outdict['magbinned_per_pfmethod_recfracs'] = ( magbinned_per_pfmethod_recfracs ) outdict['magbinned_per_vartype_recfracs'] = ( magbinned_per_vartype_recfracs ) outdict['magbinned_per_aliastype_recfracs'] = ( magbinned_per_aliastype_recfracs ) outdict['periodbinned_per_magcol_recfracs'] = ( periodbinned_per_magcol_recfracs ) outdict['periodbinned_per_pfmethod_recfracs'] = ( periodbinned_per_pfmethod_recfracs ) outdict['periodbinned_per_vartype_recfracs'] = ( periodbinned_per_vartype_recfracs ) outdict['periodbinned_per_aliastype_recfracs'] = ( periodbinned_per_aliastype_recfracs ) outdict['amplitudebinned_per_magcol_recfracs'] = ( amplitudebinned_per_magcol_recfracs ) outdict['amplitudebinned_per_pfmethod_recfracs'] = ( amplitudebinned_per_pfmethod_recfracs ) outdict['amplitudebinned_per_vartype_recfracs'] = ( 
amplitudebinned_per_vartype_recfracs ) outdict['amplitudebinned_per_aliastype_recfracs'] = ( amplitudebinned_per_aliastype_recfracs ) outdict['ndetbinned_per_magcol_recfracs'] = ( ndetbinned_per_magcol_recfracs ) outdict['ndetbinned_per_pfmethod_recfracs'] = ( ndetbinned_per_pfmethod_recfracs ) outdict['ndetbinned_per_vartype_recfracs'] = ( ndetbinned_per_vartype_recfracs ) outdict['ndetbinned_per_aliastype_recfracs'] = ( ndetbinned_per_aliastype_recfracs ) # get the overall recovered vars per pfmethod overall_recvars_per_pfmethod = [] for pfm in all_pfmethods: thispfm_recvars = np.array([ x for x in precvar['details'] if ((x in recovered_periodicvars) and (precvar['details'][x]['best_recovered_pfmethod'] == pfm)) ]) overall_recvars_per_pfmethod.append(thispfm_recvars) # get the overall recovered vars per vartype overall_recvars_per_vartype = [] for vt in all_vartypes: thisvt_recvars = np.array([ x for x in precvar['details'] if ((x in recovered_periodicvars) and (precvar['details'][x]['actual_vartype'] == vt)) ]) overall_recvars_per_vartype.append(thisvt_recvars) # get the overall recovered vars per magcol overall_recvars_per_magcol = [] for mc in magcols: thismc_recvars = np.array([ x for x in precvar['details'] if ((x in recovered_periodicvars) and (precvar['details'][x]['best_recovered_magcol'] == mc)) ]) overall_recvars_per_magcol.append(thismc_recvars) # get the overall recovered vars per aliastype overall_recvars_per_aliastype = [] for at in all_aliastypes: thisat_recvars = np.array([ x for x in precvar['details'] if ((x in recovered_periodicvars) and (precvar['details'][x]['best_recovered_status'] == at)) ]) overall_recvars_per_aliastype.append(thisat_recvars) # update the outdict with these outdict['overall_recfrac_per_pfmethod'] = np.array([ x.size/actual_periodicvars.size for x in overall_recvars_per_pfmethod ]) outdict['overall_recfrac_per_vartype'] = np.array([ x.size/actual_periodicvars.size for x in overall_recvars_per_vartype ]) outdict['overall_recfrac_per_magcol'] = np.array([ x.size/actual_periodicvars.size for x in overall_recvars_per_magcol ]) outdict['overall_recfrac_per_aliastype'] = np.array([ x.size/actual_periodicvars.size for x in overall_recvars_per_aliastype ]) # 5. bar plot of overall recovery rate per pfmethod fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) xt = np.arange(len(all_pfmethods)) xl = all_pfmethods plt.barh(xt, outdict['overall_recfrac_per_pfmethod'], 0.50) plt.yticks(xt, xl) plt.xlabel('period-finding method') plt.ylabel('overall recovery rate') plt.title('overall recovery rate per period-finding method') plt.savefig( os.path.join(recplotdir, 'recfrac-overall-pfmethod.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 6. bar plot of overall recovery rate per magcol fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) xt = np.arange(len(magcols)) xl = magcols plt.barh(xt, outdict['overall_recfrac_per_magcol'], 0.50) plt.yticks(xt, xl) plt.xlabel('light curve magnitude column') plt.ylabel('overall recovery rate') plt.title('overall recovery rate per light curve magcol') plt.savefig( os.path.join(recplotdir, 'recfrac-overall-magcol.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 7. 
bar plot of overall recovery rate per aliastype fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) xt = np.arange(len(all_aliastypes)) xl = all_aliastypes plt.barh(xt, outdict['overall_recfrac_per_aliastype'], 0.50) plt.yticks(xt, xl) plt.xlabel('period recovery status') plt.ylabel('overall recovery rate') plt.title('overall recovery rate per period recovery status') plt.savefig( os.path.join(recplotdir, 'recfrac-overall-aliastype.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 8. bar plot of overall recovery rate per vartype fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) xt = np.arange(len(all_vartypes)) xl = all_vartypes plt.barh(xt, outdict['overall_recfrac_per_vartype'], 0.50) plt.yticks(xt, xl) plt.xlabel('periodic variable type') plt.ylabel('overall recovery rate') plt.title('overall recovery rate per periodic variable type') plt.savefig( os.path.join(recplotdir, 'recfrac-overall-vartype.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 9. overall recovered period periodogram for objects that aren't actual # periodic variables. this effectively should give us the window function of # the observations notvariable_recovered_periods = np.concatenate([ precvar['details'][x]['recovery_periods'] for x in precvar['details'] if (precvar['details'][x]['actual_vartype'] is None) ]) notvariable_recovered_lspvals = np.concatenate([ precvar['details'][x]['recovery_lspvals'] for x in precvar['details'] if (precvar['details'][x]['actual_vartype'] is None) ]) sortind = np.argsort(notvariable_recovered_periods) notvariable_recovered_periods = notvariable_recovered_periods[sortind] notvariable_recovered_lspvals = notvariable_recovered_lspvals[sortind] outdict['notvariable_recovered_periods'] = notvariable_recovered_periods outdict['notvariable_recovered_lspvals'] = notvariable_recovered_lspvals fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) plt.plot(notvariable_recovered_periods, notvariable_recovered_lspvals, ms=1.0,linestyle='none',marker='.') plt.xscale('log') plt.xlabel('recovered periods [days]') plt.ylabel('recovered normalized periodogram power') plt.title('periodogram for actual not-variable objects') plt.savefig( os.path.join(recplotdir, 'recovered-periodogram-nonvariables.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # 10. overall recovered period histogram for objects marked # not-variable. this gives us the most common periods fig = plt.figure(figsize=(6.4*1.5,4.8*1.5)) plt.hist(notvariable_recovered_periods,bins=np.arange(0.02,300.0,1.0e-3), histtype='step') plt.xscale('log') plt.xlabel('recovered periods [days]') plt.ylabel('number of times periods recovered') plt.title('recovered period histogram for non-variable objects') plt.savefig( os.path.join(recplotdir, 'recovered-period-hist-nonvariables.%s' % plotfile_ext), dpi=100, bbox_inches='tight' ) plt.close('all') # at the end, write the outdict to a pickle and return it outfile = os.path.join(simbasedir, 'periodicvar-recovery-plotresults.pkl') with open(outfile,'wb') as outfd: pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL) return outdict
This plots the results of periodic var recovery.

This function makes plots for periodicvar recovered fraction as a function
of:

- magbin
- periodbin
- amplitude of variability
- ndet

with plot lines broken down by:

- magcol
- periodfinder
- vartype
- recovery status

The kwargs `magbins`, `periodbins`, `amplitudebins`, and `ndetbins` can be
used to set the bin lists as needed. The kwarg `minbinsize` controls how many
elements per bin are required to accept a bin in processing its recovery
characteristics for mags, periods, amplitudes, and ndets.

Parameters
----------

precvar_results : dict or str
    This is either a dict returned by parallel_periodicvar_recovery or the
    pickle created by that function.

aliases_count_as_recovered : list of str or 'all'
    This is used to set which kinds of aliases this function considers as
    'recovered' objects. Normally, we require that recovered objects have a
    recovery status of 'actual' to indicate the actual period was
    recovered. To change this default behavior, aliases_count_as_recovered
    can be set to a list of alias status strings that should be considered
    as 'recovered' objects as well. Choose from the following alias types::

        'twice'                    recovered_p = 2.0*actual_p
        'half'                     recovered_p = 0.5*actual_p
        'ratio_over_1plus'         recovered_p = actual_p/(1.0+actual_p)
        'ratio_over_1minus'        recovered_p = actual_p/(1.0-actual_p)
        'ratio_over_1plus_twice'   recovered_p = actual_p/(1.0+2.0*actual_p)
        'ratio_over_1minus_twice'  recovered_p = actual_p/(1.0-2.0*actual_p)
        'ratio_over_1plus_thrice'  recovered_p = actual_p/(1.0+3.0*actual_p)
        'ratio_over_1minus_thrice' recovered_p = actual_p/(1.0-3.0*actual_p)
        'ratio_over_minus1'        recovered_p = actual_p/(actual_p - 1.0)
        'ratio_over_twice_minus1'  recovered_p = actual_p/(2.0*actual_p - 1.0)

    or set `aliases_count_as_recovered='all'` to include all of the above in
    the 'recovered' periodic var list.

magbins : np.array
    The magnitude bins to plot the recovery rate results over. If None, the
    default mag bins will be used: `np.arange(8.0,16.25,0.25)`.

periodbins : np.array
    The period bins to plot the recovery rate results over. If None, the
    default period bins will be used: `np.arange(0.0,500.0,0.5)`.

amplitudebins : np.array
    The variability amplitude bins to plot the recovery rate results over. If
    None, the default amplitude bins will be used:
    `np.arange(0.0,2.0,0.05)`.

ndetbins : np.array
    The ndet bins to plot the recovery rate results over. If None, the
    default ndet bins will be used: `np.arange(0.0,60000.0,1000.0)`.

minbinsize : int
    The minimum number of objects per bin required to plot a bin and its
    recovery fraction on the plot.

plotfile_ext : {'png','pdf'}
    Sets the plot output files' extension.

Returns
-------

dict
    A dict containing recovery fraction statistics and the paths to each of
    the plots made.
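A minimal usage sketch for the function documented above. The import path,
the input pickle name, and the function name itself are assumptions (the
`def` line sits outside this excerpt); only the keyword arguments and the
output dict key come from the docstring and code above.

# Hypothetical usage sketch -- module path and pickle filename are assumed.
import numpy as np
from astrobase.fakelcs.recovery import plot_periodicvar_recovery_results

results = plot_periodicvar_recovery_results(
    '/path/to/simbasedir/periodicvar-recovery.pkl',   # made-up path
    aliases_count_as_recovered=['twice', 'half'],
    magbins=np.arange(8.0, 16.25, 0.25),
    minbinsize=10,
    plotfile_ext='png',
)
print(results['overall_recfrac_per_pfmethod'])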
def getDescendantsUIDs(self, all_descendants=False):
    """Returns the UIDs of the descendant Analysis Requests

    This method is used as metadata
    """
    descendants = self.getDescendants(all_descendants=all_descendants)
    return map(api.get_uid, descendants)
Returns the UIDs of the descendant Analysis Requests This method is used as metadata
def get_installation_order(self, req_set):
    # type: (RequirementSet) -> List[InstallRequirement]
    """Create the installation order.

    The installation order is topological - requirements are installed
    before the requiring thing. We break cycles at an arbitrary point,
    and make no other guarantees.
    """
    # The current implementation, which we may change at any point,
    # installs the user specified things in the order given, except when
    # dependencies must come earlier to achieve topological order.
    order = []
    ordered_reqs = set()  # type: Set[InstallRequirement]

    def schedule(req):
        if req.satisfied_by or req in ordered_reqs:
            return
        if req.constraint:
            return
        ordered_reqs.add(req)
        for dep in self._discovered_dependencies[req.name]:
            schedule(dep)
        order.append(req)

    for install_req in req_set.requirements.values():
        schedule(install_req)
    return order
Create the installation order. The installation order is topological - requirements are installed before the requiring thing. We break cycles at an arbitrary point, and make no other guarantees.
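The same post-order traversal can be shown without pip's internals; the
self-contained sketch below uses a plain dict as the dependency map, and the
package names are made up for illustration.

def installation_order(requested, dependencies):
    # dependencies are emitted before the things that need them (post-order
    # DFS); cycles are broken by the visited-set check
    order, seen = [], set()

    def schedule(name):
        if name in seen:
            return
        seen.add(name)
        for dep in dependencies.get(name, []):
            schedule(dep)
        order.append(name)

    for name in requested:
        schedule(name)
    return order

print(installation_order(
    ['flask'],
    {'flask': ['werkzeug', 'jinja2'], 'jinja2': ['markupsafe']},
))
# ['werkzeug', 'markupsafe', 'jinja2', 'flask']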
def _X509__asn1date_to_datetime(asn1date):
    """
    Converts openssl ASN1_TIME object to python datetime.datetime
    """
    bio = Membio()
    libcrypto.ASN1_TIME_print(bio.bio, asn1date)
    pydate = datetime.strptime(str(bio), "%b %d %H:%M:%S %Y %Z")
    return pydate.replace(tzinfo=utc)
Converts openssl ASN1_TIME object to python datetime.datetime
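Only the final parsing step is easy to show standalone: the OpenSSL binding
renders the ASN1_TIME as text such as "Jan  1 00:00:00 2030 GMT", which
strptime then consumes. The sample string below is made up.

from datetime import datetime, timezone

printed = "Jan  1 00:00:00 2030 GMT"   # typical ASN1_TIME_print output (assumed)
parsed = datetime.strptime(printed, "%b %d %H:%M:%S %Y %Z")
parsed = parsed.replace(tzinfo=timezone.utc)
print(parsed.isoformat())  # 2030-01-01T00:00:00+00:00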
def start(main_gui_class, **kwargs):
    """This method starts the webserver with a specific App subclass."""
    debug = kwargs.pop('debug', False)
    standalone = kwargs.pop('standalone', False)

    logging.basicConfig(level=logging.DEBUG if debug else logging.INFO,
                        format='%(name)-16s %(levelname)-8s %(message)s')
    logging.getLogger('remi').setLevel(
        level=logging.DEBUG if debug else logging.INFO)

    if standalone:
        s = StandaloneServer(main_gui_class, start=True, **kwargs)
    else:
        s = Server(main_gui_class, start=True, **kwargs)
This method starts the webserver with a specific App subclass.
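A minimal sketch of launching such a server, assuming this is remi's
start(); the App subclass, the port, and the debug flag are illustrative.

import remi.gui as gui
from remi import App, start

class HelloApp(App):
    def main(self):
        # main() returns the root widget of the GUI
        return gui.Label('Hello, world!')

start(HelloApp, port=8081, debug=True)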
def _time_to_string(self, dt, conversion_string="%Y %m %d %H %M"):
    """
    This converts a UTC time integer to a string
    """
    if self.output_timezone is not None:
        dt = dt.replace(tzinfo=utc) \
               .astimezone(self.output_timezone)
    return dt.strftime(conversion_string)
This converts a UTC time integer to a string
def make(self):
    """
    Make the lock file.
    """
    try:
        # Create the lock file
        self.mkfile(self.lock_file)
    except Exception as e:
        self.die('Failed to generate lock file: {}'.format(str(e)))
Make the lock file.
def write_quotes(self, quotes):
    ''' write quotes '''
    if self.first:
        Base.metadata.create_all(self.engine, checkfirst=True)
        self.first = False

    session = self.getWriteSession()
    session.add_all([self.__quoteToSql(quote) for quote in quotes])
write quotes
def _createIndexesFor(self, tableClass, extantIndexes):
    """
    Create any indexes which don't exist and are required by the schema
    defined by C{tableClass}.

    @param tableClass: A L{MetaItem} instance which may define a schema
        which includes indexes.

    @param extantIndexes: A container (anything which can be the right-hand
        argument to the C{in} operator) which contains the unqualified
        names of all indexes which already exist in the underlying database
        and do not need to be created.
    """
    try:
        indexes = _requiredTableIndexes[tableClass]
    except KeyError:
        indexes = set()
        for nam, atr in tableClass.getSchema():
            if atr.indexed:
                indexes.add(((atr.getShortColumnName(self),),
                             (atr.attrname,)))
            for compound in atr.compoundIndexes:
                indexes.add((tuple(inatr.getShortColumnName(self)
                                   for inatr in compound),
                             tuple(inatr.attrname
                                   for inatr in compound)))
        _requiredTableIndexes[tableClass] = indexes

    # _ZOMFG_ SQL is such a piece of _shit_: you can't fully qualify the
    # table name in CREATE INDEX statements because the _INDEX_ is fully
    # qualified!
    indexColumnPrefix = '.'.join(self.getTableName(tableClass).split(".")[1:])

    for (indexColumns, indexAttrs) in indexes:
        nameOfIndex = self._indexNameOf(tableClass, indexAttrs)
        if nameOfIndex in extantIndexes:
            continue
        csql = 'CREATE INDEX %s.%s ON %s(%s)' % (
            self.databaseName, nameOfIndex, indexColumnPrefix,
            ', '.join(indexColumns))
        self.createSQL(csql)
Create any indexes which don't exist and are required by the schema defined
by C{tableClass}.

@param tableClass: A L{MetaItem} instance which may define a schema which
    includes indexes.

@param extantIndexes: A container (anything which can be the right-hand
    argument to the C{in} operator) which contains the unqualified names of
    all indexes which already exist in the underlying database and do not
    need to be created.
def accept(kind, doc=None, error_text=None, exception_handlers=empty.dict, accept_context=False):
    """Allows quick wrapping of any Python type cast function for use as a hug type annotation"""
    return create(
        doc,
        error_text,
        exception_handlers=exception_handlers,
        chain=False,
        accept_context=accept_context
    )(kind)
Allows quick wrapping of any Python type cast function for use as a hug type annotation
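A usage sketch based only on the signature shown above; the hug.types import
path and the '/double' route are assumptions for illustration.

import hug

IntCast = hug.types.accept(int, doc='An integer',
                           error_text='Invalid integer provided')

@hug.get('/double')
def double(value: IntCast):
    # hug applies IntCast to the incoming query parameter before calling us
    return value * 2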
def load(fh, single=False, version=_default_version, strict=False, errors='warn'):
    """
    Deserialize SimpleMRSs from a file (handle or filename)

    Args:
        fh (str, file): input filename or file object
        single: if `True`, only return the first read Xmrs object
        strict: deprecated; a `True` value is the same as
            `errors='strict'`, and a `False` value is the same as
            `errors='warn'`
        errors: if `'strict'`, ill-formed MRSs raise an error; if
            `'warn'`, raise a warning instead; if `'ignore'`, do not warn
            or raise errors for ill-formed MRSs
    Returns:
        a generator of Xmrs objects (unless the *single* option is `True`)
    """
    if isinstance(fh, stringtypes):
        s = open(fh, 'r').read()
    else:
        s = fh.read()
    return loads(s, single=single, version=version, strict=strict, errors=errors)
Deserialize SimpleMRSs from a file (handle or filename)

Args:
    fh (str, file): input filename or file object
    single: if `True`, only return the first read Xmrs object
    strict: deprecated; a `True` value is the same as `errors='strict'`,
        and a `False` value is the same as `errors='warn'`
    errors: if `'strict'`, ill-formed MRSs raise an error; if `'warn'`,
        raise a warning instead; if `'ignore'`, do not warn or raise
        errors for ill-formed MRSs
Returns:
    a generator of Xmrs objects (unless the *single* option is `True`)
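A usage sketch; the delphin.mrs.simplemrs import path and the input file
name are assumptions, only the call signature comes from the code above.

from delphin.mrs import simplemrs

# iterate over all MRSs in a file of SimpleMRS strings
for xmrs in simplemrs.load('sentences.mrs', errors='warn'):
    print(xmrs)

# or take only the first reading
first = simplemrs.load('sentences.mrs', single=True)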
def get_blob(
    self, blob_name, client=None, encryption_key=None, generation=None, **kwargs
):
    """Get a blob object by name.

    This will return None if the blob doesn't exist:

    .. literalinclude:: snippets.py
        :start-after: [START get_blob]
        :end-before: [END get_blob]

    If :attr:`user_project` is set, bills the API request to that project.

    :type blob_name: str
    :param blob_name: The name of the blob to retrieve.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use. If not passed, falls back
                   to the ``client`` stored on the current bucket.

    :type encryption_key: bytes
    :param encryption_key:
        Optional 32 byte encryption key for customer-supplied encryption.
        See
        https://cloud.google.com/storage/docs/encryption#customer-supplied.

    :type generation: long
    :param generation: Optional. If present, selects a specific revision of
                       this object.

    :type kwargs: dict
    :param kwargs: Keyword arguments to pass to the
                   :class:`~google.cloud.storage.blob.Blob` constructor.

    :rtype: :class:`google.cloud.storage.blob.Blob` or None
    :returns: The blob object if it exists, otherwise None.
    """
    blob = Blob(
        bucket=self,
        name=blob_name,
        encryption_key=encryption_key,
        generation=generation,
        **kwargs
    )
    try:
        # NOTE: This will not fail immediately in a batch. However, when
        # Batch.finish() is called, the resulting `NotFound` will be
        # raised.
        blob.reload(client=client)
    except NotFound:
        return None
    else:
        return blob
Get a blob object by name. This will return None if the blob doesn't exist: .. literalinclude:: snippets.py :start-after: [START get_blob] :end-before: [END get_blob] If :attr:`user_project` is set, bills the API request to that project. :type blob_name: str :param blob_name: The name of the blob to retrieve. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :type encryption_key: bytes :param encryption_key: Optional 32 byte encryption key for customer-supplied encryption. See https://cloud.google.com/storage/docs/encryption#customer-supplied. :type generation: long :param generation: Optional. If present, selects a specific revision of this object. :type kwargs: dict :param kwargs: Keyword arguments to pass to the :class:`~google.cloud.storage.blob.Blob` constructor. :rtype: :class:`google.cloud.storage.blob.Blob` or None :returns: The blob object if it exists, otherwise None.
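A usage sketch with the google-cloud-storage client; the bucket and object
names are illustrative.

from google.cloud import storage

client = storage.Client()
bucket = client.bucket('my-bucket')

blob = bucket.get_blob('path/to/object.txt')
if blob is None:
    print('no such object')
else:
    print(blob.name, blob.size, blob.updated)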
def envs(self):
    '''
    Return the available environments
    '''
    ret = []
    for saltenv in self.opts['pillar_roots']:
        ret.append(saltenv)
    return ret
Return the available environments
def get_word_index(tokens, char_index):
    '''
    Given word return word index.
    '''
    for (i, token) in enumerate(tokens):
        if token['char_end'] == 0:
            continue
        if token['char_begin'] <= char_index and char_index <= token['char_end']:
            return i
    return 0
Given word return word index.
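An illustration with a hand-built token list; the char_begin/char_end keys
match what get_word_index above expects, and the sentence is made up.

tokens = [
    {'word': 'Hello', 'char_begin': 0, 'char_end': 5},
    {'word': ',',     'char_begin': 5, 'char_end': 6},
    {'word': 'world', 'char_begin': 7, 'char_end': 12},
]

print(get_word_index(tokens, 9))    # 2 -> character 9 falls inside 'world'
print(get_word_index(tokens, 99))   # 0 -> out-of-range falls back to 0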
def _encode_attribute(self, name, type_):
    '''(INTERNAL) Encodes an attribute line.

    The attribute follows the template::

         @attribute <attribute-name> <datatype>

    where ``attribute-name`` is a string, and ``datatype`` can be:

    - Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
    - Strings as ``STRING``.
    - Dates (NOT IMPLEMENTED).
    - Nominal attributes with format:

        {<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}

    This method must receive the name of the attribute and its type; if the
    attribute type is nominal, ``type_`` must be a list of values.

    :param name: a string.
    :param type_: a string or a list of string.
    :return: a string with the encoded attribute declaration.
    '''
    for char in ' %{},':
        if char in name:
            name = '"%s"' % name
            break

    if isinstance(type_, (tuple, list)):
        type_tmp = [u'%s' % encode_string(type_k) for type_k in type_]
        type_ = u'{%s}' % (u', '.join(type_tmp))

    return u'%s %s %s' % (_TK_ATTRIBUTE, name, type_)
(INTERNAL) Encodes an attribute line.

The attribute follows the template::

     @attribute <attribute-name> <datatype>

where ``attribute-name`` is a string, and ``datatype`` can be:

- Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
- Strings as ``STRING``.
- Dates (NOT IMPLEMENTED).
- Nominal attributes with format:

    {<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}

This method must receive the name of the attribute and its type; if the
attribute type is nominal, ``type_`` must be a list of values.

:param name: a string.
:param type_: a string or a list of string.
:return: a string with the encoded attribute declaration.
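A sketch of the lines this encoder produces. The assumption is that the
enclosing class is liac-arff's ArffEncoder with _TK_ATTRIBUTE equal to
'@ATTRIBUTE'; the attribute names and the exact quoting of nominal values
(done by encode_string) are illustrative.

import arff  # liac-arff (assumed)

enc = arff.ArffEncoder()
print(enc._encode_attribute('temperature', 'REAL'))
# @ATTRIBUTE temperature REAL
print(enc._encode_attribute('outlook', ['sunny', 'overcast', 'rainy']))
# @ATTRIBUTE outlook {sunny, overcast, rainy}
print(enc._encode_attribute('a name with spaces', 'STRING'))
# @ATTRIBUTE "a name with spaces" STRING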
def lemmatize(self, text, best_guess=True, return_frequencies=False):
    """Lemmatize all tokens in a string or a list.

    A string is first tokenized using punkt. Throw a type error if the
    input is neither a string nor a list.
    """
    if isinstance(text, str):
        tokens = wordpunct_tokenize(text)
    elif isinstance(text, list):
        tokens = text
    else:
        raise TypeError("lemmatize only works with strings or lists of string tokens.")

    return [self._lemmatize_token(token, best_guess, return_frequencies) for token in tokens]
Lemmatize all tokens in a string or a list. A string is first tokenized using punkt. Throw a type error if the input is neither a string nor a list.
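A self-contained sketch of the same string/list dispatch; the whitespace
tokenizer and the lemma lookup dict are stand-ins, not the backoff chain
used by _lemmatize_token.

def toy_lemmatize(text, lookup):
    if isinstance(text, str):
        tokens = text.split()        # stand-in for wordpunct_tokenize
    elif isinstance(text, list):
        tokens = text
    else:
        raise TypeError("only strings or lists of string tokens are accepted")
    return [lookup.get(tok.lower(), tok) for tok in tokens]

print(toy_lemmatize("The cats were running",
                    {"cats": "cat", "were": "be", "running": "run"}))
# ['The', 'cat', 'be', 'run']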
def get_version():
    """Extracts the version number from the version.py file."""
    VERSION_FILE = '../malcolm/version.py'
    mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]',
                   open(VERSION_FILE, 'rt').read(), re.M)
    if mo:
        return mo.group(1)
    else:
        raise RuntimeError(
            'Unable to find version string in {0}.'.format(VERSION_FILE))
Extracts the version number from the version.py file.
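The same regex can be exercised against an in-memory version.py body; the
module contents below are made up.

import re

sample = "__version__ = '4.2.1'\n"
mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', sample, re.M)
print(mo.group(1))  # 4.2.1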
def plot_points(points, show=True):
    """
    Plot an (n,3) list of points using matplotlib

    Parameters
    -------------
    points : (n, 3) float
       Points in space
    show : bool
       If False, will not show until plt.show() is called
    """
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # NOQA

    points = np.asanyarray(points, dtype=np.float64)

    if len(points.shape) != 2:
        raise ValueError('Points must be (n, 2|3)!')

    if points.shape[1] == 3:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(*points.T)
    elif points.shape[1] == 2:
        plt.scatter(*points.T)
    else:
        raise ValueError('points not 2D/3D: {}'.format(points.shape))

    if show:
        plt.show()
Plot an (n,3) list of points using matplotlib

Parameters
-------------
points : (n, 3) float
    Points in space
show : bool
    If False, will not show until plt.show() is called
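A usage sketch with random data (requires matplotlib); it calls the
plot_points defined above, and the point counts are arbitrary.

import numpy as np

pts = np.random.random((100, 3))
plot_points(pts)                      # 3D scatter, shown immediately

plot_points(pts[:, :2], show=False)   # 2D scatter, display deferred
# further plotting can happen here before an explicit plt.show()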