Dataset schema (one value per line in each record below):

  repo              string, 7–55 chars
  path              string, 4–127 chars
  func_name         string, 1–88 chars
  original_string   string, 75–19.8k chars
  language          string, 1 distinct value ('python')
  code              string, 75–19.8k chars (byte-for-byte identical to original_string in this dump)
  code_tokens       sequence of strings
  docstring         string, 3–17.3k chars
  docstring_tokens  sequence of strings
  sha               string, 40 chars
  url               string, 87–242 chars
  partition         string, 1 distinct value ('train')
guaix-ucm/numina
numina/instrument/simulation/factory.py
RunCounter.runstring
def runstring(self):
    """Return the run number and the file name."""
    cfile = self.template % self.last
    self.last += 1
    return cfile
python
[ "def", "runstring", "(", "self", ")", ":", "cfile", "=", "self", ".", "template", "%", "self", ".", "last", "self", ".", "last", "+=", "1", "return", "cfile" ]
Return the run number and the file name.
[ "Return", "the", "run", "number", "and", "the", "file", "name", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/instrument/simulation/factory.py#L21-L25
train
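A minimal usage sketch of the pattern above. The standalone `RunCounter` below is a hypothetical stand-in (the constructor signature is an assumption); the real class lives in numina/instrument/simulation/factory.py:

class RunCounter:
    # Hypothetical stand-in: renders file names from a printf-style template.
    def __init__(self, template, start=1):
        self.template = template  # e.g. 'r%05d.fits'
        self.last = start         # run number used for the next file name

    def runstring(self):
        """Return the run number and the file name."""
        cfile = self.template % self.last
        self.last += 1
        return cfile

counter = RunCounter('r%05d.fits')
print(counter.runstring())  # r00001.fits
print(counter.runstring())  # r00002.fits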
guaix-ucm/numina
numina/dal/dictdal.py
BaseDictDAL.obsres_from_oblock_id
def obsres_from_oblock_id(self, obsid, configuration=None):
    """
    Override instrument configuration if configuration is not None
    """
    este = self.ob_table[obsid]
    obsres = obsres_from_dict(este)

    _logger.debug("obsres_from_oblock_id id='%s', mode='%s' START", obsid, obsres.mode)
    try:
        this_drp = self.drps.query_by_name(obsres.instrument)
    except KeyError:
        raise ValueError('no DRP for instrument {}'.format(obsres.instrument))

    # Reserved names
    if obsres.mode in self._RESERVED_MODE_NAMES:
        selected_mode = None  # null mode
    else:
        selected_mode = this_drp.modes[obsres.mode]

    if selected_mode:
        obsres = selected_mode.build_ob(obsres, self)
        obsres = selected_mode.tag_ob(obsres)

    if configuration:
        # override instrument configuration
        # obsres.configuration = self.search_instrument_configuration(
        #     obsres.instrument,
        #     configuration
        # )
        pass
    else:
        # Insert Instrument configuration
        pass
        # obsres.configuration = this_drp.configuration_selector(obsres)

    key, date_obs, keyname = this_drp.select_profile(obsres)
    obsres.configuration = self.assembly_instrument(key, date_obs, keyname)
    obsres.profile = obsres.configuration
    _logger.debug('obsres_from_oblock_id %s END', obsid)
    return obsres
python
[ "def", "obsres_from_oblock_id", "(", "self", ",", "obsid", ",", "configuration", "=", "None", ")", ":", "este", "=", "self", ".", "ob_table", "[", "obsid", "]", "obsres", "=", "obsres_from_dict", "(", "este", ")", "_logger", ".", "debug", "(", "\"obsres_from_oblock_id id='%s', mode='%s' START\"", ",", "obsid", ",", "obsres", ".", "mode", ")", "try", ":", "this_drp", "=", "self", ".", "drps", ".", "query_by_name", "(", "obsres", ".", "instrument", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "'no DRP for instrument {}'", ".", "format", "(", "obsres", ".", "instrument", ")", ")", "# Reserved names", "if", "obsres", ".", "mode", "in", "self", ".", "_RESERVED_MODE_NAMES", ":", "selected_mode", "=", "None", "# null mode", "else", ":", "selected_mode", "=", "this_drp", ".", "modes", "[", "obsres", ".", "mode", "]", "if", "selected_mode", ":", "obsres", "=", "selected_mode", ".", "build_ob", "(", "obsres", ",", "self", ")", "obsres", "=", "selected_mode", ".", "tag_ob", "(", "obsres", ")", "if", "configuration", ":", "# override instrument configuration", "# obsres.configuration = self.search_instrument_configuration(", "# obsres.instrument,", "# configuration", "#)", "pass", "else", ":", "# Insert Instrument configuration", "pass", "# obsres.configuration = this_drp.configuration_selector(obsres)", "key", ",", "date_obs", ",", "keyname", "=", "this_drp", ".", "select_profile", "(", "obsres", ")", "obsres", ".", "configuration", "=", "self", ".", "assembly_instrument", "(", "key", ",", "date_obs", ",", "keyname", ")", "obsres", ".", "profile", "=", "obsres", ".", "configuration", "_logger", ".", "debug", "(", "'obsres_from_oblock_id %s END'", ",", "obsid", ")", "return", "obsres" ]
Override instrument configuration if configuration is not None
[ "Override", "instrument", "configuration", "if", "configuration", "is", "not", "None" ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/dal/dictdal.py#L141-L179
train
guaix-ucm/numina
numina/core/tagexpr.py
map_tree
def map_tree(visitor, tree):
    """Apply function to nodes"""
    newn = [map_tree(visitor, node) for node in tree.nodes]
    return visitor(tree, newn)
python
[ "def", "map_tree", "(", "visitor", ",", "tree", ")", ":", "newn", "=", "[", "map_tree", "(", "visitor", ",", "node", ")", "for", "node", "in", "tree", ".", "nodes", "]", "return", "visitor", "(", "tree", ",", "newn", ")" ]
Apply function to nodes
[ "Apply", "function", "to", "nodes" ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/tagexpr.py#L44-L47
train
guaix-ucm/numina
numina/core/tagexpr.py
filter_tree
def filter_tree(condition, tree):
    """Return parts of the tree that fulfill condition"""
    if condition(tree):
        for node in tree.nodes:
            # this works in Python >= 3.3
            # yield from filter_tree(condition, node)
            for n in filter_tree(condition, node):
                yield n
    yield tree
python
[ "def", "filter_tree", "(", "condition", ",", "tree", ")", ":", "if", "condition", "(", "tree", ")", ":", "for", "node", "in", "tree", ".", "nodes", ":", "# this works in python > 3.3", "# yield from filter_tree(condition, node)", "for", "n", "in", "filter_tree", "(", "condition", ",", "node", ")", ":", "yield", "n", "yield", "tree" ]
Return parts of the tree that fulfill condition
[ "Return", "parts", "of", "the", "tree", "that", "fulfill", "condition" ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/tagexpr.py#L50-L58
train
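Both helpers only require nodes to expose a `.nodes` sequence, so they can be exercised with a minimal node type. The `Node` class below is illustrative, not part of numina:

from numina.core.tagexpr import map_tree, filter_tree

class Node:
    # Hypothetical node type exposing the .nodes attribute the helpers traverse.
    def __init__(self, label, nodes=()):
        self.label = label
        self.nodes = list(nodes)

tree = Node('root', [Node('a'), Node('b', [Node('c')])])

# map_tree: the visitor sees each node together with its already-mapped children.
labels = map_tree(lambda node, children: [node.label] + sum(children, []), tree)
print(labels)  # ['root', 'a', 'b', 'c']

# filter_tree yields matching subtrees bottom-up.
print([n.label for n in filter_tree(lambda n: True, tree)])  # ['a', 'c', 'b', 'root']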
guaix-ucm/numina
numina/core/tagexpr.py
Expression.fill_placeholders
def fill_placeholders(self, tags):
    """Substitute Placeholder nodes by their values in tags"""
    def change_p_node_tags(node, children):
        if isinstance(node, Placeholder):
            value = ConstExpr(tags[node.name])
            return value
        else:
            return node.clone(children)

    return map_tree(change_p_node_tags, self)
python
[ "def", "fill_placeholders", "(", "self", ",", "tags", ")", ":", "def", "change_p_node_tags", "(", "node", ",", "children", ")", ":", "if", "isinstance", "(", "node", ",", "Placeholder", ")", ":", "value", "=", "ConstExpr", "(", "tags", "[", "node", ".", "name", "]", ")", "return", "value", "else", ":", "return", "node", ".", "clone", "(", "children", ")", "return", "map_tree", "(", "change_p_node_tags", ",", "self", ")" ]
Substitute Placeholder nodes by their values in tags
[ "Substitute", "Placeholder", "nodes", "by", "its", "value", "in", "tags" ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/tagexpr.py#L137-L146
train
SUNCAT-Center/CatHub
cathub/ase_tools/gas_phase_references.py
molecules2symbols
def molecules2symbols(molecules, add_hydrogen=True):
    """Take a list of molecules and return just a list of atomic symbols,
    possibly adding hydrogen
    """
    symbols = sorted(
        list(set(
            ase.symbols.string2symbols(''.join(
                map(
                    lambda _x: ''.join(ase.symbols.string2symbols(_x)),
                    molecules)
            ))
        )),
        key=lambda _y: ase.data.atomic_numbers[_y])

    if add_hydrogen and 'H' not in symbols:
        symbols.insert(0, 'H')

    return symbols
python
[ "def", "molecules2symbols", "(", "molecules", ",", "add_hydrogen", "=", "True", ")", ":", "symbols", "=", "sorted", "(", "list", "(", "set", "(", "ase", ".", "symbols", ".", "string2symbols", "(", "''", ".", "join", "(", "map", "(", "lambda", "_x", ":", "''", ".", "join", "(", "ase", ".", "symbols", ".", "string2symbols", "(", "_x", ")", ")", ",", "molecules", ")", ")", ")", ")", ")", ",", "key", "=", "lambda", "_y", ":", "ase", ".", "data", ".", "atomic_numbers", "[", "_y", "]", ")", "if", "add_hydrogen", "and", "'H'", "not", "in", "symbols", ":", "symbols", ".", "insert", "(", "0", ",", "'H'", ")", "return", "symbols" ]
Take a list of molecules and return just a list of atomic symbols, possibly adding hydrogen
[ "Take", "a", "list", "of", "molecules", "and", "return", "just", "a", "list", "of", "atomic", "symbols", "possibly", "adding", "hydrogen" ]
324625d1d8e740673f139658b2de4c9e1059739e
https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/ase_tools/gas_phase_references.py#L8-L25
train
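Illustrative calls, assuming ASE is installed; the outputs follow from the sorting by atomic number, they are not captured runs:

from cathub.ase_tools.gas_phase_references import molecules2symbols

print(molecules2symbols(['CH4', 'H2O']))  # ['H', 'C', 'O'] (Z: H=1, C=6, O=8)
print(molecules2symbols(['O2']))          # ['H', 'O'] (hydrogen prepended by default)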
SUNCAT-Center/CatHub
cathub/ase_tools/gas_phase_references.py
construct_reference_system
def construct_reference_system(
        symbols,
        candidates=None,
        options=None,
):
    """Take a list of symbols and construct a gas phase reference system,
    avoiding O2 when possible.

    Candidates can be rearranged, where earlier candidates get higher
    preference than later candidates.

    Assumes symbols are sorted by atomic number.
    """
    if hasattr(options, 'no_hydrogen') and options.no_hydrogen:
        add_hydrogen = False
    else:
        add_hydrogen = True

    references = {}
    sorted_candidates = [
        'H2', 'H2O', 'NH3', 'N2', 'CH4', 'CO', 'H2S', 'HCl', 'O2']

    if candidates is None:
        candidates = sorted_candidates
    else:
        odd_candidates = [c for c in candidates if c not in sorted_candidates]
        candidates = [c for c in sorted_candidates if c in candidates] \
            + odd_candidates

    added_symbols = []
    # go over the symbols in the adsorbate
    # to add reference species in a procedural manner
    for symbol in symbols:
        added_symbols.append(symbol)
        for candidate in candidates:
            _symbols = ase.symbols.string2symbols(candidate)

            # Add the candidate if the partial adsorbate species set
            # is a subset of the reference species
            # and the reference species
            # is a subset of the full adsorbate species set
            if set(added_symbols) <= set(list(references.keys()) + _symbols) \
                    and set(list(references.keys()) + _symbols) <= set(symbols) \
                    and candidate not in references.values():
                references[symbol] = candidate
                break
        else:
            raise UserWarning((
                "No candidate satisfied {symbol}. Add more candidates\n"
                "    Symbols {symbols}\n"
                "    _Symbols {_symbols}\n"
                "    References {references}\n"
                "    Candidates {candidates}\n"
            ).format(
                symbol=symbol,
                symbols=symbols,
                _symbols=_symbols,
                candidates=candidates,
                references=list(references.keys()),
            ))

    sorted_references = []
    references = list(references.items())

    # put references in order so that each reference
    # only adds one additional species in each step
    # while references:
    #     for i, reference in enumerate(references):
    #         if len(set(ase.symbols.string2symbols(reference[1])) -
    #                 set(x[0] for x in sorted_references)) == 1:
    #             sorted_references.append(references.pop(i))
    #             break

    return references
python
[ "def", "construct_reference_system", "(", "symbols", ",", "candidates", "=", "None", ",", "options", "=", "None", ",", ")", ":", "if", "hasattr", "(", "options", ",", "'no_hydrogen'", ")", "and", "options", ".", "no_hydrogen", ":", "add_hydrogen", "=", "False", "else", ":", "add_hydrogen", "=", "True", "references", "=", "{", "}", "sorted_candidates", "=", "[", "'H2'", ",", "'H2O'", ",", "'NH3'", ",", "'N2'", ",", "'CH4'", ",", "'CO'", ",", "'H2S'", ",", "'HCl'", ",", "'O2'", "]", "if", "candidates", "is", "None", ":", "candidates", "=", "sorted_candidates", "else", ":", "odd_candidates", "=", "[", "c", "for", "c", "in", "candidates", "if", "c", "not", "in", "sorted_candidates", "]", "candidates", "=", "[", "c", "for", "c", "in", "sorted_candidates", "if", "c", "in", "candidates", "]", "+", "odd_candidates", "added_symbols", "=", "[", "]", "# go symbols in adsorbate", "# to add reference species in procedural manner", "for", "symbol", "in", "symbols", ":", "added_symbols", ".", "append", "(", "symbol", ")", "for", "candidate", "in", "candidates", ":", "_symbols", "=", "ase", ".", "symbols", ".", "string2symbols", "(", "candidate", ")", "# Add partial adsorbate species", "# is subset of reference species", "# and reference species", "# is subset of full adsorbate species set", "if", "set", "(", "added_symbols", ")", "<=", "set", "(", "list", "(", "references", ".", "keys", "(", ")", ")", "+", "_symbols", ")", "and", "set", "(", "list", "(", "references", ".", "keys", "(", ")", ")", "+", "_symbols", ")", "<=", "set", "(", "symbols", ")", "and", "candidate", "not", "in", "references", ".", "values", "(", ")", ":", "references", "[", "symbol", "]", "=", "candidate", "break", "else", ":", "raise", "UserWarning", "(", "(", "\"No candidate satisfied {symbol}. Add more candidates\\n\"", "\" Symbols {symbols}\\n\"", "\" _Symbols {_symbols}\\n\"", "\" References {references}\\n\"", "\" Candidates {candidates}\\n\"", ")", ".", "format", "(", "symbol", "=", "symbol", ",", "symbols", "=", "symbols", ",", "_symbols", "=", "_symbols", ",", "candidates", "=", "candidates", ",", "references", "=", "list", "(", "references", ".", "keys", "(", ")", ")", ",", ")", ")", "sorted_references", "=", "[", "]", "references", "=", "list", "(", "references", ".", "items", "(", ")", ")", "# put references in order so that each reference", "# only adds one one additional species in each step", "# while references:", "# for i, reference in enumerate(references):", "# if len(set(ase.symbols.string2symbols(reference[1])) -", "# set(x[0] for x in sorted_references)) == 1:", "# sorted_references.append(references.pop(i))", "# break", "return", "references" ]
Take a list of symbols and construct a gas phase reference system, avoiding O2 when possible. Candidates can be rearranged, where earlier candidates get higher preference than later candidates. Assumes symbols are sorted by atomic number.
[ "Take", "a", "list", "of", "symbols", "and", "construct", "gas", "phase", "references", "system", "when", "possible", "avoiding", "O2", ".", "Candidates", "can", "be", "rearranged", "where", "earlier", "candidates", "get", "higher", "preference", "than", "later", "candidates" ]
324625d1d8e740673f139658b2de4c9e1059739e
https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/ase_tools/gas_phase_references.py#L28-L106
train
SUNCAT-Center/CatHub
cathub/ase_tools/gas_phase_references.py
get_stoichiometry_factors
def get_stoichiometry_factors(adsorbates, references):
    """Take a list of adsorbates and a corresponding reference
    system and return a list of dictionaries encoding the stoichiometry
    factors converting between adsorbates and reference molecules.
    """
    stoichiometry = get_atomic_stoichiometry(references)

    stoichiometry_factors = {}
    for adsorbate in adsorbates:
        for symbol in ase.symbols.string2symbols(adsorbate):
            symbol_index = list(
                map(lambda _x: _x[0], references)).index(symbol)
            for (factor, (ref_symbol, ref_molecule)) in zip(
                    stoichiometry[symbol_index], references):
                stoichiometry_factors.setdefault(
                    adsorbate, {})[ref_molecule] = stoichiometry_factors.setdefault(
                        adsorbate, {}).get(ref_molecule, 0) + factor

        nonzero_factors = {}
        for key, value in stoichiometry_factors[adsorbate].items():
            if not np.isclose(value, 0.):
                nonzero_factors[key] = value
        stoichiometry_factors[adsorbate] = nonzero_factors

    return stoichiometry_factors
python
[ "def", "get_stoichiometry_factors", "(", "adsorbates", ",", "references", ")", ":", "stoichiometry", "=", "get_atomic_stoichiometry", "(", "references", ")", "stoichiometry_factors", "=", "{", "}", "for", "adsorbate", "in", "adsorbates", ":", "for", "symbol", "in", "ase", ".", "symbols", ".", "string2symbols", "(", "adsorbate", ")", ":", "symbol_index", "=", "list", "(", "map", "(", "lambda", "_x", ":", "_x", "[", "0", "]", ",", "references", ")", ")", ".", "index", "(", "symbol", ")", "for", "(", "factor", ",", "(", "ref_symbol", ",", "ref_molecule", ")", ")", "in", "zip", "(", "stoichiometry", "[", "symbol_index", "]", ",", "references", ")", ":", "stoichiometry_factors", ".", "setdefault", "(", "adsorbate", ",", "{", "}", ")", "[", "ref_molecule", "]", "=", "stoichiometry_factors", ".", "setdefault", "(", "adsorbate", ",", "{", "}", ")", ".", "get", "(", "ref_molecule", ",", "0", ")", "+", "factor", "nonzero_factors", "=", "{", "}", "for", "key", ",", "value", "in", "stoichiometry_factors", "[", "adsorbate", "]", ".", "items", "(", ")", ":", "if", "not", "np", ".", "isclose", "(", "value", ",", "0.", ")", ":", "nonzero_factors", "[", "key", "]", "=", "value", "stoichiometry_factors", "[", "adsorbate", "]", "=", "nonzero_factors", "return", "stoichiometry_factors" ]
Take a list of adsorbates and a corresponding reference system and return a list of dictionaries encoding the stoichiometry factors converting between adsorbates and reference molecules.
[ "Take", "a", "list", "of", "adsorabtes", "and", "a", "corresponding", "reference", "system", "and", "return", "a", "list", "of", "dictionaries", "encoding", "the", "stoichiometry", "factors", "converting", "between", "adsorbates", "and", "reference", "molecules", "." ]
324625d1d8e740673f139658b2de4c9e1059739e
https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/ase_tools/gas_phase_references.py#L132-L161
train
druids/django-chamber
chamber/importers/__init__.py
AbstractCSVImporter.get_fields_dict
def get_fields_dict(self, row):
    """
    Returns a dict of field name and cleaned value pairs to initialize the model.
    Beware, it aligns the lists of fields and row values with Nones to allow for
    adding fields not found in the CSV.
    Whitespace around the value of the cell is stripped.
    """
    return {k: getattr(self, 'clean_{}'.format(k), lambda x: x)(v.strip() if isinstance(v, str) else None)
            for k, v in zip_longest(self.get_fields(), row)}
python
[ "def", "get_fields_dict", "(", "self", ",", "row", ")", ":", "return", "{", "k", ":", "getattr", "(", "self", ",", "'clean_{}'", ".", "format", "(", "k", ")", ",", "lambda", "x", ":", "x", ")", "(", "v", ".", "strip", "(", ")", "if", "isinstance", "(", "v", ",", "str", ")", "else", "None", ")", "for", "k", ",", "v", "in", "zip_longest", "(", "self", ".", "get_fields", "(", ")", ",", "row", ")", "}" ]
Returns a dict of field name and cleaned value pairs to initialize the model. Beware, it aligns the lists of fields and row values with Nones to allow for adding fields not found in the CSV. Whitespace around the value of the cell is stripped.
[ "Returns", "a", "dict", "of", "field", "name", "and", "cleaned", "value", "pairs", "to", "initialize", "the", "model", ".", "Beware", "it", "aligns", "the", "lists", "of", "fields", "and", "row", "values", "with", "Nones", "to", "allow", "for", "adding", "fields", "not", "found", "in", "the", "CSV", ".", "Whitespace", "around", "the", "value", "of", "the", "cell", "is", "stripped", "." ]
eef4169923557e96877a664fa254e8c0814f3f23
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/importers/__init__.py#L90-L98
train
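The `clean_<field>` lookup pattern is easy to see outside Django; a minimal self-contained sketch with a hypothetical importer class:

from itertools import zip_longest

class Importer:
    # Hypothetical importer demonstrating the clean_<field> hook pattern.
    def get_fields(self):
        return ('name', 'age', 'note')

    def clean_age(self, value):
        # Field-specific cleaner, found by name via getattr().
        return int(value) if value is not None else None

    def get_fields_dict(self, row):
        return {k: getattr(self, 'clean_{}'.format(k), lambda x: x)(v.strip() if isinstance(v, str) else None)
                for k, v in zip_longest(self.get_fields(), row)}

print(Importer().get_fields_dict([' Alice ', '42']))
# {'name': 'Alice', 'age': 42, 'note': None}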
guaix-ucm/numina
numina/store/gtc/load.py
process_node
def process_node(node):
    """Process a node in result.json structure"""
    value = node['value']
    mname = node['name']
    typeid = node['typeid']
    if typeid == 52:  # StructDataValue
        obj = {}
        for el in value['elements']:
            key, val = process_node(el)
            obj[key] = val
        if value['struct_type'] != 'dict':
            # Value is not a dict
            klass = objimp.import_object(value['struct_type'])
            newobj = klass.__new__(klass)
            if hasattr(newobj, '__setstate__'):
                newobj.__setstate__(obj)
            else:
                newobj.__dict__ = obj
            obj = newobj
    elif typeid == 9:
        data = value['data']
        dim = value['dimension']
        shape = dim['height'], dim['width']
        obj = data
    elif typeid == 90:  # StructDataValueList
        obj = []
        for el in value:
            sobj = {}
            for sel in el['elements']:
                key, val = process_node(sel)
                sobj[key] = val
            obj.append(sobj)
    elif typeid == 45:  # Frame
        obj = dataframe.DataFrame(frame=os.path.abspath(value['path']))
    else:
        obj = value
    return mname, obj
python
[ "def", "process_node", "(", "node", ")", ":", "value", "=", "node", "[", "'value'", "]", "mname", "=", "node", "[", "'name'", "]", "typeid", "=", "node", "[", "'typeid'", "]", "if", "typeid", "==", "52", ":", "# StructDataValue", "obj", "=", "{", "}", "for", "el", "in", "value", "[", "'elements'", "]", ":", "key", ",", "val", "=", "process_node", "(", "el", ")", "obj", "[", "key", "]", "=", "val", "if", "value", "[", "'struct_type'", "]", "!=", "'dict'", ":", "# Value is not a dict", "klass", "=", "objimp", ".", "import_object", "(", "value", "[", "'struct_type'", "]", ")", "newobj", "=", "klass", ".", "__new__", "(", "klass", ")", "if", "hasattr", "(", "newobj", ",", "'__setstate__'", ")", ":", "newobj", ".", "__setstate__", "(", "obj", ")", "else", ":", "newobj", ".", "__dict__", "=", "obj", "obj", "=", "newobj", "elif", "typeid", "==", "9", ":", "data", "=", "value", "[", "'data'", "]", "dim", "=", "value", "[", "'dimension'", "]", "shape", "=", "dim", "[", "'height'", "]", ",", "dim", "[", "'width'", "]", "obj", "=", "data", "elif", "typeid", "==", "90", ":", "# StructDataValueList", "obj", "=", "[", "]", "for", "el", "in", "value", ":", "sobj", "=", "{", "}", "for", "sel", "in", "el", "[", "'elements'", "]", ":", "key", ",", "val", "=", "process_node", "(", "sel", ")", "sobj", "[", "key", "]", "=", "val", "obj", ".", "append", "(", "sobj", ")", "elif", "typeid", "==", "45", ":", "# Frame", "obj", "=", "dataframe", ".", "DataFrame", "(", "frame", "=", "os", ".", "path", ".", "abspath", "(", "value", "[", "'path'", "]", ")", ")", "else", ":", "obj", "=", "value", "return", "mname", ",", "obj" ]
Process a node in result.json structure
[ "Process", "a", "node", "in", "result", ".", "json", "structure" ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/store/gtc/load.py#L20-L59
train
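A quick illustration of the default branch: nodes whose typeid is not one of the handled codes (52, 9, 90, 45) fall through and return the raw value. The node below is a hypothetical minimal example, not real GTC output:

from numina.store.gtc.load import process_node

print(process_node({'name': 'exposure', 'typeid': 1, 'value': 42.0}))
# ('exposure', 42.0)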
guaix-ucm/numina
numina/store/gtc/load.py
build_result
def build_result(data):
    """Create a dictionary with the contents of result.json"""
    more = {}
    for key, value in data.items():
        if key != 'elements':
            newnode = value
        else:
            newnode = {}
            for el in value:
                nkey, nvalue = process_node(el)
                newnode[nkey] = nvalue
        more[key] = newnode

    return more
python
[ "def", "build_result", "(", "data", ")", ":", "more", "=", "{", "}", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", ":", "if", "key", "!=", "'elements'", ":", "newnode", "=", "value", "else", ":", "newnode", "=", "{", "}", "for", "el", "in", "value", ":", "nkey", ",", "nvalue", "=", "process_node", "(", "el", ")", "newnode", "[", "nkey", "]", "=", "nvalue", "more", "[", "key", "]", "=", "newnode", "return", "more" ]
Create a dictionary with the contents of result.json
[ "Create", "a", "dictionary", "with", "the", "contents", "of", "result", ".", "json" ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/store/gtc/load.py#L62-L76
train
guaix-ucm/numina
numina/core/recipeinout.py
RecipeInOut._finalize
def _finalize(self, all_msg_errors=None):
    """Access all the instance descriptors

    This will trigger an exception if a required
    parameter is not set
    """
    if all_msg_errors is None:
        all_msg_errors = []
    for key in self.stored():
        try:
            getattr(self, key)
        except (ValueError, TypeError) as err:
            all_msg_errors.append(err.args[0])
    # Raises a list of all the missing entries
    if all_msg_errors:
        raise ValueError(all_msg_errors)
python
[ "def", "_finalize", "(", "self", ",", "all_msg_errors", "=", "None", ")", ":", "if", "all_msg_errors", "is", "None", ":", "all_msg_errors", "=", "[", "]", "for", "key", "in", "self", ".", "stored", "(", ")", ":", "try", ":", "getattr", "(", "self", ",", "key", ")", "except", "(", "ValueError", ",", "TypeError", ")", "as", "err", ":", "all_msg_errors", ".", "append", "(", "err", ".", "args", "[", "0", "]", ")", "# Raises a list of all the missing entries", "if", "all_msg_errors", ":", "raise", "ValueError", "(", "all_msg_errors", ")" ]
Access all the instance descriptors. This will trigger an exception if a required parameter is not set.
[ "Access", "all", "the", "instance", "descriptors" ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/recipeinout.py#L44-L61
train
guaix-ucm/numina
numina/core/recipeinout.py
RecipeInOut.validate
def validate(self):
    """Validate myself."""
    for key, req in self.stored().items():
        val = getattr(self, key)
        req.validate(val)

    # Run checks defined in __checkers__
    self._run_checks()
python
[ "def", "validate", "(", "self", ")", ":", "for", "key", ",", "req", "in", "self", ".", "stored", "(", ")", ".", "items", "(", ")", ":", "val", "=", "getattr", "(", "self", ",", "key", ")", "req", ".", "validate", "(", "val", ")", "# Run checks defined in __checkers__", "self", ".", "_run_checks", "(", ")" ]
Validate myself.
[ "Validate", "myself", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/recipeinout.py#L70-L78
train
guaix-ucm/numina
numina/core/validator.py
validate
def validate(method):
    """Decorate run method, inputs and outputs are validated"""
    @wraps(method)
    def mod_run(self, rinput):
        self.validate_input(rinput)
        #
        result = method(self, rinput)
        #
        self.validate_result(result)
        return result

    return mod_run
python
[ "def", "validate", "(", "method", ")", ":", "@", "wraps", "(", "method", ")", "def", "mod_run", "(", "self", ",", "rinput", ")", ":", "self", ".", "validate_input", "(", "rinput", ")", "#", "result", "=", "method", "(", "self", ",", "rinput", ")", "#", "self", ".", "validate_result", "(", "result", ")", "return", "result", "return", "mod_run" ]
Decorate run method, inputs and outputs are validated
[ "Decorate", "run", "method", "inputs", "and", "outputs", "are", "validated" ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/validator.py#L19-L31
train
guaix-ucm/numina
numina/core/validator.py
as_list
def as_list(callable):
    """Convert a scalar validator into a list validator"""
    @wraps(callable)
    def wrapper(value_iter):
        return [callable(value) for value in value_iter]
    return wrapper
python
[ "def", "as_list", "(", "callable", ")", ":", "@", "wraps", "(", "callable", ")", "def", "wrapper", "(", "value_iter", ")", ":", "return", "[", "callable", "(", "value", ")", "for", "value", "in", "value_iter", "]", "return", "wrapper" ]
Convert a scalar validator into a list validator
[ "Convert", "a", "scalar", "validator", "in", "a", "list", "validator" ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/validator.py#L41-L47
train
guaix-ucm/numina
numina/core/validator.py
range_validator
def range_validator(minval=None, maxval=None):
    """Generates a function that validates that a number is within range

    Parameters
    ==========
    minval: numeric, optional:
        Values strictly lesser than `minval` are rejected
    maxval: numeric, optional:
        Values strictly greater than `maxval` are rejected

    Returns
    =======
    A function that returns values if they are in the range
    and raises ValidationError if the values are outside the range

    """
    def checker_func(value):
        if minval is not None and value < minval:
            msg = "must be >= {}".format(minval)
            raise ValidationError(msg)
        if maxval is not None and value > maxval:
            msg = "must be <= {}".format(maxval)
            raise ValidationError(msg)
        return value

    return checker_func
python
[ "def", "range_validator", "(", "minval", "=", "None", ",", "maxval", "=", "None", ")", ":", "def", "checker_func", "(", "value", ")", ":", "if", "minval", "is", "not", "None", "and", "value", "<", "minval", ":", "msg", "=", "\"must be >= {}\"", ".", "format", "(", "minval", ")", "raise", "ValidationError", "(", "msg", ")", "if", "maxval", "is", "not", "None", "and", "value", ">", "maxval", ":", "msg", "=", "\"must be <= {}\"", ".", "format", "(", "maxval", ")", "raise", "ValidationError", "(", "msg", ")", "return", "value", "return", "checker_func" ]
Generates a function that validates that a number is within range Parameters ========== minval: numeric, optional: Values strictly lesser than `minval` are rejected maxval: numeric, optional: Values strictly greater than `maxval` are rejected Returns ======= A function that returns values if they are in the range and raises ValidationError if the values are outside the range
[ "Generates", "a", "function", "that", "validates", "that", "a", "number", "is", "within", "range" ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/validator.py#L50-L75
train
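Composing `range_validator` with `as_list` from the same module gives a vectorized validator. A short sketch; printing the exception assumes numina's ValidationError carries the message as its string representation:

from numina.core.validator import as_list, range_validator

check = as_list(range_validator(minval=0, maxval=10))
print(check([1, 5, 10]))  # [1, 5, 10]

try:
    check([1, 42])
except Exception as err:  # numina's ValidationError
    print(err)            # must be <= 10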
pylp/pylp
pylp/cli/run.py
run
def run(path, tasks):
    """Run a pylpfile."""
    # Test if the pylpfile exists
    readable_path = make_readable_path(path)
    if not os.path.isfile(path):
        logger.log(logger.red("Can't read pylpfile "), logger.magenta(readable_path))
        sys.exit(-1)
    else:
        logger.log("Using pylpfile ", logger.magenta(readable_path))

    # Run the pylpfile
    try:
        runpy.run_path(path, None, "pylpfile")
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        logger.log(logger.red("\nAn error has occurred during the execution of the pylpfile"))
        sys.exit(-1)

    # Start the tasks
    for name in tasks:
        pylp.start(name)

    # Wait until all tasks are executed
    loop = asyncio.get_event_loop()
    loop.run_until_complete(wait_and_quit(loop))
python
[ "def", "run", "(", "path", ",", "tasks", ")", ":", "# Test if the pylpfile exists", "readable_path", "=", "make_readable_path", "(", "path", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "logger", ".", "log", "(", "logger", ".", "red", "(", "\"Can't read pylpfile \"", ")", ",", "logger", ".", "magenta", "(", "readable_path", ")", ")", "sys", ".", "exit", "(", "-", "1", ")", "else", ":", "logger", ".", "log", "(", "\"Using pylpfile \"", ",", "logger", ".", "magenta", "(", "readable_path", ")", ")", "# Run the pylpfile", "try", ":", "runpy", ".", "run_path", "(", "path", ",", "None", ",", "\"pylpfile\"", ")", "except", "Exception", "as", "e", ":", "traceback", ".", "print_exc", "(", "file", "=", "sys", ".", "stdout", ")", "logger", ".", "log", "(", "logger", ".", "red", "(", "\"\\nAn error has occurred during the execution of the pylpfile\"", ")", ")", "sys", ".", "exit", "(", "-", "1", ")", "# Start the tasks", "for", "name", "in", "tasks", ":", "pylp", ".", "start", "(", "name", ")", "# Wait until all task are executed", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "loop", ".", "run_until_complete", "(", "wait_and_quit", "(", "loop", ")", ")" ]
Run a pylpfile.
[ "Run", "a", "pylpfile", "." ]
7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/cli/run.py#L18-L45
train
pylp/pylp
pylp/cli/run.py
wait_and_quit
async def wait_and_quit(loop):
    """Wait until all tasks are executed."""
    from pylp.lib.tasks import running
    if running:
        await asyncio.wait(map(lambda runner: runner.future, running))
python
[ "async", "def", "wait_and_quit", "(", "loop", ")", ":", "from", "pylp", ".", "lib", ".", "tasks", "import", "running", "if", "running", ":", "await", "asyncio", ".", "wait", "(", "map", "(", "lambda", "runner", ":", "runner", ".", "future", ",", "running", ")", ")" ]
Wait until all tasks are executed.
[ "Wait", "until", "all", "task", "are", "executed", "." ]
7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/cli/run.py#L49-L53
train
inspirehep/inspire-schemas
inspire_schemas/readers/literature.py
LiteratureReader.is_published
def is_published(self):
    """Return True if a record is published.

    We say that a record is published if it is citeable, which means
    that it has enough information in a ``publication_info``, or if
    we know its DOI and a ``journal_title``, which means it is in
    press.

    Returns:
        bool: whether the record is published.

    Examples:
        >>> record = {
        ...     'dois': [
        ...         {'value': '10.1016/0029-5582(61)90469-2'},
        ...     ],
        ...     'publication_info': [
        ...         {'journal_title': 'Nucl.Phys.'},
        ...     ],
        ... }
        >>> LiteratureReader(record).is_published
        True
    """
    citeable = 'publication_info' in self.record and \
        is_citeable(self.record['publication_info'])

    submitted = 'dois' in self.record and any(
        'journal_title' in el
        for el in force_list(self.record.get('publication_info'))
    )

    return citeable or submitted
python
[ "def", "is_published", "(", "self", ")", ":", "citeable", "=", "'publication_info'", "in", "self", ".", "record", "and", "is_citeable", "(", "self", ".", "record", "[", "'publication_info'", "]", ")", "submitted", "=", "'dois'", "in", "self", ".", "record", "and", "any", "(", "'journal_title'", "in", "el", "for", "el", "in", "force_list", "(", "self", ".", "record", ".", "get", "(", "'publication_info'", ")", ")", ")", "return", "citeable", "or", "submitted" ]
Return True if a record is published. We say that a record is published if it is citeable, which means that it has enough information in a ``publication_info``, or if we know its DOI and a ``journal_title``, which means it is in press. Returns: bool: whether the record is published. Examples: >>> record = { ... 'dois': [ ... {'value': '10.1016/0029-5582(61)90469-2'}, ... ], ... 'publication_info': [ ... {'journal_title': 'Nucl.Phys.'}, ... ], ... } >>> LiteratureReader(record).is_published True
[ "Return", "True", "if", "a", "record", "is", "published", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/readers/literature.py#L351-L382
train
inspirehep/inspire-schemas
inspire_schemas/readers/literature.py
LiteratureReader.get_page_artid_for_publication_info
def get_page_artid_for_publication_info(publication_info, separator):
    """Return the page range or the article id of a publication_info entry.

    Args:
        publication_info(dict): a publication_info field entry of a record
        separator(basestring): optional page range symbol, defaults to a single dash

    Returns:
        string: the page range or the article id of the record.

    Examples:
        >>> publication_info = {'artid': '054021'}
        >>> LiteratureReader.get_page_artid_for_publication_info(publication_info, '-')
        '054021'
    """
    if 'artid' in publication_info:
        return publication_info['artid']
    elif 'page_start' in publication_info and 'page_end' in publication_info:
        page_start = publication_info['page_start']
        page_end = publication_info['page_end']
        return text_type('{}{}{}').format(
            page_start,
            text_type(separator),
            page_end
        )

    return ''
python
[ "def", "get_page_artid_for_publication_info", "(", "publication_info", ",", "separator", ")", ":", "if", "'artid'", "in", "publication_info", ":", "return", "publication_info", "[", "'artid'", "]", "elif", "'page_start'", "in", "publication_info", "and", "'page_end'", "in", "publication_info", ":", "page_start", "=", "publication_info", "[", "'page_start'", "]", "page_end", "=", "publication_info", "[", "'page_end'", "]", "return", "text_type", "(", "'{}{}{}'", ")", ".", "format", "(", "page_start", ",", "text_type", "(", "separator", ")", ",", "page_end", ")", "return", "''" ]
Return the page range or the article id of a publication_info entry. Args: publication_info(dict): a publication_info field entry of a record separator(basestring): optional page range symbol, defaults to a single dash Returns: string: the page range or the article id of the record. Examples: >>> publication_info = {'artid': '054021'} >>> LiteratureReader.get_page_artid_for_publication_info(publication_info, '-') '054021'
[ "Return", "the", "page", "range", "or", "the", "article", "id", "of", "a", "publication_info", "entry", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/readers/literature.py#L449-L475
train
inspirehep/inspire-schemas
inspire_schemas/readers/literature.py
LiteratureReader.get_page_artid
def get_page_artid(self, separator='-'):
    """Return the page range or the article id of a record.

    Args:
        separator(basestring): optional page range symbol, defaults to a single dash

    Returns:
        string: the page range or the article id of the record.

    Examples:
        >>> record = {
        ...     'publication_info': [
        ...         {'artid': '054021'},
        ...     ],
        ... }
        >>> LiteratureReader(record).get_page_artid()
        '054021'
    """
    publication_info = get_value(
        self.record,
        'publication_info[0]',
        default={}
    )
    return LiteratureReader.get_page_artid_for_publication_info(
        publication_info, separator
    )
python
[ "def", "get_page_artid", "(", "self", ",", "separator", "=", "'-'", ")", ":", "publication_info", "=", "get_value", "(", "self", ".", "record", ",", "'publication_info[0]'", ",", "default", "=", "{", "}", ")", "return", "LiteratureReader", ".", "get_page_artid_for_publication_info", "(", "publication_info", ",", "separator", ")" ]
Return the page range or the article id of a record. Args: separator(basestring): optional page range symbol, defaults to a single dash Returns: string: the page range or the article id of the record. Examples: >>> record = { ... 'publication_info': [ ... {'artid': '054021'}, ... ], ... } >>> LiteratureReader(record).get_page_artid() '054021'
[ "Return", "the", "page", "range", "or", "the", "article", "id", "of", "a", "record", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/readers/literature.py#L477-L504
train
xflr6/bitsets
bitsets/transform.py
chunkreverse
def chunkreverse(integers, dtype='L'):
    """Yield integers of dtype bit-length reversing their bit-order.

    >>> list(chunkreverse([0b10000000, 0b11000000, 0b00000001], 'B'))
    [1, 3, 128]

    >>> list(chunkreverse([0x8000, 0xC000, 0x0001], 'H'))
    [1, 3, 32768]
    """
    if dtype in ('B', 8):
        return map(RBYTES.__getitem__, integers)

    fmt = '{0:0%db}' % NBITS[dtype]
    return (int(fmt.format(chunk)[::-1], 2) for chunk in integers)
python
[ "def", "chunkreverse", "(", "integers", ",", "dtype", "=", "'L'", ")", ":", "if", "dtype", "in", "(", "'B'", ",", "8", ")", ":", "return", "map", "(", "RBYTES", ".", "__getitem__", ",", "integers", ")", "fmt", "=", "'{0:0%db}'", "%", "NBITS", "[", "dtype", "]", "return", "(", "int", "(", "fmt", ".", "format", "(", "chunk", ")", "[", ":", ":", "-", "1", "]", ",", "2", ")", "for", "chunk", "in", "integers", ")" ]
Yield integers of dtype bit-length reversing their bit-order. >>> list(chunkreverse([0b10000000, 0b11000000, 0b00000001], 'B')) [1, 3, 128] >>> list(chunkreverse([0x8000, 0xC000, 0x0001], 'H')) [1, 3, 32768]
[ "Yield", "integers", "of", "dtype", "bit", "-", "length", "reverting", "their", "bit", "-", "order", "." ]
ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/transform.py#L26-L40
train
xflr6/bitsets
bitsets/transform.py
pack
def pack(chunks, r=32):
    """Return integer concatenating integer chunks of r > 0 bit-length.

    >>> pack([0, 1, 0, 1, 0, 1], 1)
    42

    >>> pack([0, 1], 8)
    256

    >>> pack([0, 1], 0)
    Traceback (most recent call last):
        ...
    ValueError: pack needs r > 0
    """
    if r < 1:
        raise ValueError('pack needs r > 0')

    n = shift = 0
    for c in chunks:
        n += c << shift
        shift += r
    return n
python
[ "def", "pack", "(", "chunks", ",", "r", "=", "32", ")", ":", "if", "r", "<", "1", ":", "raise", "ValueError", "(", "'pack needs r > 0'", ")", "n", "=", "shift", "=", "0", "for", "c", "in", "chunks", ":", "n", "+=", "c", "<<", "shift", "shift", "+=", "r", "return", "n" ]
Return integer concatenating integer chunks of r > 0 bit-length. >>> pack([0, 1, 0, 1, 0, 1], 1) 42 >>> pack([0, 1], 8) 256 >>> pack([0, 1], 0) Traceback (most recent call last): ... ValueError: pack needs r > 0
[ "Return", "integer", "concatenating", "integer", "chunks", "of", "r", ">", "0", "bit", "-", "length", "." ]
ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/transform.py#L43-L66
train
xflr6/bitsets
bitsets/transform.py
unpack
def unpack(n, r=32):
    """Yield r > 0 bit-length integers splitting n into chunks.

    >>> list(unpack(42, 1))
    [0, 1, 0, 1, 0, 1]

    >>> list(unpack(256, 8))
    [0, 1]

    >>> list(unpack(2, 0))
    Traceback (most recent call last):
        ...
    ValueError: unpack needs r > 0
    """
    if r < 1:
        raise ValueError('unpack needs r > 0')

    mask = (1 << r) - 1
    while n:
        yield n & mask
        n >>= r
python
[ "def", "unpack", "(", "n", ",", "r", "=", "32", ")", ":", "if", "r", "<", "1", ":", "raise", "ValueError", "(", "'unpack needs r > 0'", ")", "mask", "=", "(", "1", "<<", "r", ")", "-", "1", "while", "n", ":", "yield", "n", "&", "mask", "n", ">>=", "r" ]
Yield r > 0 bit-length integers splitting n into chunks. >>> list(unpack(42, 1)) [0, 1, 0, 1, 0, 1] >>> list(unpack(256, 8)) [0, 1] >>> list(unpack(2, 0)) Traceback (most recent call last): ... ValueError: unpack needs r > 0
[ "Yield", "r", ">", "0", "bit", "-", "length", "integers", "splitting", "n", "into", "chunks", "." ]
ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/transform.py#L69-L90
train
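`pack` and `unpack` are inverses for chunks smaller than 2**r, up to trailing zero chunks (which `unpack` drops, since it stops once n reaches 0); a quick round-trip check:

from bitsets.transform import pack, unpack

chunks = [3, 0, 7, 1]
n = pack(chunks, r=4)  # 3 + 0*2**4 + 7*2**8 + 1*2**12 = 5891
assert list(unpack(n, r=4)) == chunks

# Trailing zero chunks do not survive the round trip:
assert list(unpack(pack([3, 0], r=4), r=4)) == [3]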
xflr6/bitsets
bitsets/transform.py
packbools
def packbools(bools, dtype='L'):
    """Yield integers concatenating bools in chunks of dtype bit-length.

    >>> list(packbools([False, True, False, True, False, True], 'B'))
    [42]
    """
    r = NBITS[dtype]
    atoms = ATOMS[dtype]

    for chunk in zip_longest(*[iter(bools)] * r, fillvalue=False):
        yield sum(compress(atoms, chunk))
python
[ "def", "packbools", "(", "bools", ",", "dtype", "=", "'L'", ")", ":", "r", "=", "NBITS", "[", "dtype", "]", "atoms", "=", "ATOMS", "[", "dtype", "]", "for", "chunk", "in", "zip_longest", "(", "*", "[", "iter", "(", "bools", ")", "]", "*", "r", ",", "fillvalue", "=", "False", ")", ":", "yield", "sum", "(", "compress", "(", "atoms", ",", "chunk", ")", ")" ]
Yield integers concatenating bools in chunks of dtype bit-length. >>> list(packbools([False, True, False, True, False, True], 'B')) [42]
[ "Yield", "integers", "concatenating", "bools", "in", "chunks", "of", "dtype", "bit", "-", "length", "." ]
ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/transform.py#L93-L103
train
xflr6/bitsets
bitsets/transform.py
unpackbools
def unpackbools(integers, dtype='L'):
    """Yield booleans unpacking integers of dtype bit-length.

    >>> list(unpackbools([42], 'B'))
    [False, True, False, True, False, True, False, False]
    """
    atoms = ATOMS[dtype]

    for chunk in integers:
        for a in atoms:
            yield not not chunk & a
python
[ "def", "unpackbools", "(", "integers", ",", "dtype", "=", "'L'", ")", ":", "atoms", "=", "ATOMS", "[", "dtype", "]", "for", "chunk", "in", "integers", ":", "for", "a", "in", "atoms", ":", "yield", "not", "not", "chunk", "&", "a" ]
Yield booleans unpacking integers of dtype bit-length. >>> list(unpackbools([42], 'B')) [False, True, False, True, False, True, False, False]
[ "Yield", "booleans", "unpacking", "integers", "of", "dtype", "bit", "-", "length", "." ]
ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/transform.py#L106-L116
train
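`packbools`/`unpackbools` lean on two module-level tables: NBITS (dtype code to bit width) and ATOMS (dtype code to the powers of two of that width). The sketch below is a plausible minimal reconstruction of those tables, an assumption rather than the library's exact code, just to show the mechanics:

from itertools import compress, zip_longest

NBITS = {'B': 8}  # assumed shape; bitsets also covers wider dtypes such as 'H' and 'L'
ATOMS = {dtype: [1 << i for i in range(r)] for dtype, r in NBITS.items()}

def packbools(bools, dtype='B'):
    r = NBITS[dtype]
    atoms = ATOMS[dtype]
    # Group bools into chunks of r (padding with False) and sum the set atoms.
    for chunk in zip_longest(*[iter(bools)] * r, fillvalue=False):
        yield sum(compress(atoms, chunk))

def unpackbools(integers, dtype='B'):
    atoms = ATOMS[dtype]
    for chunk in integers:
        for a in atoms:
            yield not not chunk & a

print(list(packbools([False, True, False, True, False, True])))  # [42]
print(list(unpackbools([42]))[:6])  # [False, True, False, True, False, True]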
guaix-ucm/numina
numina/array/wavecalib/arccalibration.py
select_data_for_fit
def select_data_for_fit(list_of_wvfeatures):
    """Select information from valid arc lines to facilitate posterior fits.

    Parameters
    ----------
    list_of_wvfeatures : list (of WavecalFeature instances)
        A list of size equal to the number of identified lines, whose
        elements are instances of the class WavecalFeature, containing
        all the relevant information concerning the line
        identification.

    Returns
    -------
    nfit : int
        Number of valid points for posterior fits.
    ifit : list of int
        List of indices corresponding to the arc lines whose
        coordinates are going to be employed in the posterior fits.
    xfit : 1d numpy array
        X coordinate of points for posterior fits.
    yfit : 1d numpy array
        Y coordinate of points for posterior fits.
    wfit : 1d numpy array
        Cost function of points for posterior fits. The inverse of
        these values can be employed for weighted fits.

    """
    nlines_arc = len(list_of_wvfeatures)
    nfit = 0
    ifit = []
    xfit = np.array([])
    yfit = np.array([])
    wfit = np.array([])
    for i in range(nlines_arc):
        if list_of_wvfeatures[i].line_ok:
            ifit.append(i)
            xfit = np.append(xfit, [list_of_wvfeatures[i].xpos])
            yfit = np.append(yfit, [list_of_wvfeatures[i].reference])
            wfit = np.append(wfit, [list_of_wvfeatures[i].funcost])
            nfit += 1

    return nfit, ifit, xfit, yfit, wfit
python
def select_data_for_fit(list_of_wvfeatures): """Select information from valid arc lines to facilitate posterior fits. Parameters ---------- list_of_wvfeatures : list (of WavecalFeature instances) A list of size equal to the number of identified lines, which elements are instances of the class WavecalFeature, containing all the relevant information concerning the line identification. Returns ------- nfit : int Number of valid points for posterior fits. ifit : list of int List of indices corresponding to the arc lines which coordinates are going to be employed in the posterior fits. xfit : 1d numpy aray X coordinate of points for posterior fits. yfit : 1d numpy array Y coordinate of points for posterior fits. wfit : 1d numpy array Cost function of points for posterior fits. The inverse of these values can be employed for weighted fits. """ nlines_arc = len(list_of_wvfeatures) nfit = 0 ifit = [] xfit = np.array([]) yfit = np.array([]) wfit = np.array([]) for i in range(nlines_arc): if list_of_wvfeatures[i].line_ok: ifit.append(i) xfit = np.append(xfit, [list_of_wvfeatures[i].xpos]) yfit = np.append(yfit, [list_of_wvfeatures[i].reference]) wfit = np.append(wfit, [list_of_wvfeatures[i].funcost]) nfit += 1 return nfit, ifit, xfit, yfit, wfit
[ "def", "select_data_for_fit", "(", "list_of_wvfeatures", ")", ":", "nlines_arc", "=", "len", "(", "list_of_wvfeatures", ")", "nfit", "=", "0", "ifit", "=", "[", "]", "xfit", "=", "np", ".", "array", "(", "[", "]", ")", "yfit", "=", "np", ".", "array", "(", "[", "]", ")", "wfit", "=", "np", ".", "array", "(", "[", "]", ")", "for", "i", "in", "range", "(", "nlines_arc", ")", ":", "if", "list_of_wvfeatures", "[", "i", "]", ".", "line_ok", ":", "ifit", ".", "append", "(", "i", ")", "xfit", "=", "np", ".", "append", "(", "xfit", ",", "[", "list_of_wvfeatures", "[", "i", "]", ".", "xpos", "]", ")", "yfit", "=", "np", ".", "append", "(", "yfit", ",", "[", "list_of_wvfeatures", "[", "i", "]", ".", "reference", "]", ")", "wfit", "=", "np", ".", "append", "(", "wfit", ",", "[", "list_of_wvfeatures", "[", "i", "]", ".", "funcost", "]", ")", "nfit", "+=", "1", "return", "nfit", ",", "ifit", ",", "xfit", ",", "yfit", ",", "wfit" ]
Select information from valid arc lines to facilitate posterior fits. Parameters ---------- list_of_wvfeatures : list (of WavecalFeature instances) A list of size equal to the number of identified lines, whose elements are instances of the class WavecalFeature, containing all the relevant information concerning the line identification. Returns ------- nfit : int Number of valid points for posterior fits. ifit : list of int List of indices corresponding to the arc lines whose coordinates are going to be employed in the posterior fits. xfit : 1d numpy array X coordinate of points for posterior fits. yfit : 1d numpy array Y coordinate of points for posterior fits. wfit : 1d numpy array Cost function of points for posterior fits. The inverse of these values can be employed for weighted fits.
[ "Select", "information", "from", "valid", "arc", "lines", "to", "facilitate", "posterior", "fits", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/arccalibration.py#L39-L82
train
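An illustrative call, assuming numina is installed. Since the WavecalFeature constructor is not shown in this record, a hypothetical namedtuple stand-in exposing only the four attributes the function reads (line_ok, xpos, reference, funcost) is used instead.

from collections import namedtuple
from numina.array.wavecalib.arccalibration import select_data_for_fit

Feature = namedtuple('Feature', 'line_ok xpos reference funcost')  # hypothetical stand-in
features = [
    Feature(True, 100.0, 4000.0, 1.5),
    Feature(False, 250.0, 0.0, 0.0),   # rejected identification: skipped
    Feature(True, 400.0, 4500.0, 2.0),
]
nfit, ifit, xfit, yfit, wfit = select_data_for_fit(features)
# nfit == 2, ifit == [0, 2], xfit == array([100., 400.])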
guaix-ucm/numina
numina/array/wavecalib/arccalibration.py
gen_triplets_master
def gen_triplets_master(wv_master, geometry=None, debugplot=0): """Compute information associated to triplets in master table. Determine all the possible triplets that can be generated from the array `wv_master`. In addition, the relative position of the central line of each triplet is also computed. Parameters ---------- wv_master : 1d numpy array, float Array with wavelengths corresponding to the master table (Angstroms). geometry : tuple (4 integers) or None x, y, dx, dy values employed to set the window geometry. debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- ntriplets_master : int Number of triplets built from master table. ratios_master_sorted : 1d numpy array, float Array with values of the relative position of the central line of each triplet, sorted in ascending order. triplets_master_sorted_list : list of tuples List with tuples of three numbers, corresponding to the three line indices in the master table. The list is sorted to be in correspondence with `ratios_master_sorted`. """ nlines_master = wv_master.size # Check that the wavelengths in the master table are sorted wv_previous = wv_master[0] for i in range(1, nlines_master): if wv_previous >= wv_master[i]: raise ValueError('Wavelengths:\n--> ' + str(wv_previous) + '\n--> ' + str(wv_master[i]) + '\nin master table are duplicated or not sorted') wv_previous = wv_master[i] # Generate all the possible triplets with the numbers of the lines # in the master table. Each triplet is defined as a tuple of three # numbers corresponding to the three line indices in the master # table. The collection of tuples is stored in an ordinary python # list. iter_comb_triplets = itertools.combinations(range(nlines_master), 3) triplets_master_list = [val for val in iter_comb_triplets] # Verify that the number of triplets coincides with the expected # value. ntriplets_master = len(triplets_master_list) if ntriplets_master == comb(nlines_master, 3, exact=True): if abs(debugplot) >= 10: print('>>> Total number of lines in master table:', nlines_master) print('>>> Number of triplets in master table...:', ntriplets_master) else: raise ValueError('Invalid number of combinations') # For each triplet, compute the relative position of the central # line. ratios_master = np.zeros(ntriplets_master) for index, value in enumerate(triplets_master_list): i1, i2, i3 = value delta1 = wv_master[i2] - wv_master[i1] delta2 = wv_master[i3] - wv_master[i1] ratios_master[index] = delta1 / delta2 # Compute the array of indices that index the above ratios in # sorted order. isort_ratios_master = np.argsort(ratios_master) # Simultaneous sort of position ratios and triplets. ratios_master_sorted = ratios_master[isort_ratios_master] triplets_master_sorted_list = [triplets_master_list[i] for i in isort_ratios_master] if abs(debugplot) in [21, 22]: # compute and plot histogram with position ratios bins_in = np.linspace(0.0, 1.0, 41) hist, bins_out = np.histogram(ratios_master, bins=bins_in) # from numina.array.display.matplotlib_qt import plt fig = plt.figure() ax = fig.add_subplot(111) width_hist = 0.8*(bins_out[1]-bins_out[0]) center = (bins_out[:-1]+bins_out[1:])/2 ax.bar(center, hist, align='center', width=width_hist) ax.set_xlabel('distance ratio in each triplet') ax.set_ylabel('Number of triplets') ax.set_title("Number of lines/triplets: " + str(nlines_master) + "/" + str(ntriplets_master)) # set window geometry set_window_geometry(geometry) pause_debugplot(debugplot, pltshow=True, tight_layout=True) return ntriplets_master, ratios_master_sorted, triplets_master_sorted_list
python
def gen_triplets_master(wv_master, geometry=None, debugplot=0): """Compute information associated to triplets in master table. Determine all the possible triplets that can be generated from the array `wv_master`. In addition, the relative position of the central line of each triplet is also computed. Parameters ---------- wv_master : 1d numpy array, float Array with wavelengths corresponding to the master table (Angstroms). geometry : tuple (4 integers) or None x, y, dx, dy values employed to set the window geometry. debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- ntriplets_master : int Number of triplets built from master table. ratios_master_sorted : 1d numpy array, float Array with values of the relative position of the central line of each triplet, sorted in ascending order. triplets_master_sorted_list : list of tuples List with tuples of three numbers, corresponding to the three line indices in the master table. The list is sorted to be in correspondence with `ratios_master_sorted`. """ nlines_master = wv_master.size # Check that the wavelengths in the master table are sorted wv_previous = wv_master[0] for i in range(1, nlines_master): if wv_previous >= wv_master[i]: raise ValueError('Wavelengths:\n--> ' + str(wv_previous) + '\n--> ' + str(wv_master[i]) + '\nin master table are duplicated or not sorted') wv_previous = wv_master[i] # Generate all the possible triplets with the numbers of the lines # in the master table. Each triplet is defined as a tuple of three # numbers corresponding to the three line indices in the master # table. The collection of tuples is stored in an ordinary python # list. iter_comb_triplets = itertools.combinations(range(nlines_master), 3) triplets_master_list = [val for val in iter_comb_triplets] # Verify that the number of triplets coincides with the expected # value. ntriplets_master = len(triplets_master_list) if ntriplets_master == comb(nlines_master, 3, exact=True): if abs(debugplot) >= 10: print('>>> Total number of lines in master table:', nlines_master) print('>>> Number of triplets in master table...:', ntriplets_master) else: raise ValueError('Invalid number of combinations') # For each triplet, compute the relative position of the central # line. ratios_master = np.zeros(ntriplets_master) for index, value in enumerate(triplets_master_list): i1, i2, i3 = value delta1 = wv_master[i2] - wv_master[i1] delta2 = wv_master[i3] - wv_master[i1] ratios_master[index] = delta1 / delta2 # Compute the array of indices that index the above ratios in # sorted order. isort_ratios_master = np.argsort(ratios_master) # Simultaneous sort of position ratios and triplets. ratios_master_sorted = ratios_master[isort_ratios_master] triplets_master_sorted_list = [triplets_master_list[i] for i in isort_ratios_master] if abs(debugplot) in [21, 22]: # compute and plot histogram with position ratios bins_in = np.linspace(0.0, 1.0, 41) hist, bins_out = np.histogram(ratios_master, bins=bins_in) # from numina.array.display.matplotlib_qt import plt fig = plt.figure() ax = fig.add_subplot(111) width_hist = 0.8*(bins_out[1]-bins_out[0]) center = (bins_out[:-1]+bins_out[1:])/2 ax.bar(center, hist, align='center', width=width_hist) ax.set_xlabel('distance ratio in each triplet') ax.set_ylabel('Number of triplets') ax.set_title("Number of lines/triplets: " + str(nlines_master) + "/" + str(ntriplets_master)) # set window geometry set_window_geometry(geometry) pause_debugplot(debugplot, pltshow=True, tight_layout=True) return ntriplets_master, ratios_master_sorted, triplets_master_sorted_list
[ "def", "gen_triplets_master", "(", "wv_master", ",", "geometry", "=", "None", ",", "debugplot", "=", "0", ")", ":", "nlines_master", "=", "wv_master", ".", "size", "# Check that the wavelengths in the master table are sorted", "wv_previous", "=", "wv_master", "[", "0", "]", "for", "i", "in", "range", "(", "1", ",", "nlines_master", ")", ":", "if", "wv_previous", ">=", "wv_master", "[", "i", "]", ":", "raise", "ValueError", "(", "'Wavelengths:\\n--> '", "+", "str", "(", "wv_previous", ")", "+", "'\\n--> '", "+", "str", "(", "wv_master", "[", "i", "]", ")", "+", "'\\nin master table are duplicated or not sorted'", ")", "wv_previous", "=", "wv_master", "[", "i", "]", "# Generate all the possible triplets with the numbers of the lines", "# in the master table. Each triplet is defined as a tuple of three", "# numbers corresponding to the three line indices in the master", "# table. The collection of tuples is stored in an ordinary python", "# list.", "iter_comb_triplets", "=", "itertools", ".", "combinations", "(", "range", "(", "nlines_master", ")", ",", "3", ")", "triplets_master_list", "=", "[", "val", "for", "val", "in", "iter_comb_triplets", "]", "# Verify that the number of triplets coincides with the expected", "# value.", "ntriplets_master", "=", "len", "(", "triplets_master_list", ")", "if", "ntriplets_master", "==", "comb", "(", "nlines_master", ",", "3", ",", "exact", "=", "True", ")", ":", "if", "abs", "(", "debugplot", ")", ">=", "10", ":", "print", "(", "'>>> Total number of lines in master table:'", ",", "nlines_master", ")", "print", "(", "'>>> Number of triplets in master table...:'", ",", "ntriplets_master", ")", "else", ":", "raise", "ValueError", "(", "'Invalid number of combinations'", ")", "# For each triplet, compute the relative position of the central", "# line.", "ratios_master", "=", "np", ".", "zeros", "(", "ntriplets_master", ")", "for", "index", ",", "value", "in", "enumerate", "(", "triplets_master_list", ")", ":", "i1", ",", "i2", ",", "i3", "=", "value", "delta1", "=", "wv_master", "[", "i2", "]", "-", "wv_master", "[", "i1", "]", "delta2", "=", "wv_master", "[", "i3", "]", "-", "wv_master", "[", "i1", "]", "ratios_master", "[", "index", "]", "=", "delta1", "/", "delta2", "# Compute the array of indices that index the above ratios in", "# sorted order.", "isort_ratios_master", "=", "np", ".", "argsort", "(", "ratios_master", ")", "# Simultaneous sort of position ratios and triplets.", "ratios_master_sorted", "=", "ratios_master", "[", "isort_ratios_master", "]", "triplets_master_sorted_list", "=", "[", "triplets_master_list", "[", "i", "]", "for", "i", "in", "isort_ratios_master", "]", "if", "abs", "(", "debugplot", ")", "in", "[", "21", ",", "22", "]", ":", "# compute and plot histogram with position ratios", "bins_in", "=", "np", ".", "linspace", "(", "0.0", ",", "1.0", ",", "41", ")", "hist", ",", "bins_out", "=", "np", ".", "histogram", "(", "ratios_master", ",", "bins", "=", "bins_in", ")", "#", "from", "numina", ".", "array", ".", "display", ".", "matplotlib_qt", "import", "plt", "fig", "=", "plt", ".", "figure", "(", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ")", "width_hist", "=", "0.8", "*", "(", "bins_out", "[", "1", "]", "-", "bins_out", "[", "0", "]", ")", "center", "=", "(", "bins_out", "[", ":", "-", "1", "]", "+", "bins_out", "[", "1", ":", "]", ")", "/", "2", "ax", ".", "bar", "(", "center", ",", "hist", ",", "align", "=", "'center'", ",", "width", "=", "width_hist", ")", "ax", ".", "set_xlabel", "(", "'distance ratio in each triplet'", ")", "ax", ".", "set_ylabel", "(", "'Number of triplets'", ")", "ax", ".", "set_title", "(", "\"Number of lines/triplets: \"", "+", "str", "(", "nlines_master", ")", "+", "\"/\"", "+", "str", "(", "ntriplets_master", ")", ")", "# set window geometry", "set_window_geometry", "(", "geometry", ")", "pause_debugplot", "(", "debugplot", ",", "pltshow", "=", "True", ",", "tight_layout", "=", "True", ")", "return", "ntriplets_master", ",", "ratios_master_sorted", ",", "triplets_master_sorted_list" ]
Compute information associated to triplets in master table. Determine all the possible triplets that can be generated from the array `wv_master`. In addition, the relative position of the central line of each triplet is also computed. Parameters ---------- wv_master : 1d numpy array, float Array with wavelengths corresponding to the master table (Angstroms). geometry : tuple (4 integers) or None x, y, dx, dy values employed to set the window geometry. debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- ntriplets_master : int Number of triplets built from master table. ratios_master_sorted : 1d numpy array, float Array with values of the relative position of the central line of each triplet, sorted in ascending order. triplets_master_sorted_list : list of tuples List with tuples of three numbers, corresponding to the three line indices in the master table. The list is sorted to be in correspondence with `ratios_master_sorted`.
[ "Compute", "information", "associated", "to", "triplets", "in", "master", "table", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/arccalibration.py#L324-L425
train
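A small usage sketch, assuming numina is importable: for n sorted master wavelengths the function enumerates all C(n, 3) triplets and returns the central-line position ratios in ascending order.

import numpy as np
from numina.array.wavecalib.arccalibration import gen_triplets_master

wv_master = np.array([4000.0, 4100.0, 4400.0, 5000.0])  # must be strictly increasing
ntrip, ratios, triplets = gen_triplets_master(wv_master)
# C(4, 3) = 4 triplets; e.g. (0, 1, 2) yields (4100-4000)/(4400-4000) = 0.25
assert ntrip == 4 and np.all(np.diff(ratios) >= 0)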
guaix-ucm/numina
numina/array/wavecalib/arccalibration.py
arccalibration
def arccalibration(wv_master, xpos_arc, naxis1_arc, crpix1, wv_ini_search, wv_end_search, wvmin_useful, wvmax_useful, error_xpos_arc, times_sigma_r, frac_triplets_for_sum, times_sigma_theil_sen, poly_degree_wfit, times_sigma_polfilt, times_sigma_cook, times_sigma_inclusion, geometry=None, debugplot=0): """Performs arc line identification for arc calibration. This function is a wrapper of two functions, which are responsible of computing all the relevant information concerning the triplets generated from the master table and the actual identification procedure of the arc lines, respectively. The separation of those computations in two different functions helps to avoid the repetition of calls to the first function when calibrating several arcs using the same master table. Parameters ---------- wv_master : 1d numpy array, float Array with wavelengths corresponding to the master table (Angstroms). xpos_arc : 1d numpy array, float Location of arc lines (pixels). naxis1_arc : int NAXIS1 for arc spectrum. crpix1 : float CRPIX1 value to be employed in the wavelength calibration. wv_ini_search : float Minimum expected wavelength in spectrum. wv_end_search : float Maximum expected wavelength in spectrum. wvmin_useful : float If not None, this value is used to clip detected lines below it. wvmax_useful : float If not None, this value is used to clip detected lines above it. error_xpos_arc : float Error in arc line position (pixels). times_sigma_r : float Times sigma to search for valid line position ratios. frac_triplets_for_sum : float Fraction of distances to different triplets to sum when computing the cost function. times_sigma_theil_sen : float Number of times the (robust) standard deviation around the linear fit (using the Theil-Sen method) to reject points. poly_degree_wfit : int Degree for polynomial fit to wavelength calibration. times_sigma_polfilt : float Number of times the (robust) standard deviation around the polynomial fit to reject points. times_sigma_cook : float Number of times the standard deviation of Cook's distances to detect outliers. If zero, this method of outlier detection is ignored. times_sigma_inclusion : float Number of times the (robust) standard deviation around the polynomial fit to include a new line in the set of identified lines. geometry : tuple (4 integers) or None x, y, dx, dy values employed to set the window geometry. debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- list_of_wvfeatures : list (of WavecalFeature instances) A list of size equal to the number of identified lines, which elements are instances of the class WavecalFeature, containing all the relevant information concerning the line identification. """ ntriplets_master, ratios_master_sorted, triplets_master_sorted_list = \ gen_triplets_master(wv_master=wv_master, geometry=geometry, debugplot=debugplot) list_of_wvfeatures = arccalibration_direct( wv_master=wv_master, ntriplets_master=ntriplets_master, ratios_master_sorted=ratios_master_sorted, triplets_master_sorted_list=triplets_master_sorted_list, xpos_arc=xpos_arc, naxis1_arc=naxis1_arc, crpix1=crpix1, wv_ini_search=wv_ini_search, wv_end_search=wv_end_search, wvmin_useful=wvmin_useful, wvmax_useful=wvmax_useful, error_xpos_arc=error_xpos_arc, times_sigma_r=times_sigma_r, frac_triplets_for_sum=frac_triplets_for_sum, times_sigma_theil_sen=times_sigma_theil_sen, poly_degree_wfit=poly_degree_wfit, times_sigma_polfilt=times_sigma_polfilt, times_sigma_cook=times_sigma_cook, times_sigma_inclusion=times_sigma_inclusion, geometry=geometry, debugplot=debugplot) return list_of_wvfeatures
python
def arccalibration(wv_master, xpos_arc, naxis1_arc, crpix1, wv_ini_search, wv_end_search, wvmin_useful, wvmax_useful, error_xpos_arc, times_sigma_r, frac_triplets_for_sum, times_sigma_theil_sen, poly_degree_wfit, times_sigma_polfilt, times_sigma_cook, times_sigma_inclusion, geometry=None, debugplot=0): """Performs arc line identification for arc calibration. This function is a wrapper of two functions, which are responsible of computing all the relevant information concerning the triplets generated from the master table and the actual identification procedure of the arc lines, respectively. The separation of those computations in two different functions helps to avoid the repetition of calls to the first function when calibrating several arcs using the same master table. Parameters ---------- wv_master : 1d numpy array, float Array with wavelengths corresponding to the master table (Angstroms). xpos_arc : 1d numpy array, float Location of arc lines (pixels). naxis1_arc : int NAXIS1 for arc spectrum. crpix1 : float CRPIX1 value to be employed in the wavelength calibration. wv_ini_search : float Minimum expected wavelength in spectrum. wv_end_search : float Maximum expected wavelength in spectrum. wvmin_useful : float If not None, this value is used to clip detected lines below it. wvmax_useful : float If not None, this value is used to clip detected lines above it. error_xpos_arc : float Error in arc line position (pixels). times_sigma_r : float Times sigma to search for valid line position ratios. frac_triplets_for_sum : float Fraction of distances to different triplets to sum when computing the cost function. times_sigma_theil_sen : float Number of times the (robust) standard deviation around the linear fit (using the Theil-Sen method) to reject points. poly_degree_wfit : int Degree for polynomial fit to wavelength calibration. times_sigma_polfilt : float Number of times the (robust) standard deviation around the polynomial fit to reject points. times_sigma_cook : float Number of times the standard deviation of Cook's distances to detect outliers. If zero, this method of outlier detection is ignored. times_sigma_inclusion : float Number of times the (robust) standard deviation around the polynomial fit to include a new line in the set of identified lines. geometry : tuple (4 integers) or None x, y, dx, dy values employed to set the window geometry. debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- list_of_wvfeatures : list (of WavecalFeature instances) A list of size equal to the number of identified lines, which elements are instances of the class WavecalFeature, containing all the relevant information concerning the line identification. """ ntriplets_master, ratios_master_sorted, triplets_master_sorted_list = \ gen_triplets_master(wv_master=wv_master, geometry=geometry, debugplot=debugplot) list_of_wvfeatures = arccalibration_direct( wv_master=wv_master, ntriplets_master=ntriplets_master, ratios_master_sorted=ratios_master_sorted, triplets_master_sorted_list=triplets_master_sorted_list, xpos_arc=xpos_arc, naxis1_arc=naxis1_arc, crpix1=crpix1, wv_ini_search=wv_ini_search, wv_end_search=wv_end_search, wvmin_useful=wvmin_useful, wvmax_useful=wvmax_useful, error_xpos_arc=error_xpos_arc, times_sigma_r=times_sigma_r, frac_triplets_for_sum=frac_triplets_for_sum, times_sigma_theil_sen=times_sigma_theil_sen, poly_degree_wfit=poly_degree_wfit, times_sigma_polfilt=times_sigma_polfilt, times_sigma_cook=times_sigma_cook, times_sigma_inclusion=times_sigma_inclusion, geometry=geometry, debugplot=debugplot) return list_of_wvfeatures
[ "def", "arccalibration", "(", "wv_master", ",", "xpos_arc", ",", "naxis1_arc", ",", "crpix1", ",", "wv_ini_search", ",", "wv_end_search", ",", "wvmin_useful", ",", "wvmax_useful", ",", "error_xpos_arc", ",", "times_sigma_r", ",", "frac_triplets_for_sum", ",", "times_sigma_theil_sen", ",", "poly_degree_wfit", ",", "times_sigma_polfilt", ",", "times_sigma_cook", ",", "times_sigma_inclusion", ",", "geometry", "=", "None", ",", "debugplot", "=", "0", ")", ":", "ntriplets_master", ",", "ratios_master_sorted", ",", "triplets_master_sorted_list", "=", "gen_triplets_master", "(", "wv_master", "=", "wv_master", ",", "geometry", "=", "geometry", ",", "debugplot", "=", "debugplot", ")", "list_of_wvfeatures", "=", "arccalibration_direct", "(", "wv_master", "=", "wv_master", ",", "ntriplets_master", "=", "ntriplets_master", ",", "ratios_master_sorted", "=", "ratios_master_sorted", ",", "triplets_master_sorted_list", "=", "triplets_master_sorted_list", ",", "xpos_arc", "=", "xpos_arc", ",", "naxis1_arc", "=", "naxis1_arc", ",", "crpix1", "=", "crpix1", ",", "wv_ini_search", "=", "wv_ini_search", ",", "wv_end_search", "=", "wv_end_search", ",", "wvmin_useful", "=", "wvmin_useful", ",", "wvmax_useful", "=", "wvmax_useful", ",", "error_xpos_arc", "=", "error_xpos_arc", ",", "times_sigma_r", "=", "times_sigma_r", ",", "frac_triplets_for_sum", "=", "frac_triplets_for_sum", ",", "times_sigma_theil_sen", "=", "times_sigma_theil_sen", ",", "poly_degree_wfit", "=", "poly_degree_wfit", ",", "times_sigma_polfilt", "=", "times_sigma_polfilt", ",", "times_sigma_cook", "=", "times_sigma_cook", ",", "times_sigma_inclusion", "=", "times_sigma_inclusion", ",", "geometry", "=", "geometry", ",", "debugplot", "=", "debugplot", ")", "return", "list_of_wvfeatures" ]
Performs arc line identification for arc calibration. This function is a wrapper of two functions, which are responsible for computing all the relevant information concerning the triplets generated from the master table and the actual identification procedure of the arc lines, respectively. The separation of those computations in two different functions helps to avoid the repetition of calls to the first function when calibrating several arcs using the same master table. Parameters ---------- wv_master : 1d numpy array, float Array with wavelengths corresponding to the master table (Angstroms). xpos_arc : 1d numpy array, float Location of arc lines (pixels). naxis1_arc : int NAXIS1 for arc spectrum. crpix1 : float CRPIX1 value to be employed in the wavelength calibration. wv_ini_search : float Minimum expected wavelength in spectrum. wv_end_search : float Maximum expected wavelength in spectrum. wvmin_useful : float If not None, this value is used to clip detected lines below it. wvmax_useful : float If not None, this value is used to clip detected lines above it. error_xpos_arc : float Error in arc line position (pixels). times_sigma_r : float Times sigma to search for valid line position ratios. frac_triplets_for_sum : float Fraction of distances to different triplets to sum when computing the cost function. times_sigma_theil_sen : float Number of times the (robust) standard deviation around the linear fit (using the Theil-Sen method) to reject points. poly_degree_wfit : int Degree for polynomial fit to wavelength calibration. times_sigma_polfilt : float Number of times the (robust) standard deviation around the polynomial fit to reject points. times_sigma_cook : float Number of times the standard deviation of Cook's distances to detect outliers. If zero, this method of outlier detection is ignored. times_sigma_inclusion : float Number of times the (robust) standard deviation around the polynomial fit to include a new line in the set of identified lines. geometry : tuple (4 integers) or None x, y, dx, dy values employed to set the window geometry. debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- list_of_wvfeatures : list (of WavecalFeature instances) A list of size equal to the number of identified lines, whose elements are instances of the class WavecalFeature, containing all the relevant information concerning the line identification.
[ "Performs", "arc", "line", "identification", "for", "arc", "calibration", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/arccalibration.py#L428-L543
train
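A schematic invocation, assuming wv_master and xpos_arc are already defined; every numeric value below is an illustrative placeholder rather than a recommended setting.

list_of_wvfeatures = arccalibration(
    wv_master=wv_master,              # sorted master wavelengths (Angstroms)
    xpos_arc=xpos_arc,                # detected peak locations (pixels)
    naxis1_arc=1024, crpix1=1.0,
    wv_ini_search=3500.0, wv_end_search=7500.0,
    wvmin_useful=None, wvmax_useful=None,
    error_xpos_arc=0.3, times_sigma_r=3.0,
    frac_triplets_for_sum=0.50, times_sigma_theil_sen=10.0,
    poly_degree_wfit=3, times_sigma_polfilt=10.0,
    times_sigma_cook=10.0, times_sigma_inclusion=10.0,
)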
guaix-ucm/numina
numina/array/wavecalib/arccalibration.py
match_wv_arrays
def match_wv_arrays(wv_master, wv_expected_all_peaks, delta_wv_max): """Match two lists with wavelengths. Assign individual wavelengths from wv_master to each expected wavelength when the latter is within the maximum allowed range. Parameters ---------- wv_master : numpy array Array containing the master wavelengths. wv_expected_all_peaks : numpy array Array containing the expected wavelengths (computed, for example, from an approximate polynomial calibration applied to the location of the line peaks). delta_wv_max : float Maximum distance to accept that the master wavelength corresponds to the expected wavelength. Returns ------- wv_verified_all_peaks : numpy array Verified wavelengths from master list. """ # initialize the output array to zero wv_verified_all_peaks = np.zeros_like(wv_expected_all_peaks) # initialize to True array to indicate that no peak has already # been verified (this flag avoids duplication) wv_unused = np.ones_like(wv_expected_all_peaks, dtype=bool) # initialize to np.infty array to store minimum distance to already # identified line minimum_delta_wv = np.ones_like(wv_expected_all_peaks, dtype=float) minimum_delta_wv *= np.infty # since it is likely that len(wv_master) < len(wv_expected_all_peaks), # it is more convenient to execute the search in the following order for i in range(len(wv_master)): j = np.searchsorted(wv_expected_all_peaks, wv_master[i]) if j == 0: delta_wv = abs(wv_master[i] - wv_expected_all_peaks[j]) if delta_wv < delta_wv_max: if wv_unused[j]: wv_verified_all_peaks[j] = wv_master[i] wv_unused[j] = False minimum_delta_wv[j] = delta_wv else: if delta_wv < minimum_delta_wv[j]: wv_verified_all_peaks[j] = wv_master[i] minimum_delta_wv[j] = delta_wv elif j == len(wv_expected_all_peaks): delta_wv = abs(wv_master[i] - wv_expected_all_peaks[j-1]) if delta_wv < delta_wv_max: if wv_unused[j-1]: wv_verified_all_peaks[j-1] = wv_master[i] wv_unused[j-1] = False else: if delta_wv < minimum_delta_wv[j-1]: wv_verified_all_peaks[j-1] = wv_master[i] else: delta_wv1 = abs(wv_master[i] - wv_expected_all_peaks[j-1]) delta_wv2 = abs(wv_master[i] - wv_expected_all_peaks[j]) if delta_wv1 < delta_wv2: if delta_wv1 < delta_wv_max: if wv_unused[j-1]: wv_verified_all_peaks[j-1] = wv_master[i] wv_unused[j-1] = False else: if delta_wv1 < minimum_delta_wv[j-1]: wv_verified_all_peaks[j-1] = wv_master[i] else: if delta_wv2 < delta_wv_max: if wv_unused[j]: wv_verified_all_peaks[j] = wv_master[i] wv_unused[j] = False else: if delta_wv2 < minimum_delta_wv[j]: wv_verified_all_peaks[j] = wv_master[i] return wv_verified_all_peaks
python
def match_wv_arrays(wv_master, wv_expected_all_peaks, delta_wv_max): """Match two lists with wavelengths. Assign individual wavelengths from wv_master to each expected wavelength when the latter is within the maximum allowed range. Parameters ---------- wv_master : numpy array Array containing the master wavelengths. wv_expected_all_peaks : numpy array Array containing the expected wavelengths (computed, for example, from an approximate polynomial calibration applied to the location of the line peaks). delta_wv_max : float Maximum distance to accept that the master wavelength corresponds to the expected wavelength. Returns ------- wv_verified_all_peaks : numpy array Verified wavelengths from master list. """ # initialize the output array to zero wv_verified_all_peaks = np.zeros_like(wv_expected_all_peaks) # initialize to True array to indicate that no peak has already # been verified (this flag avoids duplication) wv_unused = np.ones_like(wv_expected_all_peaks, dtype=bool) # initialize to np.infty array to store minimum distance to already # identified line minimum_delta_wv = np.ones_like(wv_expected_all_peaks, dtype=float) minimum_delta_wv *= np.infty # since it is likely that len(wv_master) < len(wv_expected_all_peaks), # it is more convenient to execute the search in the following order for i in range(len(wv_master)): j = np.searchsorted(wv_expected_all_peaks, wv_master[i]) if j == 0: delta_wv = abs(wv_master[i] - wv_expected_all_peaks[j]) if delta_wv < delta_wv_max: if wv_unused[j]: wv_verified_all_peaks[j] = wv_master[i] wv_unused[j] = False minimum_delta_wv[j] = delta_wv else: if delta_wv < minimum_delta_wv[j]: wv_verified_all_peaks[j] = wv_master[i] minimum_delta_wv[j] = delta_wv elif j == len(wv_expected_all_peaks): delta_wv = abs(wv_master[i] - wv_expected_all_peaks[j-1]) if delta_wv < delta_wv_max: if wv_unused[j-1]: wv_verified_all_peaks[j-1] = wv_master[i] wv_unused[j-1] = False else: if delta_wv < minimum_delta_wv[j-1]: wv_verified_all_peaks[j-1] = wv_master[i] else: delta_wv1 = abs(wv_master[i] - wv_expected_all_peaks[j-1]) delta_wv2 = abs(wv_master[i] - wv_expected_all_peaks[j]) if delta_wv1 < delta_wv2: if delta_wv1 < delta_wv_max: if wv_unused[j-1]: wv_verified_all_peaks[j-1] = wv_master[i] wv_unused[j-1] = False else: if delta_wv1 < minimum_delta_wv[j-1]: wv_verified_all_peaks[j-1] = wv_master[i] else: if delta_wv2 < delta_wv_max: if wv_unused[j]: wv_verified_all_peaks[j] = wv_master[i] wv_unused[j] = False else: if delta_wv2 < minimum_delta_wv[j]: wv_verified_all_peaks[j] = wv_master[i] return wv_verified_all_peaks
[ "def", "match_wv_arrays", "(", "wv_master", ",", "wv_expected_all_peaks", ",", "delta_wv_max", ")", ":", "# initialize the output array to zero", "wv_verified_all_peaks", "=", "np", ".", "zeros_like", "(", "wv_expected_all_peaks", ")", "# initialize to True array to indicate that no peak has already", "# been verified (this flag avoids duplication)", "wv_unused", "=", "np", ".", "ones_like", "(", "wv_expected_all_peaks", ",", "dtype", "=", "bool", ")", "# initialize to np.infty array to store minimum distance to already", "# identified line", "minimum_delta_wv", "=", "np", ".", "ones_like", "(", "wv_expected_all_peaks", ",", "dtype", "=", "float", ")", "minimum_delta_wv", "*=", "np", ".", "infty", "# since it is likely that len(wv_master) < len(wv_expected_all_peaks),", "# it is more convenient to execute the search in the following order", "for", "i", "in", "range", "(", "len", "(", "wv_master", ")", ")", ":", "j", "=", "np", ".", "searchsorted", "(", "wv_expected_all_peaks", ",", "wv_master", "[", "i", "]", ")", "if", "j", "==", "0", ":", "delta_wv", "=", "abs", "(", "wv_master", "[", "i", "]", "-", "wv_expected_all_peaks", "[", "j", "]", ")", "if", "delta_wv", "<", "delta_wv_max", ":", "if", "wv_unused", "[", "j", "]", ":", "wv_verified_all_peaks", "[", "j", "]", "=", "wv_master", "[", "i", "]", "wv_unused", "[", "j", "]", "=", "False", "minimum_delta_wv", "[", "j", "]", "=", "delta_wv", "else", ":", "if", "delta_wv", "<", "minimum_delta_wv", "[", "j", "]", ":", "wv_verified_all_peaks", "[", "j", "]", "=", "wv_master", "[", "i", "]", "minimum_delta_wv", "[", "j", "]", "=", "delta_wv", "elif", "j", "==", "len", "(", "wv_expected_all_peaks", ")", ":", "delta_wv", "=", "abs", "(", "wv_master", "[", "i", "]", "-", "wv_expected_all_peaks", "[", "j", "-", "1", "]", ")", "if", "delta_wv", "<", "delta_wv_max", ":", "if", "wv_unused", "[", "j", "-", "1", "]", ":", "wv_verified_all_peaks", "[", "j", "-", "1", "]", "=", "wv_master", "[", "i", "]", "wv_unused", "[", "j", "-", "1", "]", "=", "False", "else", ":", "if", "delta_wv", "<", "minimum_delta_wv", "[", "j", "-", "1", "]", ":", "wv_verified_all_peaks", "[", "j", "-", "1", "]", "=", "wv_master", "[", "i", "]", "else", ":", "delta_wv1", "=", "abs", "(", "wv_master", "[", "i", "]", "-", "wv_expected_all_peaks", "[", "j", "-", "1", "]", ")", "delta_wv2", "=", "abs", "(", "wv_master", "[", "i", "]", "-", "wv_expected_all_peaks", "[", "j", "]", ")", "if", "delta_wv1", "<", "delta_wv2", ":", "if", "delta_wv1", "<", "delta_wv_max", ":", "if", "wv_unused", "[", "j", "-", "1", "]", ":", "wv_verified_all_peaks", "[", "j", "-", "1", "]", "=", "wv_master", "[", "i", "]", "wv_unused", "[", "j", "-", "1", "]", "=", "False", "else", ":", "if", "delta_wv1", "<", "minimum_delta_wv", "[", "j", "-", "1", "]", ":", "wv_verified_all_peaks", "[", "j", "-", "1", "]", "=", "wv_master", "[", "i", "]", "else", ":", "if", "delta_wv2", "<", "delta_wv_max", ":", "if", "wv_unused", "[", "j", "]", ":", "wv_verified_all_peaks", "[", "j", "]", "=", "wv_master", "[", "i", "]", "wv_unused", "[", "j", "]", "=", "False", "else", ":", "if", "delta_wv2", "<", "minimum_delta_wv", "[", "j", "]", ":", "wv_verified_all_peaks", "[", "j", "]", "=", "wv_master", "[", "i", "]", "return", "wv_verified_all_peaks" ]
Match two lists with wavelengths. Assign individual wavelengths from wv_master to each expected wavelength when the latter is within the maximum allowed range. Parameters ---------- wv_master : numpy array Array containing the master wavelengths. wv_expected_all_peaks : numpy array Array containing the expected wavelengths (computed, for example, from an approximate polynomial calibration applied to the location of the line peaks). delta_wv_max : float Maximum distance to accept that the master wavelength corresponds to the expected wavelength. Returns ------- wv_verified_all_peaks : numpy array Verified wavelengths from master list.
[ "Match", "two", "lists", "with", "wavelengths", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/arccalibration.py#L1519-L1600
train
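A hedged sketch of the matching behaviour, assuming numina is importable: each expected wavelength receives the nearest master wavelength within delta_wv_max, and unmatched entries stay at zero.

import numpy as np
from numina.array.wavecalib.arccalibration import match_wv_arrays

wv_master = np.array([4001.0, 4500.0, 5102.0])
wv_expected = np.array([4000.0, 4100.0, 5100.0])  # assumed sorted
matched = match_wv_arrays(wv_master, wv_expected, 5.0)
# 4001 -> 4000 (delta 1) and 5102 -> 5100 (delta 2); 4500 finds no partner
# matched == array([4001., 0., 5102.])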
guaix-ucm/numina
numina/array/display/matplotlib_qt.py
set_window_geometry
def set_window_geometry(geometry): """Set window geometry. Parameters ========== geometry : tuple (4 integers) or None x, y, dx, dy values employed to set the Qt backend geometry. """ if geometry is not None: x_geom, y_geom, dx_geom, dy_geom = geometry mngr = plt.get_current_fig_manager() if 'window' in dir(mngr): try: mngr.window.setGeometry(x_geom, y_geom, dx_geom, dy_geom) except AttributeError: pass else: pass
python
def set_window_geometry(geometry): """Set window geometry. Parameters ========== geometry : tuple (4 integers) or None x, y, dx, dy values employed to set the Qt backend geometry. """ if geometry is not None: x_geom, y_geom, dx_geom, dy_geom = geometry mngr = plt.get_current_fig_manager() if 'window' in dir(mngr): try: mngr.window.setGeometry(x_geom, y_geom, dx_geom, dy_geom) except AttributeError: pass else: pass
[ "def", "set_window_geometry", "(", "geometry", ")", ":", "if", "geometry", "is", "not", "None", ":", "x_geom", ",", "y_geom", ",", "dx_geom", ",", "dy_geom", "=", "geometry", "mngr", "=", "plt", ".", "get_current_fig_manager", "(", ")", "if", "'window'", "in", "dir", "(", "mngr", ")", ":", "try", ":", "mngr", ".", "window", ".", "setGeometry", "(", "x_geom", ",", "y_geom", ",", "dx_geom", ",", "dy_geom", ")", "except", "AttributeError", ":", "pass", "else", ":", "pass" ]
Set window geometry. Parameters ========== geometry : tuple (4 integers) or None x, y, dx, dy values employed to set the Qt backend geometry.
[ "Set", "window", "geometry", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/display/matplotlib_qt.py#L8-L27
train
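Typical use, mirroring the call inside gen_triplets_master above; both calls are safe no-ops when the active backend exposes no window.

set_window_geometry((0, 0, 640, 480))  # best-effort move/resize of the Qt figure window
set_window_geometry(None)              # explicitly leave the backend default untouched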
arkottke/pysra
pysra/tools.py
parse_fixed_width
def parse_fixed_width(types, lines): """Parse a fixed width line.""" values = [] line = [] for width, parser in types: if not line: line = lines.pop(0).replace('\n', '') values.append(parser(line[:width])) line = line[width:] return values
python
def parse_fixed_width(types, lines): """Parse a fixed width line.""" values = [] line = [] for width, parser in types: if not line: line = lines.pop(0).replace('\n', '') values.append(parser(line[:width])) line = line[width:] return values
[ "def", "parse_fixed_width", "(", "types", ",", "lines", ")", ":", "values", "=", "[", "]", "line", "=", "[", "]", "for", "width", ",", "parser", "in", "types", ":", "if", "not", "line", ":", "line", "=", "lines", ".", "pop", "(", "0", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", "values", ".", "append", "(", "parser", "(", "line", "[", ":", "width", "]", ")", ")", "line", "=", "line", "[", "width", ":", "]", "return", "values" ]
Parse a fixed width line.
[ "Parse", "a", "fixed", "width", "line", "." ]
c72fd389d6c15203c0c00728ac00f101bae6369d
https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/tools.py#L48-L59
train
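A self-contained sketch of the parser above: each (width, parser) pair consumes width characters, and a fresh line is popped whenever the current one is exhausted.

types = [(5, int), (5, int), (10, float)]
lines = ['    1    2       3.5\n']        # one 20-character fixed-width record
values = parse_fixed_width(types, lines)  # consumes lines in place
# values == [1, 2, 3.5] and lines is now empty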
arkottke/pysra
pysra/tools.py
_parse_curves
def _parse_curves(block, **kwargs): """Parse nonlinear curves block.""" count = int(block.pop(0)) curves = [] for i in range(count): for param in ['mod_reduc', 'damping']: length, name = parse_fixed_width([(5, int), (65, to_str)], block) curves.append( site.NonlinearProperty( name, parse_fixed_width(length * [(10, float)], block), parse_fixed_width(length * [(10, float)], block), param)) length = int(block[0][:5]) soil_types = parse_fixed_width((length + 1) * [(5, int)], block)[1:] # Group soil type number and curves together return {(soil_types[i // 2], c.param): c for i, c in enumerate(curves)}
python
def _parse_curves(block, **kwargs): """Parse nonlinear curves block.""" count = int(block.pop(0)) curves = [] for i in range(count): for param in ['mod_reduc', 'damping']: length, name = parse_fixed_width([(5, int), (65, to_str)], block) curves.append( site.NonlinearProperty( name, parse_fixed_width(length * [(10, float)], block), parse_fixed_width(length * [(10, float)], block), param)) length = int(block[0][:5]) soil_types = parse_fixed_width((length + 1) * [(5, int)], block)[1:] # Group soil type number and curves together return {(soil_types[i // 2], c.param): c for i, c in enumerate(curves)}
[ "def", "_parse_curves", "(", "block", ",", "*", "*", "kwargs", ")", ":", "count", "=", "int", "(", "block", ".", "pop", "(", "0", ")", ")", "curves", "=", "[", "]", "for", "i", "in", "range", "(", "count", ")", ":", "for", "param", "in", "[", "'mod_reduc'", ",", "'damping'", "]", ":", "length", ",", "name", "=", "parse_fixed_width", "(", "[", "(", "5", ",", "int", ")", ",", "(", "65", ",", "to_str", ")", "]", ",", "block", ")", "curves", ".", "append", "(", "site", ".", "NonlinearProperty", "(", "name", ",", "parse_fixed_width", "(", "length", "*", "[", "(", "10", ",", "float", ")", "]", ",", "block", ")", ",", "parse_fixed_width", "(", "length", "*", "[", "(", "10", ",", "float", ")", "]", ",", "block", ")", ",", "param", ")", ")", "length", "=", "int", "(", "block", "[", "0", "]", "[", ":", "5", "]", ")", "soil_types", "=", "parse_fixed_width", "(", "(", "length", "+", "1", ")", "*", "[", "(", "5", ",", "int", ")", "]", ",", "block", ")", "[", "1", ":", "]", "# Group soil type number and curves together", "return", "{", "(", "soil_types", "[", "i", "//", "2", "]", ",", "c", ".", "param", ")", ":", "c", "for", "i", ",", "c", "in", "enumerate", "(", "curves", ")", "}" ]
Parse nonlinear curves block.
[ "Parse", "nonlinear", "curves", "block", "." ]
c72fd389d6c15203c0c00728ac00f101bae6369d
https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/tools.py#L62-L80
train
arkottke/pysra
pysra/tools.py
_parse_soil_profile
def _parse_soil_profile(block, units, curves, **kwargs): """Parse soil profile block.""" wt_layer, length, _, name = parse_fixed_width( 3 * [(5, int)] + [(55, to_str)], block) layers = [] soil_types = [] for i in range(length): index, soil_idx, thickness, shear_mod, damping, unit_wt, shear_vel = \ parse_fixed_width( [(5, int), (5, int), (15, to_float)] + 4 * [(10, to_float)], block ) st = site.SoilType( soil_idx, unit_wt, curves[(soil_idx, 'mod_reduc')], curves[(soil_idx, 'damping')], ) try: # Try to find previously added soil type st = soil_types[soil_types.index(st)] except ValueError: soil_types.append(st) layers.append(site.Layer(st, thickness, shear_vel)) if units == 'english': # Convert from English to metric for st in soil_types: st.unit_wt *= 0.00015708746 for l in layers: l.thickness *= 0.3048 l.shear_vel *= 0.3048 p = site.Profile(layers) p.update_layers() p.wt_depth = p[wt_layer - 1].depth return p
python
def _parse_soil_profile(block, units, curves, **kwargs): """Parse soil profile block.""" wt_layer, length, _, name = parse_fixed_width( 3 * [(5, int)] + [(55, to_str)], block) layers = [] soil_types = [] for i in range(length): index, soil_idx, thickness, shear_mod, damping, unit_wt, shear_vel = \ parse_fixed_width( [(5, int), (5, int), (15, to_float)] + 4 * [(10, to_float)], block ) st = site.SoilType( soil_idx, unit_wt, curves[(soil_idx, 'mod_reduc')], curves[(soil_idx, 'damping')], ) try: # Try to find previously added soil type st = soil_types[soil_types.index(st)] except ValueError: soil_types.append(st) layers.append(site.Layer(st, thickness, shear_vel)) if units == 'english': # Convert from English to metric for st in soil_types: st.unit_wt *= 0.00015708746 for l in layers: l.thickness *= 0.3048 l.shear_vel *= 0.3048 p = site.Profile(layers) p.update_layers() p.wt_depth = p[wt_layer - 1].depth return p
[ "def", "_parse_soil_profile", "(", "block", ",", "units", ",", "curves", ",", "*", "*", "kwargs", ")", ":", "wt_layer", ",", "length", ",", "_", ",", "name", "=", "parse_fixed_width", "(", "3", "*", "[", "(", "5", ",", "int", ")", "]", "+", "[", "(", "55", ",", "to_str", ")", "]", ",", "block", ")", "layers", "=", "[", "]", "soil_types", "=", "[", "]", "for", "i", "in", "range", "(", "length", ")", ":", "index", ",", "soil_idx", ",", "thickness", ",", "shear_mod", ",", "damping", ",", "unit_wt", ",", "shear_vel", "=", "parse_fixed_width", "(", "[", "(", "5", ",", "int", ")", ",", "(", "5", ",", "int", ")", ",", "(", "15", ",", "to_float", ")", "]", "+", "4", "*", "[", "(", "10", ",", "to_float", ")", "]", ",", "block", ")", "st", "=", "site", ".", "SoilType", "(", "soil_idx", ",", "unit_wt", ",", "curves", "[", "(", "soil_idx", ",", "'mod_reduc'", ")", "]", ",", "curves", "[", "(", "soil_idx", ",", "'damping'", ")", "]", ",", ")", "try", ":", "# Try to find previously added soil type", "st", "=", "soil_types", "[", "soil_types", ".", "index", "(", "st", ")", "]", "except", "ValueError", ":", "soil_types", ".", "append", "(", "st", ")", "layers", ".", "append", "(", "site", ".", "Layer", "(", "st", ",", "thickness", ",", "shear_vel", ")", ")", "if", "units", "==", "'english'", ":", "# Convert from English to metric", "for", "st", "in", "soil_types", ":", "st", ".", "unit_wt", "*=", "0.00015708746", "for", "l", "in", "layers", ":", "l", ".", "thickness", "*=", "0.3048", "l", ".", "shear_vel", "*=", "0.3048", "p", "=", "site", ".", "Profile", "(", "layers", ")", "p", ".", "update_layers", "(", ")", "p", ".", "wt_depth", "=", "p", "[", "wt_layer", "-", "1", "]", ".", "depth", "return", "p" ]
Parse soil profile block.
[ "Parse", "soil", "profile", "block", "." ]
c72fd389d6c15203c0c00728ac00f101bae6369d
https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/tools.py#L83-L123
train
arkottke/pysra
pysra/tools.py
_parse_motion
def _parse_motion(block, **kwargs): """Parse motin specification block.""" _, fa_length, time_step, name, fmt = parse_fixed_width( [(5, int), (5, int), (10, float), (30, to_str), (30, to_str)], block) scale, pga, _, header_lines, _ = parse_fixed_width( 3 * [(10, to_float)] + 2 * [(5, int)], block) m = re.search(r'(\d+)\w(\d+)\.\d+', fmt) count_per_line = int(m.group(1)) width = int(m.group(2)) fname = os.path.join(os.path.dirname(kwargs['fname']), name) accels = np.genfromtxt( fname, delimiter=(count_per_line * [width]), skip_header=header_lines, ) if np.isfinite(scale): pass elif np.isfinite(pga): scale = pga / np.abs(accels).max() else: scale = 1. accels *= scale m = motion.TimeSeriesMotion(fname, '', time_step, accels, fa_length) return m
python
def _parse_motion(block, **kwargs): """Parse motin specification block.""" _, fa_length, time_step, name, fmt = parse_fixed_width( [(5, int), (5, int), (10, float), (30, to_str), (30, to_str)], block) scale, pga, _, header_lines, _ = parse_fixed_width( 3 * [(10, to_float)] + 2 * [(5, int)], block) m = re.search(r'(\d+)\w(\d+)\.\d+', fmt) count_per_line = int(m.group(1)) width = int(m.group(2)) fname = os.path.join(os.path.dirname(kwargs['fname']), name) accels = np.genfromtxt( fname, delimiter=(count_per_line * [width]), skip_header=header_lines, ) if np.isfinite(scale): pass elif np.isfinite(pga): scale = pga / np.abs(accels).max() else: scale = 1. accels *= scale m = motion.TimeSeriesMotion(fname, '', time_step, accels, fa_length) return m
[ "def", "_parse_motion", "(", "block", ",", "*", "*", "kwargs", ")", ":", "_", ",", "fa_length", ",", "time_step", ",", "name", ",", "fmt", "=", "parse_fixed_width", "(", "[", "(", "5", ",", "int", ")", ",", "(", "5", ",", "int", ")", ",", "(", "10", ",", "float", ")", ",", "(", "30", ",", "to_str", ")", ",", "(", "30", ",", "to_str", ")", "]", ",", "block", ")", "scale", ",", "pga", ",", "_", ",", "header_lines", ",", "_", "=", "parse_fixed_width", "(", "3", "*", "[", "(", "10", ",", "to_float", ")", "]", "+", "2", "*", "[", "(", "5", ",", "int", ")", "]", ",", "block", ")", "m", "=", "re", ".", "search", "(", "r'(\\d+)\\w(\\d+)\\.\\d+'", ",", "fmt", ")", "count_per_line", "=", "int", "(", "m", ".", "group", "(", "1", ")", ")", "width", "=", "int", "(", "m", ".", "group", "(", "2", ")", ")", "fname", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "kwargs", "[", "'fname'", "]", ")", ",", "name", ")", "accels", "=", "np", ".", "genfromtxt", "(", "fname", ",", "delimiter", "=", "(", "count_per_line", "*", "[", "width", "]", ")", ",", "skip_header", "=", "header_lines", ",", ")", "if", "np", ".", "isfinite", "(", "scale", ")", ":", "pass", "elif", "np", ".", "isfinite", "(", "pga", ")", ":", "scale", "=", "pga", "/", "np", ".", "abs", "(", "accels", ")", ".", "max", "(", ")", "else", ":", "scale", "=", "1.", "accels", "*=", "scale", "m", "=", "motion", ".", "TimeSeriesMotion", "(", "fname", ",", "''", ",", "time_step", ",", "accels", ",", "fa_length", ")", "return", "m" ]
Parse motion specification block.
[ "Parse", "motion", "specification", "block", "." ]
c72fd389d6c15203c0c00728ac00f101bae6369d
https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/tools.py#L126-L154
train
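The Fortran-style format string is decoded with the regular expression shown above; checking that step in isolation (the '(8F9.6)' value is illustrative):

import re

m = re.search(r'(\d+)\w(\d+)\.\d+', '(8F9.6)')
count_per_line, width = int(m.group(1)), int(m.group(2))
assert (count_per_line, width) == (8, 9)  # 8 values per line, 9 characters each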
arkottke/pysra
pysra/tools.py
_parse_input_loc
def _parse_input_loc(block, profile, **kwargs): """Parse input location block.""" layer, wave_field = parse_fixed_width(2 * [(5, int)], block) return profile.location( motion.WaveField[wave_field], index=(layer - 1), )
python
def _parse_input_loc(block, profile, **kwargs): """Parse input location block.""" layer, wave_field = parse_fixed_width(2 * [(5, int)], block) return profile.location( motion.WaveField[wave_field], index=(layer - 1), )
[ "def", "_parse_input_loc", "(", "block", ",", "profile", ",", "*", "*", "kwargs", ")", ":", "layer", ",", "wave_field", "=", "parse_fixed_width", "(", "2", "*", "[", "(", "5", ",", "int", ")", "]", ",", "block", ")", "return", "profile", ".", "location", "(", "motion", ".", "WaveField", "[", "wave_field", "]", ",", "index", "=", "(", "layer", "-", "1", ")", ",", ")" ]
Parse input location block.
[ "Parse", "input", "location", "block", "." ]
c72fd389d6c15203c0c00728ac00f101bae6369d
https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/tools.py#L157-L163
train
arkottke/pysra
pysra/tools.py
_parse_run_control
def _parse_run_control(block): """Parse run control block.""" _, max_iterations, strain_ratio, _, _ = parse_fixed_width( 2 * [(5, int)] + [(10, float)] + 2 * [(5, int)], block) return propagation.EquivalentLinearCalculation( strain_ratio, max_iterations, tolerance=10.)
python
def _parse_run_control(block): """Parse run control block.""" _, max_iterations, strain_ratio, _, _ = parse_fixed_width( 2 * [(5, int)] + [(10, float)] + 2 * [(5, int)], block) return propagation.EquivalentLinearCalculation( strain_ratio, max_iterations, tolerance=10.)
[ "def", "_parse_run_control", "(", "block", ")", ":", "_", ",", "max_iterations", ",", "strain_ratio", ",", "_", ",", "_", "=", "parse_fixed_width", "(", "2", "*", "[", "(", "5", ",", "int", ")", "]", "+", "[", "(", "10", ",", "float", ")", "]", "+", "2", "*", "[", "(", "5", ",", "int", ")", "]", ",", "block", ")", "return", "propagation", ".", "EquivalentLinearCalculation", "(", "strain_ratio", ",", "max_iterations", ",", "tolerance", "=", "10.", ")" ]
Parse run control block.
[ "Parse", "run", "control", "block", "." ]
c72fd389d6c15203c0c00728ac00f101bae6369d
https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/tools.py#L166-L172
train
guaix-ucm/numina
numina/array/blocks.py
blockgen1d
def blockgen1d(block, size): """Compute 1d block intervals to be used by combine. blockgen1d computes the slices by recursively halving the initial interval (0, size) by 2 until its size is lesser or equal than block :param block: an integer maximum block size :param size: original size of the interval, it corresponds to a 0:size slice :return: a list of slices Example: >>> blockgen1d(512, 1024) [slice(0, 512, None), slice(512, 1024, None)] """ def numblock(blk, x): """Compute recursively the numeric intervals """ a, b = x if b - a <= blk: return [x] else: result = [] d = int(b - a) // 2 for i in imap(numblock, [blk, blk], [(a, a + d), (a + d, b)]): result.extend(i) return result return [slice(*l) for l in numblock(block, (0, size))]
python
def blockgen1d(block, size): """Compute 1d block intervals to be used by combine. blockgen1d computes the slices by recursively halving the initial interval (0, size) by 2 until its size is lesser or equal than block :param block: an integer maximum block size :param size: original size of the interval, it corresponds to a 0:size slice :return: a list of slices Example: >>> blockgen1d(512, 1024) [slice(0, 512, None), slice(512, 1024, None)] """ def numblock(blk, x): """Compute recursively the numeric intervals """ a, b = x if b - a <= blk: return [x] else: result = [] d = int(b - a) // 2 for i in imap(numblock, [blk, blk], [(a, a + d), (a + d, b)]): result.extend(i) return result return [slice(*l) for l in numblock(block, (0, size))]
[ "def", "blockgen1d", "(", "block", ",", "size", ")", ":", "def", "numblock", "(", "blk", ",", "x", ")", ":", "\"\"\"Compute recursively the numeric intervals\n\n \"\"\"", "a", ",", "b", "=", "x", "if", "b", "-", "a", "<=", "blk", ":", "return", "[", "x", "]", "else", ":", "result", "=", "[", "]", "d", "=", "int", "(", "b", "-", "a", ")", "//", "2", "for", "i", "in", "imap", "(", "numblock", ",", "[", "blk", ",", "blk", "]", ",", "[", "(", "a", ",", "a", "+", "d", ")", ",", "(", "a", "+", "d", ",", "b", ")", "]", ")", ":", "result", ".", "extend", "(", "i", ")", "return", "result", "return", "[", "slice", "(", "*", "l", ")", "for", "l", "in", "numblock", "(", "block", ",", "(", "0", ",", "size", ")", ")", "]" ]
Compute 1d block intervals to be used by combine. blockgen1d computes the slices by recursively halving the initial interval (0, size) by 2 until its size is less than or equal to block :param block: an integer maximum block size :param size: original size of the interval; it corresponds to a 0:size slice :return: a list of slices Example: >>> blockgen1d(512, 1024) [slice(0, 512, None), slice(512, 1024, None)]
[ "Compute", "1d", "block", "intervals", "to", "be", "used", "by", "combine", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/blocks.py#L23-L53
train
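Because the interval is halved recursively, the resulting blocks stay uniform even when size is not a multiple of block; an extra doctest-style check:

>>> blockgen1d(512, 1040)
[slice(0, 260, None), slice(260, 520, None), slice(520, 780, None), slice(780, 1040, None)]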
guaix-ucm/numina
numina/array/blocks.py
blockgen
def blockgen(blocks, shape): """Generate a list of slice tuples to be used by combine. The tuples represent regions in an N-dimensional image. :param blocks: a tuple of block sizes :param shape: the shape of the n-dimensional array :return: an iterator to the list of tuples of slices Example: >>> blocks = (500, 512) >>> shape = (1040, 1024) >>> for i in blockgen(blocks, shape): ... print i (slice(0, 260, None), slice(0, 512, None)) (slice(0, 260, None), slice(512, 1024, None)) (slice(260, 520, None), slice(0, 512, None)) (slice(260, 520, None), slice(512, 1024, None)) (slice(520, 780, None), slice(0, 512, None)) (slice(520, 780, None), slice(512, 1024, None)) (slice(780, 1040, None), slice(0, 512, None)) (slice(780, 1040, None), slice(512, 1024, None)) """ iterables = [blockgen1d(l, s) for (l, s) in zip(blocks, shape)] return product(*iterables)
python
def blockgen(blocks, shape): """Generate a list of slice tuples to be used by combine. The tuples represent regions in an N-dimensional image. :param blocks: a tuple of block sizes :param shape: the shape of the n-dimensional array :return: an iterator to the list of tuples of slices Example: >>> blocks = (500, 512) >>> shape = (1040, 1024) >>> for i in blockgen(blocks, shape): ... print i (slice(0, 260, None), slice(0, 512, None)) (slice(0, 260, None), slice(512, 1024, None)) (slice(260, 520, None), slice(0, 512, None)) (slice(260, 520, None), slice(512, 1024, None)) (slice(520, 780, None), slice(0, 512, None)) (slice(520, 780, None), slice(512, 1024, None)) (slice(780, 1040, None), slice(0, 512, None)) (slice(780, 1040, None), slice(512, 1024, None)) """ iterables = [blockgen1d(l, s) for (l, s) in zip(blocks, shape)] return product(*iterables)
[ "def", "blockgen", "(", "blocks", ",", "shape", ")", ":", "iterables", "=", "[", "blockgen1d", "(", "l", ",", "s", ")", "for", "(", "l", ",", "s", ")", "in", "zip", "(", "blocks", ",", "shape", ")", "]", "return", "product", "(", "*", "iterables", ")" ]
Generate a list of slice tuples to be used by combine. The tuples represent regions in an N-dimensional image. :param blocks: a tuple of block sizes :param shape: the shape of the n-dimensional array :return: an iterator over the tuples of slices Example: >>> blocks = (500, 512) >>> shape = (1040, 1024) >>> for i in blockgen(blocks, shape): ... print(i) (slice(0, 260, None), slice(0, 512, None)) (slice(0, 260, None), slice(512, 1024, None)) (slice(260, 520, None), slice(0, 512, None)) (slice(260, 520, None), slice(512, 1024, None)) (slice(520, 780, None), slice(0, 512, None)) (slice(520, 780, None), slice(512, 1024, None)) (slice(780, 1040, None), slice(0, 512, None)) (slice(780, 1040, None), slice(512, 1024, None))
[ "Generate", "a", "list", "of", "slice", "tuples", "to", "be", "used", "by", "combine", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/blocks.py#L56-L82
train
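A hedged sketch of blockgen driving a per-block computation over a 2D array; the array contents and block sizes are made up for illustration:

import numpy
from numina.array.blocks import blockgen

image = numpy.arange(24.0).reshape(4, 6)  # hypothetical data
for region in blockgen((2, 3), image.shape):
    # each region is a tuple of slices, usable directly as an index
    print(region, image[region].mean())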
guaix-ucm/numina
numina/array/blocks.py
blk_coverage_1d
def blk_coverage_1d(blk, size): """Return the part of a 1d array covered by a block. :param blk: size of the 1d block :param size: size of the 1d image :return: a tuple of size covered and remaining size Example: >>> blk_coverage_1d(7, 100) (98, 2) """ rem = size % blk maxpix = size - rem return maxpix, rem
python
def blk_coverage_1d(blk, size): """Return the part of a 1d array covered by a block. :param blk: size of the 1d block :param size: size of the 1d image :return: a tuple of size covered and remaining size Example: >>> blk_coverage_1d(7, 100) (98, 2) """ rem = size % blk maxpix = size - rem return maxpix, rem
[ "def", "blk_coverage_1d", "(", "blk", ",", "size", ")", ":", "rem", "=", "size", "%", "blk", "maxpix", "=", "size", "-", "rem", "return", "maxpix", ",", "rem" ]
Return the part of a 1d array covered by a block. :param blk: size of the 1d block :param size: size of the 1d image :return: a tuple of size covered and remaining size Example: >>> blk_coverage_1d(7, 100) (98, 2)
[ "Return", "the", "part", "of", "a", "1d", "array", "covered", "by", "a", "block", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/blocks.py#L85-L100
train
guaix-ucm/numina
numina/array/blocks.py
max_blk_coverage
def max_blk_coverage(blk, shape): """Return the maximum shape of an array covered by a block. :param blk: the N-dimensional shape of the block :param shape: the N-dimensional shape of the array :return: the shape of the covered region Example: >>> max_blk_coverage(blk=(7, 6), shape=(100, 43)) (98, 42) """ return tuple(blk_coverage_1d(b, s)[0] for b, s in zip(blk, shape))
python
def max_blk_coverage(blk, shape): """Return the maximum shape of an array covered by a block. :param blk: the N-dimensional shape of the block :param shape: the N-dimensional shape of the array :return: the shape of the covered region Example: >>> max_blk_coverage(blk=(7, 6), shape=(100, 43)) (98, 42) """ return tuple(blk_coverage_1d(b, s)[0] for b, s in zip(blk, shape))
[ "def", "max_blk_coverage", "(", "blk", ",", "shape", ")", ":", "return", "tuple", "(", "blk_coverage_1d", "(", "b", ",", "s", ")", "[", "0", "]", "for", "b", ",", "s", "in", "zip", "(", "blk", ",", "shape", ")", ")" ]
Return the maximum shape of an array covered by a block. :param blk: the N-dimensional shape of the block :param shape: the N-dimensional shape of the array :return: the shape of the covered region Example: >>> max_blk_coverage(blk=(7, 6), shape=(100, 43)) (98, 42)
[ "Return", "the", "maximum", "shape", "of", "an", "array", "covered", "by", "a", "block", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/blocks.py#L103-L117
train
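The two coverage helpers compose naturally; a quick sketch, with the values taken from the doctests above (assumes numina is installed):

from numina.array.blocks import blk_coverage_1d, max_blk_coverage

print(blk_coverage_1d(7, 100))                        # (98, 2): 14 blocks of 7, remainder 2
print(max_blk_coverage(blk=(7, 6), shape=(100, 43)))  # (98, 42): per-axis covered sizes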
guaix-ucm/numina
numina/array/blocks.py
blk_nd_short
def blk_nd_short(blk, shape): """Iterate through the blocks that strictly cover an array. Iterate through the blocks that cover the part of the array given by max_blk_coverage. :param blk: the N-dimensional shape of the block :param shape: the N-dimensional shape of the array :return: a generator that yields the blocks Example: >>> result = list(blk_nd_short(blk=(5,3), shape=(11, 11))) >>> result[0] (slice(0, 5, None), slice(0, 3, None)) >>> result[1] (slice(0, 5, None), slice(3, 6, None)) >>> result[-1] (slice(5, 10, None), slice(6, 9, None)) In this case, the output of max_blk_coverage is (10, 9), so only this part of the array is covered .. seealso:: :py:func:`blk_nd` Yields blocks of blk size until the remaining part is smaller than `blk` and then yields smaller blocks. """ internals = (blk_1d_short(b, s) for b, s in zip(blk, shape)) return product(*internals)
python
def blk_nd_short(blk, shape): """Iterate through the blocks that strictly cover an array. Iterate through the blocks that cover the part of the array given by max_blk_coverage. :param blk: the N-dimensional shape of the block :param shape: the N-dimensional shape of the array :return: a generator that yields the blocks Example: >>> result = list(blk_nd_short(blk=(5,3), shape=(11, 11))) >>> result[0] (slice(0, 5, None), slice(0, 3, None)) >>> result[1] (slice(0, 5, None), slice(3, 6, None)) >>> result[-1] (slice(5, 10, None), slice(6, 9, None)) In this case, the output of max_blk_coverage is (10, 9), so only this part of the array is covered .. seealso:: :py:func:`blk_nd` Yields blocks of blk size until the remaining part is smaller than `blk` and then yields smaller blocks. """ internals = (blk_1d_short(b, s) for b, s in zip(blk, shape)) return product(*internals)
[ "def", "blk_nd_short", "(", "blk", ",", "shape", ")", ":", "internals", "=", "(", "blk_1d_short", "(", "b", ",", "s", ")", "for", "b", ",", "s", "in", "zip", "(", "blk", ",", "shape", ")", ")", "return", "product", "(", "*", "internals", ")" ]
Iterate through the blocks that strictly cover an array. Iterate through the blocks that cover the part of the array given by max_blk_coverage. :param blk: the N-dimensional shape of the block :param shape: the N-dimensional shape of the array :return: a generator that yields the blocks Example: >>> result = list(blk_nd_short(blk=(5,3), shape=(11, 11))) >>> result[0] (slice(0, 5, None), slice(0, 3, None)) >>> result[1] (slice(0, 5, None), slice(3, 6, None)) >>> result[-1] (slice(5, 10, None), slice(6, 9, None)) In this case, the output of max_blk_coverage is (10, 9), so only this part of the array is covered .. seealso:: :py:func:`blk_nd` Yields blocks of blk size until the remaining part is smaller than `blk` and then yields smaller blocks.
[ "Iterate", "trough", "the", "blocks", "that", "strictly", "cover", "an", "array", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/blocks.py#L137-L169
train
guaix-ucm/numina
numina/array/blocks.py
blk_nd
def blk_nd(blk, shape): """Iterate through the blocks that cover an array. This function first iterates through the blocks that cover the part of the array given by max_blk_coverage and then iterates with smaller blocks for the rest of the array. :param blk: the N-dimensional shape of the block :param shape: the N-dimensional shape of the array :return: a generator that yields the blocks Example: >>> result = list(blk_nd(blk=(5,3), shape=(11, 11))) >>> result[0] (slice(0, 5, None), slice(0, 3, None)) >>> result[1] (slice(0, 5, None), slice(3, 6, None)) >>> result[-1] (slice(10, 11, None), slice(9, 11, None)) The generator yields blocks of size blk until it covers the part of the array given by :func:`max_blk_coverage` and then yields smaller blocks until it covers the full array. .. seealso:: :py:func:`blk_nd_short` Yields blocks of fixed size """ internals = (blk_1d(b, s) for b, s in zip(blk, shape)) return product(*internals)
python
def blk_nd(blk, shape): """Iterate through the blocks that cover an array. This function first iterates through the blocks that cover the part of the array given by max_blk_coverage and then iterates with smaller blocks for the rest of the array. :param blk: the N-dimensional shape of the block :param shape: the N-dimensional shape of the array :return: a generator that yields the blocks Example: >>> result = list(blk_nd(blk=(5,3), shape=(11, 11))) >>> result[0] (slice(0, 5, None), slice(0, 3, None)) >>> result[1] (slice(0, 5, None), slice(3, 6, None)) >>> result[-1] (slice(10, 11, None), slice(9, 11, None)) The generator yields blocks of size blk until it covers the part of the array given by :func:`max_blk_coverage` and then yields smaller blocks until it covers the full array. .. seealso:: :py:func:`blk_nd_short` Yields blocks of fixed size """ internals = (blk_1d(b, s) for b, s in zip(blk, shape)) return product(*internals)
[ "def", "blk_nd", "(", "blk", ",", "shape", ")", ":", "internals", "=", "(", "blk_1d", "(", "b", ",", "s", ")", "for", "b", ",", "s", "in", "zip", "(", "blk", ",", "shape", ")", ")", "return", "product", "(", "*", "internals", ")" ]
Iterate through the blocks that cover an array. This function first iterates through the blocks that cover the part of the array given by max_blk_coverage and then iterates with smaller blocks for the rest of the array. :param blk: the N-dimensional shape of the block :param shape: the N-dimensional shape of the array :return: a generator that yields the blocks Example: >>> result = list(blk_nd(blk=(5,3), shape=(11, 11))) >>> result[0] (slice(0, 5, None), slice(0, 3, None)) >>> result[1] (slice(0, 5, None), slice(3, 6, None)) >>> result[-1] (slice(10, 11, None), slice(9, 11, None)) The generator yields blocks of size blk until it covers the part of the array given by :func:`max_blk_coverage` and then yields smaller blocks until it covers the full array. .. seealso:: :py:func:`blk_nd_short` Yields blocks of fixed size
[ "Iterate", "through", "the", "blocks", "that", "cover", "an", "array", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/blocks.py#L192-L226
train
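A short sketch contrasting the two iterators on the shapes used in the doctests; the block counts follow from the slices shown above:

from numina.array.blocks import blk_nd, blk_nd_short

short = list(blk_nd_short(blk=(5, 3), shape=(11, 11)))
full = list(blk_nd(blk=(5, 3), shape=(11, 11)))
print(len(short), len(full))  # 6 12: the short variant stops at the (10, 9) region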
guaix-ucm/numina
numina/array/blocks.py
block_view
def block_view(arr, block=(3, 3)): """Provide a 2D block view of a 2D array. No error checking is made; therefore this is meaningful (as implemented) only for blocks strictly compatible with the shape of arr. """ # simple shape and strides computations may seem at first strange # unless one is able to recognize the 'tuple additions' involved ;-) shape = (arr.shape[0] // block[0], arr.shape[1] // block[1]) + block strides = (block[0] * arr.strides[0], block[1] * arr.strides[1]) + arr.strides return ast(arr, shape=shape, strides=strides)
python
def block_view(arr, block=(3, 3)): """Provide a 2D block view of a 2D array. No error checking is made; therefore this is meaningful (as implemented) only for blocks strictly compatible with the shape of arr. """ # simple shape and strides computations may seem at first strange # unless one is able to recognize the 'tuple additions' involved ;-) shape = (arr.shape[0] // block[0], arr.shape[1] // block[1]) + block strides = (block[0] * arr.strides[0], block[1] * arr.strides[1]) + arr.strides return ast(arr, shape=shape, strides=strides)
[ "def", "block_view", "(", "arr", ",", "block", "=", "(", "3", ",", "3", ")", ")", ":", "# simple shape and strides computations may seem at first strange", "# unless one is able to recognize the 'tuple additions' involved ;-)", "shape", "=", "(", "arr", ".", "shape", "[", "0", "]", "//", "block", "[", "0", "]", ",", "arr", ".", "shape", "[", "1", "]", "//", "block", "[", "1", "]", ")", "+", "block", "strides", "=", "(", "block", "[", "0", "]", "*", "arr", ".", "strides", "[", "0", "]", ",", "block", "[", "1", "]", "*", "arr", ".", "strides", "[", "1", "]", ")", "+", "arr", ".", "strides", "return", "ast", "(", "arr", ",", "shape", "=", "shape", ",", "strides", "=", "strides", ")" ]
Provide a 2D block view of a 2D array. No error checking is made; therefore this is meaningful (as implemented) only for blocks strictly compatible with the shape of arr.
[ "Provide", "a", "2D", "block", "view", "to", "2D", "array", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/blocks.py#L229-L241
train
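block_view relies on numpy stride tricks; ast in the snippet is presumably numpy.lib.stride_tricks.as_strided imported under an alias. A self-contained sketch under that assumption:

import numpy
from numpy.lib.stride_tricks import as_strided

def block_view(arr, block=(3, 3)):
    # same shape/strides computation as above, with the constructor spelled out
    shape = (arr.shape[0] // block[0], arr.shape[1] // block[1]) + block
    strides = (block[0] * arr.strides[0], block[1] * arr.strides[1]) + arr.strides
    return as_strided(arr, shape=shape, strides=strides)

a = numpy.arange(36.0).reshape(6, 6)
v = block_view(a)
print(v.shape)         # (2, 2, 3, 3): a 2x2 grid of 3x3 blocks
print(v[0, 0].mean())  # 7.0, the mean of the top-left 3x3 block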
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
is_citeable
def is_citeable(publication_info): """Check some fields in order to determine whether the article is citeable. :param publication_info: publication_info field already populated :type publication_info: list """ def _item_has_pub_info(item): return all( key in item for key in ( 'journal_title', 'journal_volume' ) ) def _item_has_page_or_artid(item): return any( key in item for key in ( 'page_start', 'artid' ) ) has_pub_info = any( _item_has_pub_info(item) for item in publication_info ) has_page_or_artid = any( _item_has_page_or_artid(item) for item in publication_info ) return has_pub_info and has_page_or_artid
python
def is_citeable(publication_info): """Check some fields in order to determine whether the article is citeable. :param publication_info: publication_info field already populated :type publication_info: list """ def _item_has_pub_info(item): return all( key in item for key in ( 'journal_title', 'journal_volume' ) ) def _item_has_page_or_artid(item): return any( key in item for key in ( 'page_start', 'artid' ) ) has_pub_info = any( _item_has_pub_info(item) for item in publication_info ) has_page_or_artid = any( _item_has_page_or_artid(item) for item in publication_info ) return has_pub_info and has_page_or_artid
[ "def", "is_citeable", "(", "publication_info", ")", ":", "def", "_item_has_pub_info", "(", "item", ")", ":", "return", "all", "(", "key", "in", "item", "for", "key", "in", "(", "'journal_title'", ",", "'journal_volume'", ")", ")", "def", "_item_has_page_or_artid", "(", "item", ")", ":", "return", "any", "(", "key", "in", "item", "for", "key", "in", "(", "'page_start'", ",", "'artid'", ")", ")", "has_pub_info", "=", "any", "(", "_item_has_pub_info", "(", "item", ")", "for", "item", "in", "publication_info", ")", "has_page_or_artid", "=", "any", "(", "_item_has_page_or_artid", "(", "item", ")", "for", "item", "in", "publication_info", ")", "return", "has_pub_info", "and", "has_page_or_artid" ]
Check some fields in order to determine whether the article is citeable. :param publication_info: publication_info field already populated :type publication_info: list
[ "Check", "some", "fields", "in", "order", "to", "define", "if", "the", "article", "is", "citeable", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L46-L75
train
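is_citeable is a pure function of the publication_info list, so it is easy to probe directly; a minimal sketch with hypothetical entries:

from inspire_schemas.builders.literature import is_citeable

pub_info = [{'journal_title': 'Phys.Rev.D', 'journal_volume': '97', 'artid': '012001'}]
print(is_citeable(pub_info))                           # True: journal info plus artid
print(is_citeable([{'journal_title': 'Phys.Rev.D'}]))  # False: no volume, page or artid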
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.add_abstract
def add_abstract(self, abstract, source=None): """Add abstract. :param abstract: abstract for the current document. :type abstract: string :param source: source for the given abstract. :type source: string """ self._append_to('abstracts', self._sourced_dict( source, value=abstract.strip(), ))
python
def add_abstract(self, abstract, source=None): """Add abstract. :param abstract: abstract for the current document. :type abstract: string :param source: source for the given abstract. :type source: string """ self._append_to('abstracts', self._sourced_dict( source, value=abstract.strip(), ))
[ "def", "add_abstract", "(", "self", ",", "abstract", ",", "source", "=", "None", ")", ":", "self", ".", "_append_to", "(", "'abstracts'", ",", "self", ".", "_sourced_dict", "(", "source", ",", "value", "=", "abstract", ".", "strip", "(", ")", ",", ")", ")" ]
Add abstract. :param abstract: abstract for the current document. :type abstract: string :param source: source for the given abstract. :type source: string
[ "Add", "abstract", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L150-L162
train
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.add_arxiv_eprint
def add_arxiv_eprint(self, arxiv_id, arxiv_categories): """Add arxiv eprint. :param arxiv_id: arxiv id for the current document. :type arxiv_id: string :param arxiv_categories: arXiv categories for the current document. :type arxiv_categories: list """ self._append_to('arxiv_eprints', { 'value': arxiv_id, 'categories': arxiv_categories, }) self.set_citeable(True)
python
def add_arxiv_eprint(self, arxiv_id, arxiv_categories): """Add arxiv eprint. :param arxiv_id: arxiv id for the current document. :type arxiv_id: string :param arxiv_categories: arXiv categories for the current document. :type arxiv_categories: list """ self._append_to('arxiv_eprints', { 'value': arxiv_id, 'categories': arxiv_categories, }) self.set_citeable(True)
[ "def", "add_arxiv_eprint", "(", "self", ",", "arxiv_id", ",", "arxiv_categories", ")", ":", "self", ".", "_append_to", "(", "'arxiv_eprints'", ",", "{", "'value'", ":", "arxiv_id", ",", "'categories'", ":", "arxiv_categories", ",", "}", ")", "self", ".", "set_citeable", "(", "True", ")" ]
Add arxiv eprint. :param arxiv_id: arxiv id for the current document. :type arxiv_id: string :param arxiv_categories: arXiv categories for the current document. :type arxiv_categories: list
[ "Add", "arxiv", "eprint", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L165-L178
train
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.add_doi
def add_doi(self, doi, source=None, material=None): """Add doi. :param doi: doi for the current document. :type doi: string :param source: source for the doi. :type source: string :param material: material for the doi. :type material: string """ if doi is None: return try: doi = idutils.normalize_doi(doi) except AttributeError: return if not doi: return dois = self._sourced_dict( source, value=doi ) if material is not None: dois['material'] = material self._append_to('dois', dois)
python
def add_doi(self, doi, source=None, material=None): """Add doi. :param doi: doi for the current document. :type doi: string :param source: source for the doi. :type source: string :param material: material for the doi. :type material: string """ if doi is None: return try: doi = idutils.normalize_doi(doi) except AttributeError: return if not doi: return dois = self._sourced_dict( source, value=doi ) if material is not None: dois['material'] = material self._append_to('dois', dois)
[ "def", "add_doi", "(", "self", ",", "doi", ",", "source", "=", "None", ",", "material", "=", "None", ")", ":", "if", "doi", "is", "None", ":", "return", "try", ":", "doi", "=", "idutils", ".", "normalize_doi", "(", "doi", ")", "except", "AttributeError", ":", "return", "if", "not", "doi", ":", "return", "dois", "=", "self", ".", "_sourced_dict", "(", "source", ",", "value", "=", "doi", ")", "if", "material", "is", "not", "None", ":", "dois", "[", "'material'", "]", "=", "material", "self", ".", "_append_to", "(", "'dois'", ",", "dois", ")" ]
Add doi. :param doi: doi for the current document. :type doi: string :param source: source for the doi. :type source: string :param material: material for the doi. :type material: string
[ "Add", "doi", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L181-L211
train
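A hedged usage sketch for add_doi; constructing the builder with a source is an assumption (only self.source and self.record usage is visible in these rows):

from inspire_schemas.builders.literature import LiteratureBuilder

builder = LiteratureBuilder(source='hypothetical-source')
builder.add_doi('10.1103/PhysRevD.97.012001')  # normalized by idutils before storing
builder.add_doi('not-a-doi')                   # silently dropped: normalize_doi raises AttributeError
print(builder.record.get('dois'))              # a single sourced dict with the valid DOI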
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.make_author
def make_author(self, full_name, affiliations=(), roles=(), raw_affiliations=(), source=None, ids=(), emails=(), alternative_names=()): """Make a subrecord representing an author. Args: full_name(str): full name of the author. If not yet in standard Inspire form, it will be normalized. affiliations(List[str]): Inspire normalized affiliations of the author. roles(List[str]): Inspire roles of the author. raw_affiliations(List[str]): raw affiliation strings of the author. source(str): source for the raw affiliations, defaulting to the builder's source. ids(List[Tuple[str,str]]): list of ids of the author, whose elements are of the form ``(schema, value)``. emails(List[str]): email addresses of the author. alternative_names(List[str]): alternative names of the author. Returns: dict: a schema-compliant subrecord. """ builder = SignatureBuilder() builder.set_full_name(full_name) for affiliation in affiliations: builder.add_affiliation(affiliation) for role in roles: builder.add_inspire_role(role) for raw_affiliation in raw_affiliations: builder.add_raw_affiliation(raw_affiliation, source or self.source) for id_schema, id_value in ids: if id_schema and id_value: builder.set_uid(id_value, schema=id_schema) for email in emails: builder.add_email(email) for alternative_name in alternative_names: builder.add_alternative_name(alternative_name) return builder.obj
python
def make_author(self, full_name, affiliations=(), roles=(), raw_affiliations=(), source=None, ids=(), emails=(), alternative_names=()): """Make a subrecord representing an author. Args: full_name(str): full name of the author. If not yet in standard Inspire form, it will be normalized. affiliations(List[str]): Inspire normalized affiliations of the author. roles(List[str]): Inspire roles of the author. raw_affiliations(List[str]): raw affiliation strings of the author. source(str): source for the raw affiliations, defaulting to the builder's source. ids(List[Tuple[str,str]]): list of ids of the author, whose elements are of the form ``(schema, value)``. emails(List[str]): email addresses of the author. alternative_names(List[str]): alternative names of the author. Returns: dict: a schema-compliant subrecord. """ builder = SignatureBuilder() builder.set_full_name(full_name) for affiliation in affiliations: builder.add_affiliation(affiliation) for role in roles: builder.add_inspire_role(role) for raw_affiliation in raw_affiliations: builder.add_raw_affiliation(raw_affiliation, source or self.source) for id_schema, id_value in ids: if id_schema and id_value: builder.set_uid(id_value, schema=id_schema) for email in emails: builder.add_email(email) for alternative_name in alternative_names: builder.add_alternative_name(alternative_name) return builder.obj
[ "def", "make_author", "(", "self", ",", "full_name", ",", "affiliations", "=", "(", ")", ",", "roles", "=", "(", ")", ",", "raw_affiliations", "=", "(", ")", ",", "source", "=", "None", ",", "ids", "=", "(", ")", ",", "emails", "=", "(", ")", ",", "alternative_names", "=", "(", ")", ")", ":", "builder", "=", "SignatureBuilder", "(", ")", "builder", ".", "set_full_name", "(", "full_name", ")", "for", "affiliation", "in", "affiliations", ":", "builder", ".", "add_affiliation", "(", "affiliation", ")", "for", "role", "in", "roles", ":", "builder", ".", "add_inspire_role", "(", "role", ")", "for", "raw_affiliation", "in", "raw_affiliations", ":", "builder", ".", "add_raw_affiliation", "(", "raw_affiliation", ",", "source", "or", "self", ".", "source", ")", "for", "id_schema", ",", "id_value", "in", "ids", ":", "if", "id_schema", "and", "id_value", ":", "builder", ".", "set_uid", "(", "id_value", ",", "schema", "=", "id_schema", ")", "for", "email", "in", "emails", ":", "builder", ".", "add_email", "(", "email", ")", "for", "alternative_name", "in", "alternative_names", ":", "builder", ".", "add_alternative_name", "(", "alternative_name", ")", "return", "builder", ".", "obj" ]
Make a subrecord representing an author. Args: full_name(str): full name of the author. If not yet in standard Inspire form, it will be normalized. affiliations(List[str]): Inspire normalized affiliations of the author. roles(List[str]): Inspire roles of the author. raw_affiliations(List[str]): raw affiliation strings of the author. source(str): source for the raw affiliations, defaulting to the builder's source. ids(List[Tuple[str,str]]): list of ids of the author, whose elements are of the form ``(schema, value)``. emails(List[str]): email addresses of the author. alternative_names(List[str]): alternative names of the author. Returns: dict: a schema-compliant subrecord.
[ "Make", "a", "subrecord", "representing", "an", "author", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L224-L273
train
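A sketch of make_author building a schema-compliant author subrecord; all of the values are hypothetical, and the builder construction is assumed as above:

from inspire_schemas.builders.literature import LiteratureBuilder

builder = LiteratureBuilder(source='hypothetical-source')
author = builder.make_author(
    'Smith, John',
    affiliations=['CERN'],
    ids=[('ORCID', '0000-0002-1825-0097')],
    emails=['j.smith@example.org'],
)
print(author)  # dict with full_name, affiliations, ids and emails populated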
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.add_book
def add_book(self, publisher=None, place=None, date=None): """ Make a dictionary representing a book. :param publisher: publisher name :type publisher: string :param place: place of publication :type place: string :param date: A (partial) date in any format. The date should contain at least a year :type date: string """ imprint = {} if date is not None: imprint['date'] = normalize_date(date) if place is not None: imprint['place'] = place if publisher is not None: imprint['publisher'] = publisher self._append_to('imprints', imprint)
python
def add_book(self, publisher=None, place=None, date=None): """ Make a dictionary representing a book. :param publisher: publisher name :type publisher: string :param place: place of publication :type place: string :param date: A (partial) date in any format. The date should contain at least a year :type date: string """ imprint = {} if date is not None: imprint['date'] = normalize_date(date) if place is not None: imprint['place'] = place if publisher is not None: imprint['publisher'] = publisher self._append_to('imprints', imprint)
[ "def", "add_book", "(", "self", ",", "publisher", "=", "None", ",", "place", "=", "None", ",", "date", "=", "None", ")", ":", "imprint", "=", "{", "}", "if", "date", "is", "not", "None", ":", "imprint", "[", "'date'", "]", "=", "normalize_date", "(", "date", ")", "if", "place", "is", "not", "None", ":", "imprint", "[", "'place'", "]", "=", "place", "if", "publisher", "is", "not", "None", ":", "imprint", "[", "'publisher'", "]", "=", "publisher", "self", ".", "_append_to", "(", "'imprints'", ",", "imprint", ")" ]
Make a dictionary representing a book. :param publisher: publisher name :type publisher: string :param place: place of publication :type place: string :param date: A (partial) date in any format. The date should contain at least a year :type date: string
[ "Make", "a", "dictionary", "that", "is", "representing", "a", "book", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L276-L302
train
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.add_inspire_categories
def add_inspire_categories(self, subject_terms, source=None): """Add inspire categories. :param subject_terms: user categories for the current document. :type subject_terms: list :param source: source for the given categories. :type source: string """ for category in subject_terms: category_dict = self._sourced_dict( source, term=category, ) self._append_to('inspire_categories', category_dict)
python
def add_inspire_categories(self, subject_terms, source=None): """Add inspire categories. :param subject_terms: user categories for the current document. :type subject_terms: list :param source: source for the given categories. :type source: string """ for category in subject_terms: category_dict = self._sourced_dict( source, term=category, ) self._append_to('inspire_categories', category_dict)
[ "def", "add_inspire_categories", "(", "self", ",", "subject_terms", ",", "source", "=", "None", ")", ":", "for", "category", "in", "subject_terms", ":", "category_dict", "=", "self", ".", "_sourced_dict", "(", "source", ",", "term", "=", "category", ",", ")", "self", ".", "_append_to", "(", "'inspire_categories'", ",", "category_dict", ")" ]
Add inspire categories. :param subject_terms: user categories for the current document. :type subject_terms: list :param source: source for the given categories. :type source: string
[ "Add", "inspire", "categories", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L344-L358
train
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.add_keyword
def add_keyword(self, keyword, schema=None, source=None): """Add a keyword. Args: keyword(str): keyword to add. schema(str): schema to which the keyword belongs. source(str): source for the keyword. """ keyword_dict = self._sourced_dict(source, value=keyword) if schema is not None: keyword_dict['schema'] = schema self._append_to('keywords', keyword_dict)
python
def add_keyword(self, keyword, schema=None, source=None): """Add a keyword. Args: keyword(str): keyword to add. schema(str): schema to which the keyword belongs. source(str): source for the keyword. """ keyword_dict = self._sourced_dict(source, value=keyword) if schema is not None: keyword_dict['schema'] = schema self._append_to('keywords', keyword_dict)
[ "def", "add_keyword", "(", "self", ",", "keyword", ",", "schema", "=", "None", ",", "source", "=", "None", ")", ":", "keyword_dict", "=", "self", ".", "_sourced_dict", "(", "source", ",", "value", "=", "keyword", ")", "if", "schema", "is", "not", "None", ":", "keyword_dict", "[", "'schema'", "]", "=", "schema", "self", ".", "_append_to", "(", "'keywords'", ",", "keyword_dict", ")" ]
Add a keyword. Args: keyword(str): keyword to add. schema(str): schema to which the keyword belongs. source(str): source for the keyword.
[ "Add", "a", "keyword", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L361-L374
train
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.add_private_note
def add_private_note(self, private_notes, source=None): """Add private notes. :param private_notes: hidden notes for the current document :type private_notes: string :param source: source for the given private notes :type source: string """ self._append_to('_private_notes', self._sourced_dict( source, value=private_notes, ))
python
def add_private_note(self, private_notes, source=None): """Add private notes. :param private_notes: hidden notes for the current document :type private_notes: string :param source: source for the given private notes :type source: string """ self._append_to('_private_notes', self._sourced_dict( source, value=private_notes, ))
[ "def", "add_private_note", "(", "self", ",", "private_notes", ",", "source", "=", "None", ")", ":", "self", ".", "_append_to", "(", "'_private_notes'", ",", "self", ".", "_sourced_dict", "(", "source", ",", "value", "=", "private_notes", ",", ")", ")" ]
Add private notes. :param private_notes: hidden notes for the current document :type private_notes: string :param source: source for the given private notes :type source: string
[ "Add", "private", "notes", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L377-L389
train
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.add_publication_info
def add_publication_info( self, year=None, cnum=None, artid=None, page_end=None, page_start=None, journal_issue=None, journal_title=None, journal_volume=None, pubinfo_freetext=None, material=None, parent_record=None, parent_isbn=None, ): """Add publication info. :param year: year of publication :type year: integer :param cnum: inspire conference number :type cnum: string :param artid: article id :type artid: string :param page_end: final page for the article :type page_end: string :param page_start: initial page for the article :type page_start: string :param journal_issue: issue of the journal where the document has been published :type journal_issue: string :param journal_title: title of the journal where the document has been published :type journal_title: string :param journal_volume: volume of the journal where the document has been published :type journal_volume: string :param pubinfo_freetext: Unstructured text describing the publication information. :type pubinfo_freetext: string :param material: material of the article :type material: string :param parent_record: reference for the parent record :type parent_record: string :param parent_isbn: isbn for the parent record :type parent_isbn: string """ # If only journal title is present, and no other fields, assume the # paper was submitted, but not yet published if journal_title and all( not field for field in (cnum, artid, journal_issue, journal_volume, page_start, page_end)): self.add_public_note('Submitted to {}'.format(journal_title)) return publication_item = {} for key in ('cnum', 'artid', 'page_end', 'page_start', 'journal_issue', 'journal_title', 'journal_volume', 'year', 'pubinfo_freetext', 'material'): if locals()[key] is not None: publication_item[key] = locals()[key] if parent_record is not None: parent_item = {'$ref': parent_record} publication_item['parent_record'] = parent_item if parent_isbn is not None: publication_item['parent_isbn'] = normalize_isbn(parent_isbn) if page_start and page_end: try: self.add_number_of_pages( int(page_end) - int(page_start) + 1 ) except (TypeError, ValueError): pass self._append_to('publication_info', publication_item) if is_citeable(self.record['publication_info']): self.set_citeable(True)
python
def add_publication_info( self, year=None, cnum=None, artid=None, page_end=None, page_start=None, journal_issue=None, journal_title=None, journal_volume=None, pubinfo_freetext=None, material=None, parent_record=None, parent_isbn=None, ): """Add publication info. :param year: year of publication :type year: integer :param cnum: inspire conference number :type cnum: string :param artid: article id :type artid: string :param page_end: final page for the article :type page_end: string :param page_start: initial page for the article :type page_start: string :param journal_issue: issue of the journal where the document has been published :type journal_issue: string :param journal_title: title of the journal where the document has been published :type journal_title: string :param journal_volume: volume of the journal where the document has been published :type journal_volume: string :param pubinfo_freetext: Unstructured text describing the publication information. :type pubinfo_freetext: string :param material: material of the article :type material: string :param parent_record: reference for the parent record :type parent_record: string :param parent_isbn: isbn for the parent record :type parent_isbn: string """ # If only journal title is present, and no other fields, assume the # paper was submitted, but not yet published if journal_title and all( not field for field in (cnum, artid, journal_issue, journal_volume, page_start, page_end)): self.add_public_note('Submitted to {}'.format(journal_title)) return publication_item = {} for key in ('cnum', 'artid', 'page_end', 'page_start', 'journal_issue', 'journal_title', 'journal_volume', 'year', 'pubinfo_freetext', 'material'): if locals()[key] is not None: publication_item[key] = locals()[key] if parent_record is not None: parent_item = {'$ref': parent_record} publication_item['parent_record'] = parent_item if parent_isbn is not None: publication_item['parent_isbn'] = normalize_isbn(parent_isbn) if page_start and page_end: try: self.add_number_of_pages( int(page_end) - int(page_start) + 1 ) except (TypeError, ValueError): pass self._append_to('publication_info', publication_item) if is_citeable(self.record['publication_info']): self.set_citeable(True)
[ "def", "add_publication_info", "(", "self", ",", "year", "=", "None", ",", "cnum", "=", "None", ",", "artid", "=", "None", ",", "page_end", "=", "None", ",", "page_start", "=", "None", ",", "journal_issue", "=", "None", ",", "journal_title", "=", "None", ",", "journal_volume", "=", "None", ",", "pubinfo_freetext", "=", "None", ",", "material", "=", "None", ",", "parent_record", "=", "None", ",", "parent_isbn", "=", "None", ",", ")", ":", "# If only journal title is present, and no other fields, assume the", "# paper was submitted, but not yet published", "if", "journal_title", "and", "all", "(", "not", "field", "for", "field", "in", "(", "cnum", ",", "artid", ",", "journal_issue", ",", "journal_volume", ",", "page_start", ",", "page_end", ")", ")", ":", "self", ".", "add_public_note", "(", "'Submitted to {}'", ".", "format", "(", "journal_title", ")", ")", "return", "publication_item", "=", "{", "}", "for", "key", "in", "(", "'cnum'", ",", "'artid'", ",", "'page_end'", ",", "'page_start'", ",", "'journal_issue'", ",", "'journal_title'", ",", "'journal_volume'", ",", "'year'", ",", "'pubinfo_freetext'", ",", "'material'", ")", ":", "if", "locals", "(", ")", "[", "key", "]", "is", "not", "None", ":", "publication_item", "[", "key", "]", "=", "locals", "(", ")", "[", "key", "]", "if", "parent_record", "is", "not", "None", ":", "parent_item", "=", "{", "'$ref'", ":", "parent_record", "}", "publication_item", "[", "'parent_record'", "]", "=", "parent_item", "if", "parent_isbn", "is", "not", "None", ":", "publication_item", "[", "'parent_isbn'", "]", "=", "normalize_isbn", "(", "parent_isbn", ")", "if", "page_start", "and", "page_end", ":", "try", ":", "self", ".", "add_number_of_pages", "(", "int", "(", "page_end", ")", "-", "int", "(", "page_start", ")", "+", "1", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "pass", "self", ".", "_append_to", "(", "'publication_info'", ",", "publication_item", ")", "if", "is_citeable", "(", "self", ".", "record", "[", "'publication_info'", "]", ")", ":", "self", ".", "set_citeable", "(", "True", ")" ]
Add publication info. :param year: year of publication :type year: integer :param cnum: inspire conference number :type cnum: string :param artid: article id :type artid: string :param page_end: final page for the article :type page_end: string :param page_start: initial page for the article :type page_start: string :param journal_issue: issue of the journal where the document has been published :type journal_issue: string :param journal_title: title of the journal where the document has been published :type journal_title: string :param journal_volume: volume of the journal where the document has been published :type journal_volume: string :param pubinfo_freetext: Unstructured text describing the publication information. :type pubinfo_freetext: string :param material: material of the article :type material: string :param parent_record: reference for the parent record :type parent_record: string :param parent_isbn: isbn for the parent record :type parent_isbn: string
[ "Add", "publication", "info", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L392-L480
train
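The journal-title-only branch of add_publication_info is worth seeing in action; a hedged sketch, with builder construction assumed as above:

from inspire_schemas.builders.literature import LiteratureBuilder

builder = LiteratureBuilder(source='hypothetical-source')
builder.add_publication_info(journal_title='Nucl.Phys.B')
print(builder.record.get('public_notes'))      # a 'Submitted to Nucl.Phys.B' note
print(builder.record.get('publication_info'))  # None: nothing was appended

builder.add_publication_info(journal_title='Nucl.Phys.B', journal_volume='915',
                             page_start='1', page_end='10')
print(builder.record.get('publication_info'))  # populated; the record is now citeable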
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.add_thesis
def add_thesis( self, defense_date=None, degree_type=None, institution=None, date=None ): """Add thesis info. :param defense_date: defense date for the current thesis :type defense_date: string. A formatted date is required (yyyy-mm-dd) :param degree_type: degree type for the current thesis :type degree_type: string :param institution: author's affiliation for the current thesis :type institution: string :param date: publication date for the current thesis :type date: string. A formatted date is required (yyyy-mm-dd) """ self.record.setdefault('thesis_info', {}) thesis_item = {} for key in ('defense_date', 'date'): if locals()[key] is not None: thesis_item[key] = locals()[key] if degree_type is not None: thesis_item['degree_type'] = degree_type.lower() if institution is not None: thesis_item['institutions'] = [{'name': institution}] self.record['thesis_info'] = thesis_item
python
def add_thesis( self, defense_date=None, degree_type=None, institution=None, date=None ): """Add thesis info. :param defense_date: defense date for the current thesis :type defense_date: string. A formatted date is required (yyyy-mm-dd) :param degree_type: degree type for the current thesis :type degree_type: string :param institution: author's affiliation for the current thesis :type institution: string :param date: publication date for the current thesis :type date: string. A formatted date is required (yyyy-mm-dd) """ self.record.setdefault('thesis_info', {}) thesis_item = {} for key in ('defense_date', 'date'): if locals()[key] is not None: thesis_item[key] = locals()[key] if degree_type is not None: thesis_item['degree_type'] = degree_type.lower() if institution is not None: thesis_item['institutions'] = [{'name': institution}] self.record['thesis_info'] = thesis_item
[ "def", "add_thesis", "(", "self", ",", "defense_date", "=", "None", ",", "degree_type", "=", "None", ",", "institution", "=", "None", ",", "date", "=", "None", ")", ":", "self", ".", "record", ".", "setdefault", "(", "'thesis_info'", ",", "{", "}", ")", "thesis_item", "=", "{", "}", "for", "key", "in", "(", "'defense_date'", ",", "'date'", ")", ":", "if", "locals", "(", ")", "[", "key", "]", "is", "not", "None", ":", "thesis_item", "[", "key", "]", "=", "locals", "(", ")", "[", "key", "]", "if", "degree_type", "is", "not", "None", ":", "thesis_item", "[", "'degree_type'", "]", "=", "degree_type", ".", "lower", "(", ")", "if", "institution", "is", "not", "None", ":", "thesis_item", "[", "'institutions'", "]", "=", "[", "{", "'name'", ":", "institution", "}", "]", "self", ".", "record", "[", "'thesis_info'", "]", "=", "thesis_item" ]
Add thesis info. :param defense_date: defense date for the current thesis :type defense_date: string. A formatted date is required (yyyy-mm-dd) :param degree_type: degree type for the current thesis :type degree_type: string :param institution: author's affiliation for the current thesis :type institution: string :param date: publication date for the current thesis :type date: string. A formatted date is required (yyyy-mm-dd)
[ "Add", "thesis", "info", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L503-L537
train
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.add_license
def add_license( self, url=None, license=None, material=None, imposing=None ): """Add license. :param url: url for the description of the license :type url: string :param license: license type :type license: string :param material: material type :type material: string :param imposing: imposing type :type imposing: string """ hep_license = {} try: license_from_url = get_license_from_url(url) if license_from_url is not None: license = license_from_url except ValueError: pass for key in ('url', 'license', 'material', 'imposing'): if locals()[key] is not None: hep_license[key] = locals()[key] self._append_to('license', hep_license)
python
def add_license( self, url=None, license=None, material=None, imposing=None ): """Add license. :param url: url for the description of the license :type url: string :param license: license type :type license: string :param material: material type :type material: string :param imposing: imposing type :type imposing: string """ hep_license = {} try: license_from_url = get_license_from_url(url) if license_from_url is not None: license = license_from_url except ValueError: pass for key in ('url', 'license', 'material', 'imposing'): if locals()[key] is not None: hep_license[key] = locals()[key] self._append_to('license', hep_license)
[ "def", "add_license", "(", "self", ",", "url", "=", "None", ",", "license", "=", "None", ",", "material", "=", "None", ",", "imposing", "=", "None", ")", ":", "hep_license", "=", "{", "}", "try", ":", "license_from_url", "=", "get_license_from_url", "(", "url", ")", "if", "license_from_url", "is", "not", "None", ":", "license", "=", "license_from_url", "except", "ValueError", ":", "pass", "for", "key", "in", "(", "'url'", ",", "'license'", ",", "'material'", ",", "'imposing'", ")", ":", "if", "locals", "(", ")", "[", "key", "]", "is", "not", "None", ":", "hep_license", "[", "key", "]", "=", "locals", "(", ")", "[", "key", "]", "self", ".", "_append_to", "(", "'license'", ",", "hep_license", ")" ]
Add license. :param url: url for the description of the license :type url: string :param license: license type :type license: string :param material: material type :type material: string :param imposing: imposing type :type imposing: string
[ "Add", "license", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L559-L593
train
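A sketch of add_license resolving the license label from a URL; the exact label string depends on get_license_from_url, which is not shown in these rows:

from inspire_schemas.builders.literature import LiteratureBuilder

builder = LiteratureBuilder(source='hypothetical-source')
builder.add_license(url='http://creativecommons.org/licenses/by/4.0/')
# e.g. a dict with 'url' and a 'license' label such as 'CC-BY-4.0'
print(builder.record.get('license'))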
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.add_public_note
def add_public_note(self, public_note, source=None): """Add public note. :param public_note: public note for the current article. :type public_note: string :param source: source for the given notes. :type source: string """ self._append_to('public_notes', self._sourced_dict( source, value=public_note, ))
python
def add_public_note(self, public_note, source=None): """Add public note. :param public_note: public note for the current article. :type public_note: string :param source: source for the given notes. :type source: string """ self._append_to('public_notes', self._sourced_dict( source, value=public_note, ))
[ "def", "add_public_note", "(", "self", ",", "public_note", ",", "source", "=", "None", ")", ":", "self", ".", "_append_to", "(", "'public_notes'", ",", "self", ".", "_sourced_dict", "(", "source", ",", "value", "=", "public_note", ",", ")", ")" ]
Add public note. :param public_note: public note for the current article. :type public_note: string :param source: source for the given notes. :type source: string
[ "Add", "public", "note", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L596-L608
train
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.add_title
def add_title(self, title, subtitle=None, source=None): """Add title. :param title: title for the current document :type title: string :param subtitle: subtitle for the current document :type subtitle: string :param source: source for the given title :type source: string """ title_entry = self._sourced_dict( source, title=title, ) if subtitle is not None: title_entry['subtitle'] = subtitle self._append_to('titles', title_entry)
python
def add_title(self, title, subtitle=None, source=None): """Add title. :param title: title for the current document :type title: string :param subtitle: subtitle for the current document :type subtitle: string :param source: source for the given title :type source: string """ title_entry = self._sourced_dict( source, title=title, ) if subtitle is not None: title_entry['subtitle'] = subtitle self._append_to('titles', title_entry)
[ "def", "add_title", "(", "self", ",", "title", ",", "subtitle", "=", "None", ",", "source", "=", "None", ")", ":", "title_entry", "=", "self", ".", "_sourced_dict", "(", "source", ",", "title", "=", "title", ",", ")", "if", "subtitle", "is", "not", "None", ":", "title_entry", "[", "'subtitle'", "]", "=", "subtitle", "self", ".", "_append_to", "(", "'titles'", ",", "title_entry", ")" ]
Add title. :param title: title for the current document :type title: string :param subtitle: subtitle for the current document :type subtitle: string :param source: source for the given title :type source: string
[ "Add", "title", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L611-L630
train
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.add_title_translation
def add_title_translation(self, title, language, source=None): """Add title translation. :param title: translated title :type title: string :param language: language for the original title :type language: string (2 characters ISO639-1) :param source: source for the given title :type source: string """ title_translation = self._sourced_dict( source, title=title, language=language, ) self._append_to('title_translations', title_translation)
python
def add_title_translation(self, title, language, source=None): """Add title translation. :param title: translated title :type title: string :param language: language for the original title :type language: string (2 characters ISO639-1) :param source: source for the given title :type source: string """ title_translation = self._sourced_dict( source, title=title, language=language, ) self._append_to('title_translations', title_translation)
[ "def", "add_title_translation", "(", "self", ",", "title", ",", "language", ",", "source", "=", "None", ")", ":", "title_translation", "=", "self", ".", "_sourced_dict", "(", "source", ",", "title", "=", "title", ",", "language", "=", "language", ",", ")", "self", ".", "_append_to", "(", "'title_translations'", ",", "title_translation", ")" ]
Add title translation. :param title: translated title :type title: string :param language: language for the original title :type language: string (2 characters ISO639-1) :param source: source for the given title :type source: string
[ "Add", "title", "translation", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L633-L651
train
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.add_report_number
def add_report_number(self, report_number, source=None): """Add report numbers. :param report_number: report number for the current document :type report_number: string :param source: source for the given report number :type source: string """ self._append_to('report_numbers', self._sourced_dict( source, value=report_number, ))
python
def add_report_number(self, report_number, source=None): """Add report numbers. :param report_number: report number for the current document :type report_number: string :param source: source for the given report number :type source: string """ self._append_to('report_numbers', self._sourced_dict( source, value=report_number, ))
[ "def", "add_report_number", "(", "self", ",", "report_number", ",", "source", "=", "None", ")", ":", "self", ".", "_append_to", "(", "'report_numbers'", ",", "self", ".", "_sourced_dict", "(", "source", ",", "value", "=", "report_number", ",", ")", ")" ]
Add report numbers. :param report_number: report number for the current document :type report_number: string :param source: source for the given report number :type source: string
[ "Add", "report", "numbers", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L665-L677
train
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.add_collaboration
def add_collaboration(self, collaboration): """Add collaboration. :param collaboration: collaboration for the current document :type collaboration: string """ collaborations = normalize_collaboration(collaboration) for collaboration in collaborations: self._append_to('collaborations', { 'value': collaboration })
python
def add_collaboration(self, collaboration): """Add collaboration. :param collaboration: collaboration for the current document :type collaboration: string """ collaborations = normalize_collaboration(collaboration) for collaboration in collaborations: self._append_to('collaborations', { 'value': collaboration })
[ "def", "add_collaboration", "(", "self", ",", "collaboration", ")", ":", "collaborations", "=", "normalize_collaboration", "(", "collaboration", ")", "for", "collaboration", "in", "collaborations", ":", "self", ".", "_append_to", "(", "'collaborations'", ",", "{", "'value'", ":", "collaboration", "}", ")" ]
Add collaboration. :param collaboration: collaboration for the current document :type collaboration: string
[ "Add", "collaboration", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L680-L690
train
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.add_copyright
def add_copyright( self, material=None, holder=None, statement=None, url=None, year=None ): """Add Copyright. :type material: string :type holder: string :type statement: string :type url: string :type year: int """ copyright = {} for key in ('holder', 'statement', 'url'): if locals()[key] is not None: copyright[key] = locals()[key] if material is not None: copyright['material'] = material.lower() if year is not None: copyright['year'] = int(year) self._append_to('copyright', copyright)
python
def add_copyright( self, material=None, holder=None, statement=None, url=None, year=None ): """Add Copyright. :type material: string :type holder: string :type statement: string :type url: string :type year: int """ copyright = {} for key in ('holder', 'statement', 'url'): if locals()[key] is not None: copyright[key] = locals()[key] if material is not None: copyright['material'] = material.lower() if year is not None: copyright['year'] = int(year) self._append_to('copyright', copyright)
[ "def", "add_copyright", "(", "self", ",", "material", "=", "None", ",", "holder", "=", "None", ",", "statement", "=", "None", ",", "url", "=", "None", ",", "year", "=", "None", ")", ":", "copyright", "=", "{", "}", "for", "key", "in", "(", "'holder'", ",", "'statement'", ",", "'url'", ")", ":", "if", "locals", "(", ")", "[", "key", "]", "is", "not", "None", ":", "copyright", "[", "key", "]", "=", "locals", "(", ")", "[", "key", "]", "if", "material", "is", "not", "None", ":", "copyright", "[", "'material'", "]", "=", "material", ".", "lower", "(", ")", "if", "year", "is", "not", "None", ":", "copyright", "[", "'year'", "]", "=", "int", "(", "year", ")", "self", ".", "_append_to", "(", "'copyright'", ",", "copyright", ")" ]
Add Copyright. :type material: string :type holder: string :type statement: string :type url: string :type year: int
[ "Add", "Copyright", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L754-L785
train
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
LiteratureBuilder.add_figure
def add_figure(self, key, url, **kwargs): """Add a figure. Args: key (string): document key url (string): document url Keyword Args: caption (string): simple description label (string): label of the figure material (string): material of the figure original_url (string): original url filename (string): current file name Returns: None """ figure = self._check_metadata_for_file(key=key, url=url, **kwargs) for dict_key in ( 'caption', 'label', 'material', 'filename', 'url', 'original_url', ): if kwargs.get(dict_key) is not None: figure[dict_key] = kwargs[dict_key] if key_already_there(figure, self.record.get('figures', ())): raise ValueError( 'There\'s already a figure with the key %s.' % figure['key'] ) self._append_to('figures', figure)
python
def add_figure(self, key, url, **kwargs): """Add a figure. Args: key (string): document key url (string): document url Keyword Args: caption (string): simple description label (string): label of the figure material (string): material of the figure original_url (string): original url filename (string): current file name Returns: None """ figure = self._check_metadata_for_file(key=key, url=url, **kwargs) for dict_key in ( 'caption', 'label', 'material', 'filename', 'url', 'original_url', ): if kwargs.get(dict_key) is not None: figure[dict_key] = kwargs[dict_key] if key_already_there(figure, self.record.get('figures', ())): raise ValueError( 'There\'s already a figure with the key %s.' % figure['key'] ) self._append_to('figures', figure)
[ "def", "add_figure", "(", "self", ",", "key", ",", "url", ",", "*", "*", "kwargs", ")", ":", "figure", "=", "self", ".", "_check_metadata_for_file", "(", "key", "=", "key", ",", "url", "=", "url", ",", "*", "*", "kwargs", ")", "for", "dict_key", "in", "(", "'caption'", ",", "'label'", ",", "'material'", ",", "'filename'", ",", "'url'", ",", "'original_url'", ",", ")", ":", "if", "kwargs", ".", "get", "(", "dict_key", ")", "is", "not", "None", ":", "figure", "[", "dict_key", "]", "=", "kwargs", "[", "dict_key", "]", "if", "key_already_there", "(", "figure", ",", "self", ".", "record", ".", "get", "(", "'figures'", ",", "(", ")", ")", ")", ":", "raise", "ValueError", "(", "'There\\'s already a figure with the key %s.'", "%", "figure", "[", "'key'", "]", ")", "self", ".", "_append_to", "(", "'figures'", ",", "figure", ")", "self", ".", "add_document" ]
Add a figure. Args: key (string): document key url (string): document url Keyword Args: caption (string): simple description label (string): material (string): original_url (string): original url filename (string): current url Returns: None
[ "Add", "a", "figure", "." ]
34bc124b62fba565b6b40d1a3c15103a23a05edb
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L863-L899
train
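The duplicate-key guard is the behaviour most worth illustrating; this hedged sketch assumes _check_metadata_for_file accepts a bare key/url pair, which this entry does not show:

# Hedged sketch of the duplicate-key guard in add_figure.
from inspire_schemas.builders.literature import LiteratureBuilder

builder = LiteratureBuilder()
builder.add_figure(key='fig1.png', url='http://example.org/fig1.png',
                   caption='Cross section vs energy')
try:
    builder.add_figure(key='fig1.png', url='http://example.org/fig1b.png')
except ValueError as exc:
    print(exc)  # There's already a figure with the key fig1.png.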
guaix-ucm/numina
numina/array/offrot.py
fit_offset_and_rotation
def fit_offset_and_rotation(coords0, coords1): """Fit a rotation and a translation between two sets of points. Fit a rotation matrix and a translation between two matched sets consisting of M N-dimensional points Parameters ---------- coords0 : (M, N) array_like coords1 : (M, N) array_like Returns ------- offset : (N, ) array_like rotation : (N, N) array_like Notes ------ Fit offset and rotation using Kabsch's algorithm [1]_ [2]_ .. [1] Kabsch algorithm: https://en.wikipedia.org/wiki/Kabsch_algorithm .. [2] Also here: http://nghiaho.com/?page_id=671 """ coords0 = numpy.asarray(coords0) coords1 = numpy.asarray(coords1) cp = coords0.mean(axis=0) cq = coords1.mean(axis=0) p0 = coords0 - cp q0 = coords1 - cq crossvar = numpy.dot(numpy.transpose(p0), q0) u, _, vt = linalg.svd(crossvar) d = linalg.det(u) * linalg.det(vt) if d < 0: u[:, -1] = -u[:, -1] rot = numpy.transpose(numpy.dot(u, vt)) # Operation is # B - B0 = R(A - A0) # So off is B0 -R * A0 # The inverse operation is # A - A0 = R* (B- B0) # So inverse off* is A - R* B0 # where R* = transpose(R) # R * off* = -off off = -numpy.dot(rot, cp) + cq return off, rot
python
def fit_offset_and_rotation(coords0, coords1): """Fit a rotation and a translation between two sets of points. Fit a rotation matrix and a translation between two matched sets consisting of M N-dimensional points Parameters ---------- coords0 : (M, N) array_like coords1 : (M, N) array_like Returns ------- offset : (N, ) array_like rotation : (N, N) array_like Notes ------ Fit offset and rotation using Kabsch's algorithm [1]_ [2]_ .. [1] Kabsch algorithm: https://en.wikipedia.org/wiki/Kabsch_algorithm .. [2] Also here: http://nghiaho.com/?page_id=671 """ coords0 = numpy.asarray(coords0) coords1 = numpy.asarray(coords1) cp = coords0.mean(axis=0) cq = coords1.mean(axis=0) p0 = coords0 - cp q0 = coords1 - cq crossvar = numpy.dot(numpy.transpose(p0), q0) u, _, vt = linalg.svd(crossvar) d = linalg.det(u) * linalg.det(vt) if d < 0: u[:, -1] = -u[:, -1] rot = numpy.transpose(numpy.dot(u, vt)) # Operation is # B - B0 = R(A - A0) # So off is B0 -R * A0 # The inverse operation is # A - A0 = R* (B- B0) # So inverse off* is A - R* B0 # where R* = transpose(R) # R * off* = -off off = -numpy.dot(rot, cp) + cq return off, rot
[ "def", "fit_offset_and_rotation", "(", "coords0", ",", "coords1", ")", ":", "coords0", "=", "numpy", ".", "asarray", "(", "coords0", ")", "coords1", "=", "numpy", ".", "asarray", "(", "coords1", ")", "cp", "=", "coords0", ".", "mean", "(", "axis", "=", "0", ")", "cq", "=", "coords1", ".", "mean", "(", "axis", "=", "0", ")", "p0", "=", "coords0", "-", "cp", "q0", "=", "coords1", "-", "cq", "crossvar", "=", "numpy", ".", "dot", "(", "numpy", ".", "transpose", "(", "p0", ")", ",", "q0", ")", "u", ",", "_", ",", "vt", "=", "linalg", ".", "svd", "(", "crossvar", ")", "d", "=", "linalg", ".", "det", "(", "u", ")", "*", "linalg", ".", "det", "(", "vt", ")", "if", "d", "<", "0", ":", "u", "[", ":", ",", "-", "1", "]", "=", "-", "u", "[", ":", ",", "-", "1", "]", "rot", "=", "numpy", ".", "transpose", "(", "numpy", ".", "dot", "(", "u", ",", "vt", ")", ")", "# Operation is", "# B - B0 = R(A - A0)", "# So off is B0 -R * A0", "# The inverse operation is", "# A - A0 = R* (B- B0)", "# So inverse off* is A - R* B0", "# where R* = transpose(R)", "# R * off* = -off", "off", "=", "-", "numpy", ".", "dot", "(", "rot", ",", "cp", ")", "+", "cq", "return", "off", ",", "rot" ]
Fit a rotation and a translation between two sets of points. Fit a rotation matrix and a translation between two matched sets consisting of M N-dimensional points Parameters ---------- coords0 : (M, N) array_like coords1 : (M, N) array_like Returns ------- offset : (N, ) array_like rotation : (N, N) array_like Notes ------ Fit offset and rotation using Kabsch's algorithm [1]_ [2]_ .. [1] Kabsch algorithm: https://en.wikipedia.org/wiki/Kabsch_algorithm .. [2] Also here: http://nghiaho.com/?page_id=671
[ "Fit", "a", "rotation", "and", "a", "translation", "between", "two", "sets", "of", "points", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/offrot.py#L17-L71
train
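A quick self-check of the fit on synthetic data may help; it assumes numina is installed and only exercises the function as documented:

# Generate points, rotate and shift them, then recover the transform.
import numpy as np
from numina.array.offrot import fit_offset_and_rotation

theta = np.deg2rad(30.0)
rot_true = np.array([[np.cos(theta), -np.sin(theta)],
                     [np.sin(theta),  np.cos(theta)]])
off_true = np.array([1.5, -2.0])
a = np.random.default_rng(42).normal(size=(20, 2))
b = a @ rot_true.T + off_true            # B = R A + off, row by row
off, rot = fit_offset_and_rotation(a, b)
assert np.allclose(rot, rot_true)
assert np.allclose(off, off_true)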
ponty/eagexp
eagexp/image3d.py
pil_image3d
def pil_image3d(input, size=(800, 600), pcb_rotate=(0, 0, 0), timeout=20, showgui=False): ''' same as export_image3d, but there is no output file, PIL object is returned instead ''' f = tempfile.NamedTemporaryFile(suffix='.png', prefix='eagexp_') output = f.name export_image3d(input, output=output, size=size, pcb_rotate=pcb_rotate, timeout=timeout, showgui=showgui) im = Image.open(output) return im
python
def pil_image3d(input, size=(800, 600), pcb_rotate=(0, 0, 0), timeout=20, showgui=False): ''' same as export_image3d, but there is no output file, PIL object is returned instead ''' f = tempfile.NamedTemporaryFile(suffix='.png', prefix='eagexp_') output = f.name export_image3d(input, output=output, size=size, pcb_rotate=pcb_rotate, timeout=timeout, showgui=showgui) im = Image.open(output) return im
[ "def", "pil_image3d", "(", "input", ",", "size", "=", "(", "800", ",", "600", ")", ",", "pcb_rotate", "=", "(", "0", ",", "0", ",", "0", ")", ",", "timeout", "=", "20", ",", "showgui", "=", "False", ")", ":", "f", "=", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "'.png'", ",", "prefix", "=", "'eagexp_'", ")", "output", "=", "f", ".", "name", "export_image3d", "(", "input", ",", "output", "=", "output", ",", "size", "=", "size", ",", "pcb_rotate", "=", "pcb_rotate", ",", "timeout", "=", "timeout", ",", "showgui", "=", "showgui", ")", "im", "=", "Image", ".", "open", "(", "output", ")", "return", "im" ]
same as export_image3d, but there is no output file, a PIL object is returned instead
[ "same", "as", "export_image3d", "but", "there", "is", "no", "output", "file", "a", "PIL", "object", "is", "returned", "instead" ]
1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2
https://github.com/ponty/eagexp/blob/1dd5108c1d8112cc87d1bda64fa6c2784ccf0ff2/eagexp/image3d.py#L80-L91
train
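One fragility worth noting: NamedTemporaryFile deletes the file once the handle is garbage-collected, and PIL opens images lazily, so the pixel data can vanish before it is read. A hedged variant using delete=False plus an explicit load (not eagexp's own code):

import os
import tempfile
from PIL import Image
from eagexp.image3d import export_image3d

def pil_image3d_safe(input, **kwargs):
    with tempfile.NamedTemporaryFile(suffix='.png', prefix='eagexp_',
                                     delete=False) as f:
        output = f.name
    export_image3d(input, output=output, **kwargs)
    im = Image.open(output)
    im.load()            # force the pixel read before the file is removed
    os.remove(output)
    return im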
pylp/pylp
pylp/cli/logger.py
_make_color_fn
def _make_color_fn(color): """Create a function that sets the foreground color.""" def _color(text = ""): return (_color_sep + color + _color_sep2 + text + _color_sep + "default" + _color_sep2) return _color
python
def _make_color_fn(color): """Create a function that sets the foreground color.""" def _color(text = ""): return (_color_sep + color + _color_sep2 + text + _color_sep + "default" + _color_sep2) return _color
[ "def", "_make_color_fn", "(", "color", ")", ":", "def", "_color", "(", "text", "=", "\"\"", ")", ":", "return", "(", "_color_sep", "+", "color", "+", "_color_sep2", "+", "text", "+", "_color_sep", "+", "\"default\"", "+", "_color_sep2", ")", "return", "_color" ]
Create a function that sets the foreground color.
[ "Create", "a", "function", "that", "sets", "the", "foreground", "color", "." ]
7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/cli/logger.py#L21-L26
train
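The returned string is a tiny markup: a color name and a text chunk delimited by two sentinel separators that just_log later splits on. A stand-alone illustration (the actual _color_sep values live elsewhere in pylp/cli/logger.py, so the sentinels below are assumptions):

_color_sep = "\x00"    # assumed sentinel between directives
_color_sep2 = "\x01"   # assumed sentinel between color name and text

def _make_color_fn(color):
    def _color(text=""):
        return (_color_sep + color + _color_sep2 + text
                + _color_sep + "default" + _color_sep2)
    return _color

cyan = _make_color_fn("cyan")
print(repr(cyan("hello")))
# '\x00cyan\x01hello\x00default\x01': paint cyan, print text, reset color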
pylp/pylp
pylp/cli/logger.py
just_log
def just_log(*texts, sep = ""): """Log a text without adding the current time.""" if config.silent: return text = _color_sep + "default" + _color_sep2 + sep.join(texts) array = text.split(_color_sep) for part in array: parts = part.split(_color_sep2, 1) if len(parts) != 2 or not parts[1]: continue if not config.color: print(parts[1], end='') else: colors.foreground(parts[0]) print(parts[1], end='', flush=colors.is_win32) if config.color: colors.foreground("default") print()
python
def just_log(*texts, sep = ""): """Log a text without adding the current time.""" if config.silent: return text = _color_sep + "default" + _color_sep2 + sep.join(texts) array = text.split(_color_sep) for part in array: parts = part.split(_color_sep2, 1) if len(parts) != 2 or not parts[1]: continue if not config.color: print(parts[1], end='') else: colors.foreground(parts[0]) print(parts[1], end='', flush=colors.is_win32) if config.color: colors.foreground("default") print()
[ "def", "just_log", "(", "*", "texts", ",", "sep", "=", "\"\"", ")", ":", "if", "config", ".", "silent", ":", "return", "text", "=", "_color_sep", "+", "\"default\"", "+", "_color_sep2", "+", "sep", ".", "join", "(", "texts", ")", "array", "=", "text", ".", "split", "(", "_color_sep", ")", "for", "part", "in", "array", ":", "parts", "=", "part", ".", "split", "(", "_color_sep2", ",", "1", ")", "if", "len", "(", "parts", ")", "!=", "2", "or", "not", "parts", "[", "1", "]", ":", "continue", "if", "not", "config", ".", "color", ":", "print", "(", "parts", "[", "1", "]", ",", "end", "=", "''", ")", "else", ":", "colors", ".", "foreground", "(", "parts", "[", "0", "]", ")", "print", "(", "parts", "[", "1", "]", ",", "end", "=", "''", ",", "flush", "=", "colors", ".", "is_win32", ")", "if", "config", ".", "color", ":", "colors", ".", "foreground", "(", "\"default\"", ")", "print", "(", ")" ]
Log a text without adding the current time.
[ "Log", "a", "text", "without", "adding", "the", "current", "time", "." ]
7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/cli/logger.py#L34-L55
train
pylp/pylp
pylp/cli/logger.py
log
def log(*texts, sep = ""): """Log a text.""" text = sep.join(texts) count = text.count("\n") just_log("\n" * count, *get_time(), text.replace("\n", ""), sep=sep)
python
def log(*texts, sep = ""): """Log a text.""" text = sep.join(texts) count = text.count("\n") just_log("\n" * count, *get_time(), text.replace("\n", ""), sep=sep)
[ "def", "log", "(", "*", "texts", ",", "sep", "=", "\"\"", ")", ":", "text", "=", "sep", ".", "join", "(", "texts", ")", "count", "=", "text", ".", "count", "(", "\"\\n\"", ")", "just_log", "(", "\"\\n\"", "*", "count", ",", "*", "get_time", "(", ")", ",", "text", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", ",", "sep", "=", "sep", ")" ]
Log a text.
[ "Log", "a", "text", "." ]
7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/cli/logger.py#L63-L67
train
pylp/pylp
pylp/lib/src.py
find_files
def find_files(globs): """Find files to include.""" last_cwd = os.getcwd() os.chdir(config.cwd) gex, gin = separate_globs(globs) # Find excluded files exclude = [] for glob in gex: parse_glob(glob, exclude) files = [] include = [] order = 0 # Find included files and removed excluded files for glob in gin: order += 1 array = parse_glob(glob, include) base = find_base(glob) for file in array: if file not in exclude: files.append((order, base, file)) os.chdir(last_cwd) return files
python
def find_files(globs): """Find files to include.""" last_cwd = os.getcwd() os.chdir(config.cwd) gex, gin = separate_globs(globs) # Find excluded files exclude = [] for glob in gex: parse_glob(glob, exclude) files = [] include = [] order = 0 # Find included files and removed excluded files for glob in gin: order += 1 array = parse_glob(glob, include) base = find_base(glob) for file in array: if file not in exclude: files.append((order, base, file)) os.chdir(last_cwd) return files
[ "def", "find_files", "(", "globs", ")", ":", "last_cwd", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "config", ".", "cwd", ")", "gex", ",", "gin", "=", "separate_globs", "(", "globs", ")", "# Find excluded files", "exclude", "=", "[", "]", "for", "glob", "in", "gex", ":", "parse_glob", "(", "glob", ",", "exclude", ")", "files", "=", "[", "]", "include", "=", "[", "]", "order", "=", "0", "# Find included files and removed excluded files", "for", "glob", "in", "gin", ":", "order", "+=", "1", "array", "=", "parse_glob", "(", "glob", ",", "include", ")", "base", "=", "find_base", "(", "glob", ")", "for", "file", "in", "array", ":", "if", "file", "not", "in", "exclude", ":", "files", ".", "append", "(", "(", "order", ",", "base", ",", "file", ")", ")", "os", ".", "chdir", "(", "last_cwd", ")", "return", "files" ]
Find files to include.
[ "Find", "files", "to", "include", "." ]
7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/src.py#L17-L45
train
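To make the include/exclude pass concrete without pylp's helpers (separate_globs, parse_glob and find_base are not shown here), a self-contained sketch using the standard glob module, assuming a leading '!' marks exclusion globs:

import glob

def sketch_find_files(patterns):
    exclude = set()
    for pat in patterns:
        if pat.startswith('!'):               # assumed exclusion marker
            exclude.update(glob.glob(pat[1:]))
    files, order = [], 0
    for pat in patterns:
        if pat.startswith('!'):
            continue
        order += 1                            # mirrors the (order, base, file) tuples
        for path in glob.glob(pat):
            if path not in exclude:
                files.append((order, '.', path))
    return files

print(sketch_find_files(['*.py', '!setup.py']))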
pylp/pylp
pylp/lib/src.py
src
def src(globs, **options): """Read some files and return a stream.""" # Create an array of globs if only one string is given if isinstance(globs, str): globs = [ globs ] # Find files files = find_files(globs) # Create a stream stream = Stream() # Options options["cwd"] = config.cwd if "base" in options: options["base"] = os.path.abspath(options["base"]) # Create a File object for each file to include for infile in files: file = File(infile[2], **options) file.relpath = file.path file.order = infile[0] file.base = options.get("base", infile[1]) stream.append_file(file) # No more files to add stream.end_of_stream() # Pipe a file reader and return the stream if options.get("read", True): return stream.pipe(FileReader()) return stream
python
def src(globs, **options): """Read some files and return a stream.""" # Create an array of globs if only one string is given if isinstance(globs, str): globs = [ globs ] # Find files files = find_files(globs) # Create a stream stream = Stream() # Options options["cwd"] = config.cwd if "base" in options: options["base"] = os.path.abspath(options["base"]) # Create a File object for each file to include for infile in files: file = File(infile[2], **options) file.relpath = file.path file.order = infile[0] file.base = options.get("base", infile[1]) stream.append_file(file) # No more files to add stream.end_of_stream() # Pipe a file reader and return the stream if options.get("read", True): return stream.pipe(FileReader()) return stream
[ "def", "src", "(", "globs", ",", "*", "*", "options", ")", ":", "# Create an array of globs if only one string is given", "if", "isinstance", "(", "globs", ",", "str", ")", ":", "globs", "=", "[", "globs", "]", "# Find files", "files", "=", "find_files", "(", "globs", ")", "# Create a stream", "stream", "=", "Stream", "(", ")", "# Options", "options", "[", "\"cwd\"", "]", "=", "config", ".", "cwd", "if", "\"base\"", "in", "options", ":", "options", "[", "\"base\"", "]", "=", "os", ".", "path", ".", "abspath", "(", "options", "[", "\"base\"", "]", ")", "# Create a File object for each file to include", "for", "infile", "in", "files", ":", "file", "=", "File", "(", "infile", "[", "2", "]", ",", "*", "*", "options", ")", "file", ".", "relpath", "=", "file", ".", "path", "file", ".", "order", "=", "infile", "[", "0", "]", "file", ".", "base", "=", "options", ".", "get", "(", "\"base\"", ",", "infile", "[", "1", "]", ")", "stream", ".", "append_file", "(", "file", ")", "# No more files to add", "stream", ".", "end_of_stream", "(", ")", "# Pipe a file reader and return the stream", "if", "options", ".", "get", "(", "\"read\"", ",", "True", ")", ":", "return", "stream", ".", "pipe", "(", "FileReader", "(", ")", ")", "return", "stream" ]
Read some files and return a stream.
[ "Read", "some", "files", "and", "return", "a", "stream", "." ]
7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/src.py#L49-L82
train
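A hedged usage sketch of src(); it assumes pylp is installed and that its config.cwd has been set by the CLI before src() runs:

from pylp.lib.src import src

stream = src(['src/**/*.py', '!src/vendor/**'], read=False)
# read=False skips the FileReader pipe, so contents are not loaded yet;
# each File still carries .path, .relpath, .order and .base for writers.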
guaix-ucm/numina
numina/logger.py
log_to_history
def log_to_history(logger, name): """Decorate function, adding a logger handler stored in FITS.""" def log_to_history_decorator(method): def l2h_method(self, ri): history_header = fits.Header() fh = FITSHistoryHandler(history_header) fh.setLevel(logging.INFO) logger.addHandler(fh) try: result = method(self, ri) field = getattr(result, name, None) if field: with field.open() as hdulist: hdr = hdulist[0].header hdr.extend(history_header.cards) return result finally: logger.removeHandler(fh) return l2h_method return log_to_history_decorator
python
def log_to_history(logger, name): """Decorate function, adding a logger handler stored in FITS.""" def log_to_history_decorator(method): def l2h_method(self, ri): history_header = fits.Header() fh = FITSHistoryHandler(history_header) fh.setLevel(logging.INFO) logger.addHandler(fh) try: result = method(self, ri) field = getattr(result, name, None) if field: with field.open() as hdulist: hdr = hdulist[0].header hdr.extend(history_header.cards) return result finally: logger.removeHandler(fh) return l2h_method return log_to_history_decorator
[ "def", "log_to_history", "(", "logger", ",", "name", ")", ":", "def", "log_to_history_decorator", "(", "method", ")", ":", "def", "l2h_method", "(", "self", ",", "ri", ")", ":", "history_header", "=", "fits", ".", "Header", "(", ")", "fh", "=", "FITSHistoryHandler", "(", "history_header", ")", "fh", ".", "setLevel", "(", "logging", ".", "INFO", ")", "logger", ".", "addHandler", "(", "fh", ")", "try", ":", "result", "=", "method", "(", "self", ",", "ri", ")", "field", "=", "getattr", "(", "result", ",", "name", ",", "None", ")", "if", "field", ":", "with", "field", ".", "open", "(", ")", "as", "hdulist", ":", "hdr", "=", "hdulist", "[", "0", "]", ".", "header", "hdr", ".", "extend", "(", "history_header", ".", "cards", ")", "return", "result", "finally", ":", "logger", ".", "removeHandler", "(", "fh", ")", "return", "l2h_method", "return", "log_to_history_decorator" ]
Decorate function, adding a logger handler stored in FITS.
[ "Decorate", "function", "adding", "a", "logger", "handler", "stored", "in", "FITS", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/logger.py#L30-L54
train
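The mechanism is easiest to see with a minimal stand-in for FITSHistoryHandler (the real class lives in numina/logger.py; this sketch only assumes astropy):

import logging
from astropy.io import fits

class HistoryHandler(logging.Handler):
    """Append every log record to a FITS header as a HISTORY card."""
    def __init__(self, header):
        super().__init__()
        self.header = header
    def emit(self, record):
        self.header.add_history(self.format(record))

hdr = fits.Header()
log = logging.getLogger('demo')
log.setLevel(logging.INFO)
log.addHandler(HistoryHandler(hdr))
log.info('dark corrected')
print(list(hdr['HISTORY']))  # ['dark corrected']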
guaix-ucm/numina
numina/types/base.py
DataTypeBase.create_db_info
def create_db_info(): """Create metadata structure""" result = {} result['instrument'] = '' result['uuid'] = '' result['tags'] = {} result['type'] = '' result['mode'] = '' result['observation_date'] = "" result['origin'] = {} return result
python
def create_db_info(): """Create metadata structure""" result = {} result['instrument'] = '' result['uuid'] = '' result['tags'] = {} result['type'] = '' result['mode'] = '' result['observation_date'] = "" result['origin'] = {} return result
[ "def", "create_db_info", "(", ")", ":", "result", "=", "{", "}", "result", "[", "'instrument'", "]", "=", "''", "result", "[", "'uuid'", "]", "=", "''", "result", "[", "'tags'", "]", "=", "{", "}", "result", "[", "'type'", "]", "=", "''", "result", "[", "'mode'", "]", "=", "''", "result", "[", "'observation_date'", "]", "=", "\"\"", "result", "[", "'origin'", "]", "=", "{", "}", "return", "result" ]
Create metadata structure
[ "Create", "metadata", "structure" ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/types/base.py#L151-L161
train
pylp/pylp
pylp/utils/decorators.py
task
def task(obj = None, deps = None): """Decorator for creating a task.""" # The decorator is not used as a function if callable(obj): __task(obj.__name__, deps, obj) return obj # The decorator is used as a function def __decorated(func): __task(obj if obj else func.__name__, deps, func) return func return __decorated
python
def task(obj = None, deps = None): """Decorator for creating a task.""" # The decorator is not used as a function if callable(obj): __task(obj.__name__, deps, obj) return obj # The decorator is used as a function def __decorated(func): __task(obj if obj else func.__name__, deps, func) return func return __decorated
[ "def", "task", "(", "obj", "=", "None", ",", "deps", "=", "None", ")", ":", "# The decorator is not used as a function", "if", "callable", "(", "obj", ")", ":", "__task", "(", "obj", ".", "__name__", ",", "obj", ")", "return", "obj", "# The decorator is used as a function", "def", "__decorated", "(", "func", ")", ":", "__task", "(", "obj", "if", "obj", "else", "obj", ".", "__name__", ",", "deps", ",", "func", ")", "return", "func", "return", "__decorated" ]
Decorator for creating a task.
[ "Decorator", "for", "creating", "a", "task", "." ]
7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/utils/decorators.py#L18-L30
train
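The decorator supports both a bare @task and a parametrised @task(name, deps=...) form; a self-contained trace of the registrations (pylp's internal __task is stubbed here, so its exact signature is an assumption):

registered = {}

def __task(name, deps=None, fn=None):    # stub standing in for pylp's own
    registered[name] = (deps, fn)

def task(obj=None, deps=None):
    if callable(obj):                    # bare @task
        __task(obj.__name__, deps, obj)
        return obj
    def __decorated(func):               # @task('name', deps=[...])
        __task(obj if obj else func.__name__, deps, func)
        return func
    return __decorated

@task
def build(): ...

@task('lint', deps=['build'])
def run_lint(): ...

print(sorted(registered))  # ['build', 'lint']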
rfk/playitagainsam
playitagainsam/recorder.py
Recorder._read_one_byte
def _read_one_byte(self, fd): """Read a single byte, or raise OSError on failure.""" c = os.read(fd, 1) if not c: raise OSError return c
python
def _read_one_byte(self, fd): """Read a single byte, or raise OSError on failure.""" c = os.read(fd, 1) if not c: raise OSError return c
[ "def", "_read_one_byte", "(", "self", ",", "fd", ")", ":", "c", "=", "os", ".", "read", "(", "fd", ",", "1", ")", "if", "not", "c", ":", "raise", "OSError", "return", "c" ]
Read a single byte, or raise OSError on failure.
[ "Read", "a", "single", "byte", "or", "raise", "OSError", "on", "failure", "." ]
897cc8e8ca920a4afb8597b4a345361065a3f108
https://github.com/rfk/playitagainsam/blob/897cc8e8ca920a4afb8597b4a345361065a3f108/playitagainsam/recorder.py#L141-L146
train
guaix-ucm/numina
numina/tools/arg_file_is_new.py
arg_file_is_new
def arg_file_is_new(parser, arg, mode='w'): """Auxiliary function to give an error if the file already exists. Parameters ---------- parser : parser object Instance of argparse.ArgumentParser() arg : string File name. mode : string Optional string that specifies the mode in which the file is opened. Returns ------- handler : file object Open file handle. """ if os.path.exists(arg): parser.error("\nThe file \"%s\"\nalready exists and " "cannot be overwritten!" % arg) else: # return an open file handle handler = open(arg, mode=mode) return handler
python
def arg_file_is_new(parser, arg, mode='w'): """Auxiliary function to give an error if the file already exists. Parameters ---------- parser : parser object Instance of argparse.ArgumentParser() arg : string File name. mode : string Optional string that specifies the mode in which the file is opened. Returns ------- handler : file object Open file handle. """ if os.path.exists(arg): parser.error("\nThe file \"%s\"\nalready exists and " "cannot be overwritten!" % arg) else: # return an open file handle handler = open(arg, mode=mode) return handler
[ "def", "arg_file_is_new", "(", "parser", ",", "arg", ",", "mode", "=", "'w'", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "arg", ")", ":", "parser", ".", "error", "(", "\"\\nThe file \\\"%s\\\"\\nalready exists and \"", "\"cannot be overwritten!\"", "%", "arg", ")", "else", ":", "# return an open file handle", "handler", "=", "open", "(", "arg", ",", "mode", "=", "mode", ")", "return", "handler" ]
Auxiliary function to give an error if the file already exists. Parameters ---------- parser : parser object Instance of argparse.ArgumentParser() arg : string File name. mode : string Optional string that specifies the mode in which the file is opened. Returns ------- handler : file object Open file handle.
[ "Auxiliary", "function", "to", "give", "an", "error", "if", "the", "file", "already", "exists", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/tools/arg_file_is_new.py#L17-L42
train
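Typical wiring is through argparse's type callback, so the parser itself reports the clash; a hedged sketch (the exact CLI wiring in numina's tools is not shown in this entry):

import argparse
from numina.tools.arg_file_is_new import arg_file_is_new

parser = argparse.ArgumentParser()
parser.add_argument('outfile',
                    type=lambda arg: arg_file_is_new(parser, arg, mode='w'))
args = parser.parse_args(['result.fits'])  # errors out if the file exists
args.outfile.close()   # an open handle is returned, so close or write it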
guaix-ucm/numina
numina/array/ccd_line.py
intersection_spectrail_arcline
def intersection_spectrail_arcline(spectrail, arcline): """Compute intersection of spectrum trail with arc line. Parameters ---------- spectrail : SpectrumTrail object Instance of SpectrumTrail class. arcline : ArcLine object Instance of ArcLine class Returns ------- xroot, yroot : tuple of floats (X,Y) coordinates of the intersection. """ # approximate location of the solution expected_x = (arcline.xlower_line + arcline.xupper_line) / 2.0 # composition of polynomials to find intersection as # one of the roots of a new polynomial rootfunct = arcline.poly_funct(spectrail.poly_funct) rootfunct.coef[1] -= 1 # compute roots to find solution tmp_xroots = rootfunct.roots() # take the nearest root to the expected location xroot = tmp_xroots[np.abs(tmp_xroots - expected_x).argmin()] if np.isreal(xroot): xroot = xroot.real else: raise ValueError("xroot=" + str(xroot) + " is a complex number") yroot = spectrail.poly_funct(xroot) return xroot, yroot
python
def intersection_spectrail_arcline(spectrail, arcline): """Compute intersection of spectrum trail with arc line. Parameters ---------- spectrail : SpectrumTrail object Instance of SpectrumTrail class. arcline : ArcLine object Instance of ArcLine class Returns ------- xroot, yroot : tuple of floats (X,Y) coordinates of the intersection. """ # approximate location of the solution expected_x = (arcline.xlower_line + arcline.xupper_line) / 2.0 # composition of polynomials to find intersection as # one of the roots of a new polynomial rootfunct = arcline.poly_funct(spectrail.poly_funct) rootfunct.coef[1] -= 1 # compute roots to find solution tmp_xroots = rootfunct.roots() # take the nearest root to the expected location xroot = tmp_xroots[np.abs(tmp_xroots - expected_x).argmin()] if np.isreal(xroot): xroot = xroot.real else: raise ValueError("xroot=" + str(xroot) + " is a complex number") yroot = spectrail.poly_funct(xroot) return xroot, yroot
[ "def", "intersection_spectrail_arcline", "(", "spectrail", ",", "arcline", ")", ":", "# approximate location of the solution", "expected_x", "=", "(", "arcline", ".", "xlower_line", "+", "arcline", ".", "xupper_line", ")", "/", "2.0", "# composition of polynomials to find intersection as", "# one of the roots of a new polynomial", "rootfunct", "=", "arcline", ".", "poly_funct", "(", "spectrail", ".", "poly_funct", ")", "rootfunct", ".", "coef", "[", "1", "]", "-=", "1", "# compute roots to find solution", "tmp_xroots", "=", "rootfunct", ".", "roots", "(", ")", "# take the nearest root to the expected location", "xroot", "=", "tmp_xroots", "[", "np", ".", "abs", "(", "tmp_xroots", "-", "expected_x", ")", ".", "argmin", "(", ")", "]", "if", "np", ".", "isreal", "(", "xroot", ")", ":", "xroot", "=", "xroot", ".", "real", "else", ":", "raise", "ValueError", "(", "\"xroot=\"", "+", "str", "(", "xroot", ")", "+", "\" is a complex number\"", ")", "yroot", "=", "spectrail", ".", "poly_funct", "(", "xroot", ")", "return", "xroot", ",", "yroot" ]
Compute intersection of spectrum trail with arc line. Parameters ---------- spectrail : SpectrumTrail object Instance of SpectrumTrail class. arcline : ArcLine object Instance of ArcLine class Returns ------- xroot, yroot : tuple of floats (X,Y) coordinates of the intersection.
[ "Compute", "intersection", "of", "spectrum", "trail", "with", "arc", "line", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/ccd_line.py#L250-L286
train
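The coef[1] -= 1 step is the heart of the trick: the trail gives y = S(x), the near-vertical arc line gives x = A(y), so intersections satisfy A(S(x)) - x = 0. A worked illustration with made-up coefficients:

from numpy.polynomial import Polynomial
import numpy as np

S = Polynomial([10.0, 0.01])   # trail: y = 10 + 0.01 x
A = Polynomial([500.0, 2.0])   # arc line: x = 500 + 2 y
g = A(S)                       # composition A(S(x)), still a Polynomial
g.coef[1] -= 1.0               # g(x) = A(S(x)) - x
xroot = g.roots()[0].real
yroot = S(xroot)
assert np.isclose(A(yroot), xroot)   # the point lies on both curves
print(xroot, yroot)                  # ~530.61 ~15.31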
guaix-ucm/numina
numina/array/ccd_line.py
CCDLine.offset
def offset(self, offset_value): """Return a copy of self, shifted a constant offset. Parameters ---------- offset_value : float Number of pixels to shift the CCDLine. """ new_instance = deepcopy(self) new_instance.poly_funct.coef[0] += offset_value return new_instance
python
def offset(self, offset_value): """Return a copy of self, shifted a constant offset. Parameters ---------- offset_value : float Number of pixels to shift the CCDLine. """ new_instance = deepcopy(self) new_instance.poly_funct.coef[0] += offset_value return new_instance
[ "def", "offset", "(", "self", ",", "offset_value", ")", ":", "new_instance", "=", "deepcopy", "(", "self", ")", "new_instance", ".", "poly_funct", ".", "coef", "[", "0", "]", "+=", "offset_value", "return", "new_instance" ]
Return a copy of self, shifted by a constant offset. Parameters ---------- offset_value : float Number of pixels to shift the CCDLine.
[ "Return", "a", "copy", "of", "self", "shifted", "by", "a", "constant", "offset", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/ccd_line.py#L205-L217
train
guaix-ucm/numina
numina/tools/imath.py
compute_operation
def compute_operation(file1, file2, operation, output, display, args_z1z2, args_bbox, args_keystitle, args_geometry): """Compute output = file1 operation file2. Parameters ---------- file1 : file object First FITS file. file2 : file object Second FITS file. operation : string Mathematical operation. output : file object Output FITS file. display : string Character string indication whether the images are displayed. Valid values are 'all', 'result' and 'none' (default). args_z1z2 : string or None String providing the image cuts tuple: z1, z2, minmax or None. args_bbox : string or None String providing the bounding box tuple: nc1, nc2, ns1, ns2. args_keystitle : string or None Tuple of FITS keywords.format: key1,key2,...,keyn.format args_geometry : string or None Tuple x,y,dx,dy to define the Qt backend geometry. """ # read first FITS file with fits.open(file1) as hdulist: image_header1 = hdulist[0].header image1 = hdulist[0].data.astype(np.float) naxis1 = image_header1['naxis1'] naxis2 = image_header1['naxis2'] # if required, display file1 if display == 'all': ximshow_file(file1.name, args_z1z2=args_z1z2, args_bbox=args_bbox, args_keystitle=args_keystitle, args_geometry=args_geometry, debugplot=12) # read second FITS file with fits.open(file2) as hdulist: image_header2 = hdulist[0].header image2 = hdulist[0].data.astype(np.float) naxis1_ = image_header2['naxis1'] naxis2_ = image_header2['naxis2'] # if required, display file2 if display == 'all': ximshow_file(file2.name, args_z1z2=args_z1z2, args_bbox=args_bbox, args_keystitle=args_keystitle, args_geometry=args_geometry, debugplot=12) # check dimensions if naxis1 != naxis1_: raise ValueError("NAXIS1 values are different.") if naxis2 != naxis2_: raise ValueError("NAXIS2 values are different.") # compute operation if operation == "+": solution = image1 + image2 elif operation == "-": solution = image1 - image2 elif operation == "*": solution = image1 * image2 elif operation == "/": solution = image1 / image2 else: raise ValueError("Unexpected operation=" + str(operation)) # save output file hdu = fits.PrimaryHDU(solution.astype(np.float), image_header1) hdu.writeto(output, overwrite=True) # if required, display result if display in ['all', 'result']: ximshow_file(output.name, args_z1z2=args_z1z2, args_bbox=args_bbox, args_keystitle=args_keystitle, args_geometry=args_geometry, debugplot=12)
python
def compute_operation(file1, file2, operation, output, display, args_z1z2, args_bbox, args_keystitle, args_geometry): """Compute output = file1 operation file2. Parameters ---------- file1 : file object First FITS file. file2 : file object Second FITS file. operation : string Mathematical operation. output : file object Output FITS file. display : string Character string indication whether the images are displayed. Valid values are 'all', 'result' and 'none' (default). args_z1z2 : string or None String providing the image cuts tuple: z1, z2, minmax or None. args_bbox : string or None String providing the bounding box tuple: nc1, nc2, ns1, ns2. args_keystitle : string or None Tuple of FITS keywords.format: key1,key2,...,keyn.format args_geometry : string or None Tuple x,y,dx,dy to define the Qt backend geometry. """ # read first FITS file with fits.open(file1) as hdulist: image_header1 = hdulist[0].header image1 = hdulist[0].data.astype(np.float) naxis1 = image_header1['naxis1'] naxis2 = image_header1['naxis2'] # if required, display file1 if display == 'all': ximshow_file(file1.name, args_z1z2=args_z1z2, args_bbox=args_bbox, args_keystitle=args_keystitle, args_geometry=args_geometry, debugplot=12) # read second FITS file with fits.open(file2) as hdulist: image_header2 = hdulist[0].header image2 = hdulist[0].data.astype(np.float) naxis1_ = image_header2['naxis1'] naxis2_ = image_header2['naxis2'] # if required, display file2 if display == 'all': ximshow_file(file2.name, args_z1z2=args_z1z2, args_bbox=args_bbox, args_keystitle=args_keystitle, args_geometry=args_geometry, debugplot=12) # check dimensions if naxis1 != naxis1_: raise ValueError("NAXIS1 values are different.") if naxis2 != naxis2_: raise ValueError("NAXIS2 values are different.") # compute operation if operation == "+": solution = image1 + image2 elif operation == "-": solution = image1 - image2 elif operation == "*": solution = image1 * image2 elif operation == "/": solution = image1 / image2 else: raise ValueError("Unexpected operation=" + str(operation)) # save output file hdu = fits.PrimaryHDU(solution.astype(np.float), image_header1) hdu.writeto(output, overwrite=True) # if required, display result if display in ['all', 'result']: ximshow_file(output.name, args_z1z2=args_z1z2, args_bbox=args_bbox, args_keystitle=args_keystitle, args_geometry=args_geometry, debugplot=12)
[ "def", "compute_operation", "(", "file1", ",", "file2", ",", "operation", ",", "output", ",", "display", ",", "args_z1z2", ",", "args_bbox", ",", "args_keystitle", ",", "args_geometry", ")", ":", "# read first FITS file", "with", "fits", ".", "open", "(", "file1", ")", "as", "hdulist", ":", "image_header1", "=", "hdulist", "[", "0", "]", ".", "header", "image1", "=", "hdulist", "[", "0", "]", ".", "data", ".", "astype", "(", "np", ".", "float", ")", "naxis1", "=", "image_header1", "[", "'naxis1'", "]", "naxis2", "=", "image_header1", "[", "'naxis2'", "]", "# if required, display file1", "if", "display", "==", "'all'", ":", "ximshow_file", "(", "file1", ".", "name", ",", "args_z1z2", "=", "args_z1z2", ",", "args_bbox", "=", "args_bbox", ",", "args_keystitle", "=", "args_keystitle", ",", "args_geometry", "=", "args_geometry", ",", "debugplot", "=", "12", ")", "# read second FITS file", "with", "fits", ".", "open", "(", "file2", ")", "as", "hdulist", ":", "image_header2", "=", "hdulist", "[", "0", "]", ".", "header", "image2", "=", "hdulist", "[", "0", "]", ".", "data", ".", "astype", "(", "np", ".", "float", ")", "naxis1_", "=", "image_header2", "[", "'naxis1'", "]", "naxis2_", "=", "image_header2", "[", "'naxis2'", "]", "# if required, display file2", "if", "display", "==", "'all'", ":", "ximshow_file", "(", "file2", ".", "name", ",", "args_z1z2", "=", "args_z1z2", ",", "args_bbox", "=", "args_bbox", ",", "args_keystitle", "=", "args_keystitle", ",", "args_geometry", "=", "args_geometry", ",", "debugplot", "=", "12", ")", "# check dimensions", "if", "naxis1", "!=", "naxis1_", ":", "raise", "ValueError", "(", "\"NAXIS1 values are different.\"", ")", "if", "naxis2", "!=", "naxis2_", ":", "raise", "ValueError", "(", "\"NAXIS2 values are different.\"", ")", "# compute operation", "if", "operation", "==", "\"+\"", ":", "solution", "=", "image1", "+", "image2", "elif", "operation", "==", "\"-\"", ":", "solution", "=", "image1", "-", "image2", "elif", "operation", "==", "\"*\"", ":", "solution", "=", "image1", "*", "image2", "elif", "operation", "==", "\"/\"", ":", "solution", "=", "image1", "/", "image2", "else", ":", "raise", "ValueError", "(", "\"Unexpected operation=\"", "+", "str", "(", "operation", ")", ")", "# save output file", "hdu", "=", "fits", ".", "PrimaryHDU", "(", "solution", ".", "astype", "(", "np", ".", "float", ")", ",", "image_header1", ")", "hdu", ".", "writeto", "(", "output", ",", "overwrite", "=", "True", ")", "# if required, display result", "if", "display", "in", "[", "'all'", ",", "'result'", "]", ":", "ximshow_file", "(", "output", ".", "name", ",", "args_z1z2", "=", "args_z1z2", ",", "args_bbox", "=", "args_bbox", ",", "args_keystitle", "=", "args_keystitle", ",", "args_geometry", "=", "args_geometry", ",", "debugplot", "=", "12", ")" ]
Compute output = file1 operation file2. Parameters ---------- file1 : file object First FITS file. file2 : file object Second FITS file. operation : string Mathematical operation. output : file object Output FITS file. display : string Character string indicating whether the images are displayed. Valid values are 'all', 'result' and 'none' (default). args_z1z2 : string or None String providing the image cuts tuple: z1, z2, minmax or None. args_bbox : string or None String providing the bounding box tuple: nc1, nc2, ns1, ns2. args_keystitle : string or None Tuple of FITS keywords.format: key1,key2,...,keyn.format args_geometry : string or None Tuple x,y,dx,dy to define the Qt backend geometry.
[ "Compute", "output", "=", "file1", "operation", "file2", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/tools/imath.py#L25-L111
train
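One portability note on the casts above: np.float was deprecated in NumPy 1.20 and removed in 1.24, so .astype(np.float) raises AttributeError on current NumPy; the builtin float (or np.float64) is the drop-in replacement. A sketch of the affected step (the file name is hypothetical):

from astropy.io import fits

with fits.open('file1.fits') as hdulist:     # hypothetical input file
    image1 = hdulist[0].data.astype(float)   # float, not np.float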
guaix-ucm/numina
numina/array/stats.py
robust_std
def robust_std(x, debug=False): """Compute a robust estimator of the standard deviation See Eq. 3.36 (page 84) in Statistics, Data Mining, and Machine in Astronomy, by Ivezic, Connolly, VanderPlas & Gray Parameters ---------- x : 1d numpy array, float Array of input values which standard deviation is requested. debug : bool If True prints computed values Returns ------- sigmag : float Robust estimator of the standar deviation """ x = numpy.asarray(x) # compute percentiles and robust estimator q25 = numpy.percentile(x, 25) q75 = numpy.percentile(x, 75) sigmag = 0.7413 * (q75 - q25) if debug: print('debug|sigmag -> q25......................:', q25) print('debug|sigmag -> q75......................:', q75) print('debug|sigmag -> Robust standard deviation:', sigmag) return sigmag
python
def robust_std(x, debug=False): """Compute a robust estimator of the standard deviation See Eq. 3.36 (page 84) in Statistics, Data Mining, and Machine in Astronomy, by Ivezic, Connolly, VanderPlas & Gray Parameters ---------- x : 1d numpy array, float Array of input values which standard deviation is requested. debug : bool If True prints computed values Returns ------- sigmag : float Robust estimator of the standar deviation """ x = numpy.asarray(x) # compute percentiles and robust estimator q25 = numpy.percentile(x, 25) q75 = numpy.percentile(x, 75) sigmag = 0.7413 * (q75 - q25) if debug: print('debug|sigmag -> q25......................:', q25) print('debug|sigmag -> q75......................:', q75) print('debug|sigmag -> Robust standard deviation:', sigmag) return sigmag
[ "def", "robust_std", "(", "x", ",", "debug", "=", "False", ")", ":", "x", "=", "numpy", ".", "asarray", "(", "x", ")", "# compute percentiles and robust estimator", "q25", "=", "numpy", ".", "percentile", "(", "x", ",", "25", ")", "q75", "=", "numpy", ".", "percentile", "(", "x", ",", "75", ")", "sigmag", "=", "0.7413", "*", "(", "q75", "-", "q25", ")", "if", "debug", ":", "print", "(", "'debug|sigmag -> q25......................:'", ",", "q25", ")", "print", "(", "'debug|sigmag -> q75......................:'", ",", "q75", ")", "print", "(", "'debug|sigmag -> Robust standard deviation:'", ",", "sigmag", ")", "return", "sigmag" ]
Compute a robust estimator of the standard deviation See Eq. 3.36 (page 84) in Statistics, Data Mining, and Machine Learning in Astronomy, by Ivezic, Connolly, VanderPlas & Gray Parameters ---------- x : 1d numpy array, float Array of input values whose standard deviation is requested. debug : bool If True prints computed values Returns ------- sigmag : float Robust estimator of the standard deviation
[ "Compute", "a", "robust", "estimator", "of", "the", "standard", "deviation" ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/stats.py#L17-L48
train
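The 0.7413 factor is 1/(2 * 0.6745): for a normal distribution the interquartile range is 2 * 0.6745 sigma, so the estimator recovers sigma. A quick Monte Carlo check with NumPy alone:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(loc=5.0, scale=2.0, size=200_000)
q25, q75 = np.percentile(x, [25, 75])
print(0.7413 * (q75 - q25))   # ~2.0, matching the true sigma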
guaix-ucm/numina
numina/array/stats.py
summary
def summary(x, rm_nan=False, debug=False): """Compute basic statistical parameters. Parameters ---------- x : 1d numpy array, float Input array with values which statistical properties are requested. rm_nan : bool If True, filter out NaN values before computing statistics. debug : bool If True prints computed values. Returns ------- result : Python dictionary Number of points, minimum, percentile 25, percentile 50 (median), mean, percentile 75, maximum, standard deviation, robust standard deviation, percentile 15.866 (equivalent to -1 sigma in a normal distribution) and percentile 84.134 (+1 sigma). """ # protections if type(x) is np.ndarray: xx = np.copy(x) else: if type(x) is list: xx = np.array(x) else: raise ValueError('x=' + str(x) + ' must be a numpy.ndarray') if xx.ndim is not 1: raise ValueError('xx.dim=' + str(xx.ndim) + ' must be 1') # filter out NaN's if rm_nan: xx = xx[np.logical_not(np.isnan(xx))] # compute basic statistics npoints = len(xx) ok = npoints > 0 result = { 'npoints' : npoints, 'minimum' : np.min(xx) if ok else 0, 'percentile25' : np.percentile(xx, 25) if ok else 0, 'median' : np.percentile(xx, 50) if ok else 0, 'mean' : np.mean(xx) if ok else 0, 'percentile75': np.percentile(xx, 75) if ok else 0, 'maximum' : np.max(xx) if ok else 0, 'std': np.std(xx) if ok else 0, 'robust_std' : robust_std(xx) if ok else 0, 'percentile15': np.percentile(xx, 15.86553) if ok else 0, 'percentile84': np.percentile(xx, 84.13447) if ok else 0 } if debug: print('>>> ========================================') print('>>> STATISTICAL SUMMARY:') print('>>> ----------------------------------------') print('>>> Number of points.........:', result['npoints']) print('>>> Minimum..................:', result['minimum']) print('>>> 1st Quartile.............:', result['percentile25']) print('>>> Median...................:', result['median']) print('>>> Mean.....................:', result['mean']) print('>>> 3rd Quartile.............:', result['percentile75']) print('>>> Maximum..................:', result['maximum']) print('>>> ----------------------------------------') print('>>> Standard deviation.......:', result['std']) print('>>> Robust standard deviation:', result['robust_std']) print('>>> 0.1586553 percentile.....:', result['percentile15']) print('>>> 0.8413447 percentile.....:', result['percentile84']) print('>>> ========================================') return result
python
def summary(x, rm_nan=False, debug=False): """Compute basic statistical parameters. Parameters ---------- x : 1d numpy array, float Input array with values which statistical properties are requested. rm_nan : bool If True, filter out NaN values before computing statistics. debug : bool If True prints computed values. Returns ------- result : Python dictionary Number of points, minimum, percentile 25, percentile 50 (median), mean, percentile 75, maximum, standard deviation, robust standard deviation, percentile 15.866 (equivalent to -1 sigma in a normal distribution) and percentile 84.134 (+1 sigma). """ # protections if type(x) is np.ndarray: xx = np.copy(x) else: if type(x) is list: xx = np.array(x) else: raise ValueError('x=' + str(x) + ' must be a numpy.ndarray') if xx.ndim is not 1: raise ValueError('xx.dim=' + str(xx.ndim) + ' must be 1') # filter out NaN's if rm_nan: xx = xx[np.logical_not(np.isnan(xx))] # compute basic statistics npoints = len(xx) ok = npoints > 0 result = { 'npoints' : npoints, 'minimum' : np.min(xx) if ok else 0, 'percentile25' : np.percentile(xx, 25) if ok else 0, 'median' : np.percentile(xx, 50) if ok else 0, 'mean' : np.mean(xx) if ok else 0, 'percentile75': np.percentile(xx, 75) if ok else 0, 'maximum' : np.max(xx) if ok else 0, 'std': np.std(xx) if ok else 0, 'robust_std' : robust_std(xx) if ok else 0, 'percentile15': np.percentile(xx, 15.86553) if ok else 0, 'percentile84': np.percentile(xx, 84.13447) if ok else 0 } if debug: print('>>> ========================================') print('>>> STATISTICAL SUMMARY:') print('>>> ----------------------------------------') print('>>> Number of points.........:', result['npoints']) print('>>> Minimum..................:', result['minimum']) print('>>> 1st Quartile.............:', result['percentile25']) print('>>> Median...................:', result['median']) print('>>> Mean.....................:', result['mean']) print('>>> 3rd Quartile.............:', result['percentile75']) print('>>> Maximum..................:', result['maximum']) print('>>> ----------------------------------------') print('>>> Standard deviation.......:', result['std']) print('>>> Robust standard deviation:', result['robust_std']) print('>>> 0.1586553 percentile.....:', result['percentile15']) print('>>> 0.8413447 percentile.....:', result['percentile84']) print('>>> ========================================') return result
[ "def", "summary", "(", "x", ",", "rm_nan", "=", "False", ",", "debug", "=", "False", ")", ":", "# protections", "if", "type", "(", "x", ")", "is", "np", ".", "ndarray", ":", "xx", "=", "np", ".", "copy", "(", "x", ")", "else", ":", "if", "type", "(", "x", ")", "is", "list", ":", "xx", "=", "np", ".", "array", "(", "x", ")", "else", ":", "raise", "ValueError", "(", "'x='", "+", "str", "(", "x", ")", "+", "' must be a numpy.ndarray'", ")", "if", "xx", ".", "ndim", "is", "not", "1", ":", "raise", "ValueError", "(", "'xx.dim='", "+", "str", "(", "xx", ".", "ndim", ")", "+", "' must be 1'", ")", "# filter out NaN's", "if", "rm_nan", ":", "xx", "=", "xx", "[", "np", ".", "logical_not", "(", "np", ".", "isnan", "(", "xx", ")", ")", "]", "# compute basic statistics", "npoints", "=", "len", "(", "xx", ")", "ok", "=", "npoints", ">", "0", "result", "=", "{", "'npoints'", ":", "npoints", ",", "'minimum'", ":", "np", ".", "min", "(", "xx", ")", "if", "ok", "else", "0", ",", "'percentile25'", ":", "np", ".", "percentile", "(", "xx", ",", "25", ")", "if", "ok", "else", "0", ",", "'median'", ":", "np", ".", "percentile", "(", "xx", ",", "50", ")", "if", "ok", "else", "0", ",", "'mean'", ":", "np", ".", "mean", "(", "xx", ")", "if", "ok", "else", "0", ",", "'percentile75'", ":", "np", ".", "percentile", "(", "xx", ",", "75", ")", "if", "ok", "else", "0", ",", "'maximum'", ":", "np", ".", "max", "(", "xx", ")", "if", "ok", "else", "0", ",", "'std'", ":", "np", ".", "std", "(", "xx", ")", "if", "ok", "else", "0", ",", "'robust_std'", ":", "robust_std", "(", "xx", ")", "if", "ok", "else", "0", ",", "'percentile15'", ":", "np", ".", "percentile", "(", "xx", ",", "15.86553", ")", "if", "ok", "else", "0", ",", "'percentile84'", ":", "np", ".", "percentile", "(", "xx", ",", "84.13447", ")", "if", "ok", "else", "0", "}", "if", "debug", ":", "print", "(", "'>>> ========================================'", ")", "print", "(", "'>>> STATISTICAL SUMMARY:'", ")", "print", "(", "'>>> ----------------------------------------'", ")", "print", "(", "'>>> Number of points.........:'", ",", "result", "[", "'npoints'", "]", ")", "print", "(", "'>>> Minimum..................:'", ",", "result", "[", "'minimum'", "]", ")", "print", "(", "'>>> 1st Quartile.............:'", ",", "result", "[", "'percentile25'", "]", ")", "print", "(", "'>>> Median...................:'", ",", "result", "[", "'median'", "]", ")", "print", "(", "'>>> Mean.....................:'", ",", "result", "[", "'mean'", "]", ")", "print", "(", "'>>> 3rd Quartile.............:'", ",", "result", "[", "'percentile75'", "]", ")", "print", "(", "'>>> Maximum..................:'", ",", "result", "[", "'maximum'", "]", ")", "print", "(", "'>>> ----------------------------------------'", ")", "print", "(", "'>>> Standard deviation.......:'", ",", "result", "[", "'std'", "]", ")", "print", "(", "'>>> Robust standard deviation:'", ",", "result", "[", "'robust_std'", "]", ")", "print", "(", "'>>> 0.1586553 percentile.....:'", ",", "result", "[", "'percentile15'", "]", ")", "print", "(", "'>>> 0.8413447 percentile.....:'", ",", "result", "[", "'percentile84'", "]", ")", "print", "(", "'>>> ========================================'", ")", "return", "result" ]
Compute basic statistical parameters. Parameters ---------- x : 1d numpy array, float Input array with values whose statistical properties are requested. rm_nan : bool If True, filter out NaN values before computing statistics. debug : bool If True prints computed values. Returns ------- result : Python dictionary Number of points, minimum, percentile 25, percentile 50 (median), mean, percentile 75, maximum, standard deviation, robust standard deviation, percentile 15.866 (equivalent to -1 sigma in a normal distribution) and percentile 84.134 (+1 sigma).
[ "Compute", "basic", "statistical", "parameters", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/stats.py#L51-L126
train
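One detail worth flagging in the guard above: xx.ndim is not 1 compares by identity, which only works by accident of CPython's small-int caching and emits a SyntaxWarning on Python 3.8+; equality is the safe spelling. A minimal corrected guard:

import numpy as np

def check_1d(x):
    xx = np.asarray(x, dtype=float)   # accepts lists and ndarrays alike
    if xx.ndim != 1:                  # '!=' instead of 'is not'
        raise ValueError('xx.ndim=' + str(xx.ndim) + ' must be 1')
    return xx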
guaix-ucm/numina
numina/array/trace/traces.py
fit_trace_polynomial
def fit_trace_polynomial(trace, deg, axis=0): ''' Fit a trace information table to a polynomial. Parameters ---------- trace A 2D array, 2 columns and n rows deg : int Degree of polynomial axis : {0, 1} Spatial axis of the array (0 is Y, 1 is X). ''' dispaxis = axis_to_dispaxis(axis) # FIT to a polynomial pfit = numpy.polyfit(trace[:,0], trace[:,1], deg) start = trace[0,0] stop = trace[-1,0] return PolyTrace(start, stop, axis, pfit)
python
def fit_trace_polynomial(trace, deg, axis=0): ''' Fit a trace information table to a polynomial. Parameters ---------- trace A 2D array, 2 columns and n rows deg : int Degree of polynomial axis : {0, 1} Spatial axis of the array (0 is Y, 1 is X). ''' dispaxis = axis_to_dispaxis(axis) # FIT to a polynomial pfit = numpy.polyfit(trace[:,0], trace[:,1], deg) start = trace[0,0] stop = trace[-1,0] return PolyTrace(start, stop, axis, pfit)
[ "def", "fit_trace_polynomial", "(", "trace", ",", "deg", ",", "axis", "=", "0", ")", ":", "dispaxis", "=", "axis_to_dispaxis", "(", "axis", ")", "# FIT to a polynomial", "pfit", "=", "numpy", ".", "polyfit", "(", "trace", "[", ":", ",", "0", "]", ",", "trace", "[", ":", ",", "1", "]", ",", "deg", ")", "start", "=", "trace", "[", "0", ",", "0", "]", "stop", "=", "trace", "[", "-", "1", ",", "0", "]", ",", "return", "PolyTrace", "(", "start", ",", "stop", ",", "axis", ",", "pfit", ")" ]
Fit a trace information table to a polynomial. Parameters ---------- trace A 2D array, 2 columns and n rows deg : int Degree of polynomial axis : {0, 1} Spatial axis of the array (0 is Y, 1 is X).
[ "Fit", "a", "trace", "information", "table", "to", "a", "polynomial", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/trace/traces.py#L74-L94
train
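Two things the fit relies on: numpy.polyfit returns coefficients highest degree first, and the trace table stores positions in column 0 and centres in column 1. A small demo of just the polyfit step (PolyTrace is numina-internal, so it is left out):

import numpy as np

trace = np.column_stack([np.arange(100.0),                # column 0: position
                         50.0 + 0.1 * np.arange(100.0)])  # column 1: centre
pfit = np.polyfit(trace[:, 0], trace[:, 1], deg=1)
print(np.round(pfit, 6))              # [0.1 50.], highest degree first
print(np.polyval(pfit, trace[0, 0]))  # 50.0, the centre at the start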
druids/django-chamber
chamber/models/humanized_helpers/__init__.py
price_humanized
def price_humanized(value, inst, currency=None): """ Return a humanized price """ return (natural_number_with_currency(value, ugettext('CZK') if currency is None else currency) if value is not None else ugettext('(None)'))
python
def price_humanized(value, inst, currency=None): """ Return a humanized price """ return (natural_number_with_currency(value, ugettext('CZK') if currency is None else currency) if value is not None else ugettext('(None)'))
[ "def", "price_humanized", "(", "value", ",", "inst", ",", "currency", "=", "None", ")", ":", "return", "(", "natural_number_with_currency", "(", "value", ",", "ugettext", "(", "'CZK'", ")", "if", "currency", "is", "None", "else", "currency", ")", "if", "value", "is", "not", "None", "else", "ugettext", "(", "'(None)'", ")", ")" ]
Return a humanized price
[ "Return", "a", "humanized", "price" ]
eef4169923557e96877a664fa254e8c0814f3f23
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/models/humanized_helpers/__init__.py#L6-L11
train
guaix-ucm/numina
numina/datamodel.py
DataModel.get_imgid
def get_imgid(self, img): """Obtain a unique identifier of the image. Parameters ---------- img : astropy.io.fits.HDUList Returns ------- str: Identification of the image """ imgid = img.filename() # More heuristics here... # get FILENAME keyword, CHECKSUM, for example... hdr = self.get_header(img) if 'checksum' in hdr: return hdr['checksum'] if 'filename' in hdr: return hdr['filename'] if not imgid: imgid = repr(img) return imgid
python
def get_imgid(self, img): """Obtain a unique identifier of the image. Parameters ---------- img : astropy.io.fits.HDUList Returns ------- str: Identification of the image """ imgid = img.filename() # More heuristics here... # get FILENAME keyword, CHECKSUM, for example... hdr = self.get_header(img) if 'checksum' in hdr: return hdr['checksum'] if 'filename' in hdr: return hdr['filename'] if not imgid: imgid = repr(img) return imgid
[ "def", "get_imgid", "(", "self", ",", "img", ")", ":", "imgid", "=", "img", ".", "filename", "(", ")", "# More heuristics here...", "# get FILENAME keyword, CHECKSUM, for example...", "hdr", "=", "self", ".", "get_header", "(", "img", ")", "if", "'checksum'", "in", "hdr", ":", "return", "hdr", "[", "'checksum'", "]", "if", "'filename'", "in", "hdr", ":", "return", "hdr", "[", "'filename'", "]", "if", "not", "imgid", ":", "imgid", "=", "repr", "(", "img", ")", "return", "imgid" ]
Obtain a unique identifier of the image. Parameters ---------- img : astropy.io.fits.HDUList Returns ------- str: Identification of the image
[ "Obtain", "a", "unique", "identifier", "of", "the", "image", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/datamodel.py#L164-L191
train
pylp/pylp
pylp/lib/runner.py
TaskRunner.log_starting
def log_starting(self): """Log that the task has started.""" self.start_time = time.perf_counter() logger.log("Starting '", logger.cyan(self.name), "'...")
python
def log_starting(self): """Log that the task has started.""" self.start_time = time.perf_counter() logger.log("Starting '", logger.cyan(self.name), "'...")
[ "def", "log_starting", "(", "self", ")", ":", "self", ".", "start_time", "=", "time", ".", "perf_counter", "(", ")", "logger", ".", "log", "(", "\"Starting '\"", ",", "logger", ".", "cyan", "(", "self", ".", "name", ")", ",", "\"'...\"", ")" ]
Log that the task has started.
[ "Log", "that", "the", "task", "has", "started", "." ]
7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/runner.py#L50-L53
train
pylp/pylp
pylp/lib/runner.py
TaskRunner.log_finished
def log_finished(self): """Log that this task is done.""" delta = time.perf_counter() - self.start_time logger.log("Finished '", logger.cyan(self.name), "' after ", logger.magenta(time_to_text(delta)))
python
def log_finished(self): """Log that this task is done.""" delta = time.perf_counter() - self.start_time logger.log("Finished '", logger.cyan(self.name), "' after ", logger.magenta(time_to_text(delta)))
[ "def", "log_finished", "(", "self", ")", ":", "delta", "=", "time", ".", "perf_counter", "(", ")", "-", "self", ".", "start_time", "logger", ".", "log", "(", "\"Finished '\"", ",", "logger", ".", "cyan", "(", "self", ".", "name", ")", ",", "\"' after \"", ",", "logger", ".", "magenta", "(", "time_to_text", "(", "delta", ")", ")", ")" ]
Log that this task is done.
[ "Log", "that", "this", "task", "is", "done", "." ]
7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/runner.py#L56-L60
train
pylp/pylp
pylp/lib/runner.py
TaskRunner.call_task_fn
def call_task_fn(self): """Call the function attached to the task.""" if not self.fn: return self.log_finished() future = asyncio.Future() future.add_done_callback(lambda x: self.log_finished()) if inspect.iscoroutinefunction(self.fn): f = asyncio.ensure_future(self.fn()) f.add_done_callback(lambda x: self.bind_end(x.result(), future)) else: self.bind_end(self.fn(), future) return future
python
def call_task_fn(self): """Call the function attached to the task.""" if not self.fn: return self.log_finished() future = asyncio.Future() future.add_done_callback(lambda x: self.log_finished()) if inspect.iscoroutinefunction(self.fn): f = asyncio.ensure_future(self.fn()) f.add_done_callback(lambda x: self.bind_end(x.result(), future)) else: self.bind_end(self.fn(), future) return future
[ "def", "call_task_fn", "(", "self", ")", ":", "if", "not", "self", ".", "fn", ":", "return", "self", ".", "log_finished", "(", ")", "future", "=", "asyncio", ".", "Future", "(", ")", "future", ".", "add_done_callback", "(", "lambda", "x", ":", "self", ".", "log_finished", "(", ")", ")", "if", "inspect", ".", "iscoroutinefunction", "(", "self", ".", "fn", ")", ":", "f", "=", "asyncio", ".", "ensure_future", "(", "self", ".", "fn", "(", ")", ")", "f", ".", "add_done_callback", "(", "lambda", "x", ":", "self", ".", "bind_end", "(", "x", ".", "result", "(", ")", ",", "future", ")", ")", "else", ":", "self", ".", "bind_end", "(", "self", ".", "fn", "(", ")", ",", "future", ")", "return", "future" ]
Call the function attached to the task.
[ "Call", "the", "function", "attached", "to", "the", "task", "." ]
7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/runner.py#L63-L77
train
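call_task_fn dispatches on inspect.iscoroutinefunction: coroutine tasks are scheduled with ensure_future and chained through a done-callback, while plain callables run synchronously; either way the caller gets a Future. Below is a reduced, runnable sketch of only that dispatch, leaving out the Stream/bind_end plumbing and the completion logging; the helper name is hypothetical.

import asyncio
import inspect

async def run_maybe_async(fn):
    # Mirror of the dispatch above: coroutine functions are scheduled on
    # the loop, plain callables run synchronously.
    if inspect.iscoroutinefunction(fn):
        return await asyncio.ensure_future(fn())
    return fn()

async def main():
    async def coro_task():
        return 'from coroutine'

    print(await run_maybe_async(coro_task))            # 'from coroutine'
    print(await run_maybe_async(lambda: 'from sync'))  # 'from sync'

asyncio.run(main())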
pylp/pylp
pylp/lib/runner.py
TaskRunner.bind_end
def bind_end(self, stream, future):
    """Bind a 'TaskEndTransformer' to a stream."""
    if not isinstance(stream, Stream):
        future.set_result(None)
    else:
        stream.pipe(TaskEndTransformer(future))
python
[ "def", "bind_end", "(", "self", ",", "stream", ",", "future", ")", ":", "if", "not", "isinstance", "(", "stream", ",", "Stream", ")", ":", "future", ".", "set_result", "(", "None", ")", "else", ":", "stream", ".", "pipe", "(", "TaskEndTransformer", "(", "future", ")", ")" ]
Bind a 'TaskEndTransformer' to a stream.
[ "Bind", "a", "TaskEndTransformer", "to", "a", "stream", "." ]
7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/runner.py#L80-L85
train
pylp/pylp
pylp/lib/runner.py
TaskRunner.start_deps
async def start_deps(self, deps):
    """Start running dependencies."""

    # Get only new dependencies
    deps = list(filter(lambda dep: dep not in self.called, deps))
    self.called += deps

    # Start only existing dependencies
    runners = list(filter(lambda x: x and x.future,
                          map(lambda dep: pylp.start(dep), deps)))

    if len(runners) != 0:
        await asyncio.wait(map(lambda runner: runner.future, runners))

    # Call the attached function
    future = self.call_task_fn()
    if future:
        await future
python
[ "async", "def", "start_deps", "(", "self", ",", "deps", ")", ":", "# Get only new dependencies", "deps", "=", "list", "(", "filter", "(", "lambda", "dep", ":", "dep", "not", "in", "self", ".", "called", ",", "deps", ")", ")", "self", ".", "called", "+=", "deps", "# Start only existing dependencies", "runners", "=", "list", "(", "filter", "(", "lambda", "x", ":", "x", "and", "x", ".", "future", ",", "map", "(", "lambda", "dep", ":", "pylp", ".", "start", "(", "dep", ")", ",", "deps", ")", ")", ")", "if", "len", "(", "runners", ")", "!=", "0", ":", "await", "asyncio", ".", "wait", "(", "map", "(", "lambda", "runner", ":", "runner", ".", "future", ",", "runners", ")", ")", "# Call the attached function ", "future", "=", "self", ".", "call_task_fn", "(", ")", "if", "future", ":", "await", "future" ]
Start running dependencies.
[ "Start", "running", "dependencies", "." ]
7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/runner.py#L88-L103
train
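start_deps first drops dependencies that already ran (tracked in self.called), then awaits the futures of the runners that actually started. The same dedup-then-wait pattern in isolation, with hypothetical `called` and `start` stand-ins for the runner registry:

import asyncio

async def start_deps_sketch(deps, called, start):
    # Dedup-then-await, as in the record above; `called` holds names that
    # already ran, `start` maps a name to a Future (or None if unknown).
    new = [d for d in deps if d not in called]
    called += new
    futures = [f for f in map(start, new) if f is not None]
    if futures:
        await asyncio.wait(futures)

async def demo():
    called = []

    async def job():
        print('dependency ran')

    await start_deps_sketch(['a', 'b'], called,
                            lambda name: asyncio.ensure_future(job()))
    await start_deps_sketch(['a', 'b'], called, lambda name: None)  # deduped

asyncio.run(demo())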
xflr6/bitsets
bitsets/bases.py
MemberBits.frommembers
def frommembers(cls, members=()):
    """Create a set from an iterable of members."""
    return cls.fromint(sum(map(cls._map.__getitem__, set(members))))
python
[ "def", "frommembers", "(", "cls", ",", "members", "=", "(", ")", ")", ":", "return", "cls", ".", "fromint", "(", "sum", "(", "map", "(", "cls", ".", "_map", ".", "__getitem__", ",", "set", "(", "members", ")", ")", ")", ")" ]
Create a set from an iterable of members.
[ "Create", "a", "set", "from", "an", "iterable", "of", "members", "." ]
ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/bases.py#L31-L33
train
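frommembers sums the per-member bit values stored in cls._map, so duplicated inputs collapse via set(). A short usage sketch, assuming the class comes from the package's public bitsets.bitset() factory; the Letters domain is hypothetical.

import bitsets  # the published xflr6/bitsets package

Letters = bitsets.bitset('Letters', ('a', 'b', 'c', 'd'))
s = Letters.frommembers(['a', 'c', 'a'])  # duplicate 'a' collapses
print(s.members())  # ('a', 'c')
print(s.bits())     # '1010' -- one bit per domain member, 'a' first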
xflr6/bitsets
bitsets/bases.py
MemberBits.frombools
def frombools(cls, bools=()):
    """Create a set from an iterable of boolean evaluable items."""
    return cls.fromint(sum(compress(cls._atoms, bools)))
python
[ "def", "frombools", "(", "cls", ",", "bools", "=", "(", ")", ")", ":", "return", "cls", ".", "fromint", "(", "sum", "(", "compress", "(", "cls", ".", "_atoms", ",", "bools", ")", ")", ")" ]
Create a set from an iterable of boolean evaluable items.
[ "Create", "a", "set", "from", "an", "iterable", "of", "boolean", "evaluable", "items", "." ]
ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/bases.py#L36-L38
train
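frombools pairs each atom (single-bit value) with a flag via itertools.compress, so the i-th truthy flag selects the i-th domain member. Continuing the hypothetical Letters domain from the previous sketch:

import bitsets

Letters = bitsets.bitset('Letters', ('a', 'b', 'c', 'd'))
t = Letters.frombools([True, False, True, False])
print(t == Letters.frommembers(['a', 'c']))  # True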
xflr6/bitsets
bitsets/bases.py
MemberBits.frombits
def frombits(cls, bits='0'):
    """Create a set from binary string."""
    if len(bits) > cls._len:
        raise ValueError('too many bits %r' % (bits,))
    return cls.fromint(bits[::-1], 2)
python
[ "def", "frombits", "(", "cls", ",", "bits", "=", "'0'", ")", ":", "if", "len", "(", "bits", ")", ">", "cls", ".", "_len", ":", "raise", "ValueError", "(", "'too many bits %r'", "%", "(", "bits", ",", ")", ")", "return", "cls", ".", "fromint", "(", "bits", "[", ":", ":", "-", "1", "]", ",", "2", ")" ]
Create a set from binary string.
[ "Create", "a", "set", "from", "binary", "string", "." ]
ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/bases.py#L41-L45
train
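In frombits the string is written with the first domain member leftmost, while int(..., 2) expects the most significant bit first, hence the bits[::-1] reversal before parsing. For example, with the same hypothetical Letters domain:

import bitsets

Letters = bitsets.bitset('Letters', ('a', 'b', 'c', 'd'))
u = Letters.frombits('1010')  # reversed to '0101' == 5 == bits 0 and 2
print(u.members())            # ('a', 'c')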
xflr6/bitsets
bitsets/bases.py
MemberBits.atoms
def atoms(self, reverse=False):
    """Yield the singleton for every set member."""
    if reverse:
        return filter(self.__and__, reversed(self._atoms))
    return filter(self.__and__, self._atoms)
python
[ "def", "atoms", "(", "self", ",", "reverse", "=", "False", ")", ":", "if", "reverse", ":", "return", "filter", "(", "self", ".", "__and__", ",", "reversed", "(", "self", ".", "_atoms", ")", ")", "return", "filter", "(", "self", ".", "__and__", ",", "self", ".", "_atoms", ")" ]
Yield the singleton for every set member.
[ "Yield", "the", "singleton", "for", "every", "set", "member", "." ]
ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/bases.py#L77-L81
train
xflr6/bitsets
bitsets/bases.py
MemberBits.inatoms
def inatoms(self, reverse=False):
    """Yield the singleton for every non-member."""
    if reverse:
        return filterfalse(self.__and__, reversed(self._atoms))
    return filterfalse(self.__and__, self._atoms)
python
[ "def", "inatoms", "(", "self", ",", "reverse", "=", "False", ")", ":", "if", "reverse", ":", "return", "filterfalse", "(", "self", ".", "__and__", ",", "reversed", "(", "self", ".", "_atoms", ")", ")", "return", "filterfalse", "(", "self", ".", "__and__", ",", "self", ".", "_atoms", ")" ]
Yield the singleton for every non-member.
[ "Yield", "the", "singleton", "for", "every", "non", "-", "member", "." ]
ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/bases.py#L83-L87
train
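atoms and inatoms are complementary filters over cls._atoms, the per-member singletons: self.__and__ keeps the singletons whose bit is set (atoms), while filterfalse keeps the ones whose bit is clear (inatoms). Continuing the hypothetical Letters domain:

import bitsets

Letters = bitsets.bitset('Letters', ('a', 'b', 'c', 'd'))
v = Letters.frommembers(['b', 'd'])
print([a.members() for a in v.atoms()])    # [('b',), ('d',)]
print([a.members() for a in v.inatoms()])  # [('a',), ('c',)]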
xflr6/bitsets
bitsets/bases.py
MemberBits.powerset
def powerset(self, start=None, excludestart=False):
    """Yield combinations from start to self in short lexicographic order."""
    if start is None:
        start = self.infimum
        other = self.atoms()
    else:
        if self | start != self:
            raise ValueError('%r is no subset of %r' % (start, self))
        other = self.fromint(self & ~start).atoms()
    return map(self.frombitset, combos.shortlex(start, list(other)))
python
[ "def", "powerset", "(", "self", ",", "start", "=", "None", ",", "excludestart", "=", "False", ")", ":", "if", "start", "is", "None", ":", "start", "=", "self", ".", "infimum", "other", "=", "self", ".", "atoms", "(", ")", "else", ":", "if", "self", "|", "start", "!=", "self", ":", "raise", "ValueError", "(", "'%r is no subset of %r'", "%", "(", "start", ",", "self", ")", ")", "other", "=", "self", ".", "fromint", "(", "self", "&", "~", "start", ")", ".", "atoms", "(", ")", "return", "map", "(", "self", ".", "frombitset", ",", "combos", ".", "shortlex", "(", "start", ",", "list", "(", "other", ")", ")", ")" ]
Yield combinations from start to self in short lexicographic order.
[ "Yield", "combinations", "from", "start", "to", "self", "in", "short", "lexicographic", "order", "." ]
ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf
https://github.com/xflr6/bitsets/blob/ddcfe17e7c7a11f71f1c6764b2cecf7db05d9cdf/bitsets/bases.py#L89-L98
train
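powerset first validates that start is a subset (self | start == self), then enumerates every combination of the remaining atoms in shortlex order, i.e. by size first and lexicographically within each size. A sketch on the hypothetical Letters domain:

import bitsets

Letters = bitsets.bitset('Letters', ('a', 'b', 'c', 'd'))
w = Letters.frommembers(['a', 'b', 'c'])
print([p.members() for p in w.powerset()])
# [(), ('a',), ('b',), ('c',), ('a', 'b'), ('a', 'c'), ('b', 'c'),
#  ('a', 'b', 'c')]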
druids/django-chamber
chamber/shortcuts.py
change
def change(obj, **changed_fields):
    """
    Changes a given `changed_fields` on object and returns changed object.
    """
    obj_field_names = {
        field.name for field in obj._meta.fields
    } | {
        field.attname for field in obj._meta.fields
    } | {'pk'}

    for field_name, value in changed_fields.items():
        if field_name not in obj_field_names:
            raise ValueError("'{}' is an invalid field name".format(field_name))

        setattr(obj, field_name, value)

    return obj
python
[ "def", "change", "(", "obj", ",", "*", "*", "changed_fields", ")", ":", "obj_field_names", "=", "{", "field", ".", "name", "for", "field", "in", "obj", ".", "_meta", ".", "fields", "}", "|", "{", "field", ".", "attname", "for", "field", "in", "obj", ".", "_meta", ".", "fields", "}", "|", "{", "'pk'", "}", "for", "field_name", ",", "value", "in", "changed_fields", ".", "items", "(", ")", ":", "if", "field_name", "not", "in", "obj_field_names", ":", "raise", "ValueError", "(", "\"'{}' is an invalid field name\"", ".", "format", "(", "field_name", ")", ")", "setattr", "(", "obj", ",", "field_name", ",", "value", ")", "return", "obj" ]
Changes a given `changed_fields` on object and returns changed object.
[ "Changes", "a", "given", "changed_fields", "on", "object", "and", "returns", "changed", "object", "." ]
eef4169923557e96877a664fa254e8c0814f3f23
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/shortcuts.py#L54-L68
train
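change() validates every keyword against the model's _meta field names (plus attnames and 'pk') before calling setattr, and never touches the database. A hedged sketch; it requires a configured Django project, and Article with its fields is a hypothetical model used purely for illustration.

from chamber.shortcuts import change

article = Article.objects.get(pk=1)     # hypothetical model
change(article, title='Updated title')  # mutated in memory only, no save()
change(article, no_such_field='boom')   # raises ValueError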
druids/django-chamber
chamber/shortcuts.py
change_and_save
def change_and_save(obj, update_only_changed_fields=False, save_kwargs=None, **changed_fields):
    """
    Changes a given `changed_fields` on object, saves it and returns changed object.
    """
    from chamber.models import SmartModel

    change(obj, **changed_fields)
    if update_only_changed_fields and not isinstance(obj, SmartModel):
        raise TypeError('update_only_changed_fields can be used only with SmartModel')

    save_kwargs = save_kwargs if save_kwargs is not None else {}
    if update_only_changed_fields:
        save_kwargs['update_only_changed_fields'] = True
    obj.save(**save_kwargs)
    return obj
python
[ "def", "change_and_save", "(", "obj", ",", "update_only_changed_fields", "=", "False", ",", "save_kwargs", "=", "None", ",", "*", "*", "changed_fields", ")", ":", "from", "chamber", ".", "models", "import", "SmartModel", "change", "(", "obj", ",", "*", "*", "changed_fields", ")", "if", "update_only_changed_fields", "and", "not", "isinstance", "(", "obj", ",", "SmartModel", ")", ":", "raise", "TypeError", "(", "'update_only_changed_fields can be used only with SmartModel'", ")", "save_kwargs", "=", "save_kwargs", "if", "save_kwargs", "is", "not", "None", "else", "{", "}", "if", "update_only_changed_fields", ":", "save_kwargs", "[", "'update_only_changed_fields'", "]", "=", "True", "obj", ".", "save", "(", "*", "*", "save_kwargs", ")", "return", "obj" ]
Changes a given `changed_fields` on object, saves it and returns changed object.
[ "Changes", "a", "given", "changed_fields", "on", "object", "saves", "it", "and", "returns", "changed", "object", "." ]
eef4169923557e96877a664fa254e8c0814f3f23
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/shortcuts.py#L71-L86
train
druids/django-chamber
chamber/shortcuts.py
bulk_change_and_save
def bulk_change_and_save(iterable, update_only_changed_fields=False, save_kwargs=None, **changed_fields):
    """
    Changes a given `changed_fields` on each object in a given `iterable`,
    saves objects and returns the changed objects.
    """
    return [
        change_and_save(obj, update_only_changed_fields=update_only_changed_fields,
                        save_kwargs=save_kwargs, **changed_fields)
        for obj in iterable
    ]
python
[ "def", "bulk_change_and_save", "(", "iterable", ",", "update_only_changed_fields", "=", "False", ",", "save_kwargs", "=", "None", ",", "*", "*", "changed_fields", ")", ":", "return", "[", "change_and_save", "(", "obj", ",", "update_only_changed_fields", "=", "update_only_changed_fields", ",", "save_kwargs", "=", "save_kwargs", ",", "*", "*", "changed_fields", ")", "for", "obj", "in", "iterable", "]" ]
Changes a given `changed_fields` on each object in a given `iterable`, saves objects and returns the changed objects.
[ "Changes", "a", "given", "changed_fields", "on", "each", "object", "in", "a", "given", "iterable", "saves", "objects", "and", "returns", "the", "changed", "objects", "." ]
eef4169923557e96877a664fa254e8c0814f3f23
https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/shortcuts.py#L96-L105
train
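change_and_save adds the save step on top of change, and bulk_change_and_save simply maps it over an iterable, issuing one UPDATE per object rather than a single bulk SQL update. Continuing the hypothetical Article model; per the isinstance check above, update_only_changed_fields=True raises TypeError unless the instances are chamber SmartModel subclasses.

from chamber.shortcuts import bulk_change_and_save, change_and_save

change_and_save(article, title='Final title')  # change() + obj.save()
bulk_change_and_save(Article.objects.filter(draft=True), draft=False)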
guaix-ucm/numina
numina/modeling/gaussbox.py
gauss_box_model
def gauss_box_model(x, amplitude=1.0, mean=0.0, stddev=1.0, hpix=0.5):
    """Integrate a Gaussian profile."""
    z = (x - mean) / stddev
    z2 = z + hpix / stddev
    z1 = z - hpix / stddev
    return amplitude * (norm.cdf(z2) - norm.cdf(z1))
python
[ "def", "gauss_box_model", "(", "x", ",", "amplitude", "=", "1.0", ",", "mean", "=", "0.0", ",", "stddev", "=", "1.0", ",", "hpix", "=", "0.5", ")", ":", "z", "=", "(", "x", "-", "mean", ")", "/", "stddev", "z2", "=", "z", "+", "hpix", "/", "stddev", "z1", "=", "z", "-", "hpix", "/", "stddev", "return", "amplitude", "*", "(", "norm", ".", "cdf", "(", "z2", ")", "-", "norm", ".", "cdf", "(", "z1", ")", ")" ]
Integrate a Gaussian profile.
[ "Integrate", "a", "Gaussian", "profile", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/modeling/gaussbox.py#L24-L29
train
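Spelled out, gauss_box_model evaluates the exact area of a Gaussian over a pixel of half-width hpix centred at x, written with the standard normal CDF; taking A = amplitude, \mu = mean, \sigma = stddev and h = hpix:

F(x) = A\,\bigl[\Phi(z_2) - \Phi(z_1)\bigr],
\qquad z_{1,2} = \frac{x - \mu \mp h}{\sigma}
\quad\Longleftrightarrow\quad
F(x) = A \int_{x-h}^{x+h} \frac{1}{\sigma\sqrt{2\pi}}\,
       e^{-(t-\mu)^{2}/2\sigma^{2}}\,\mathrm{d}t .

With the default hpix = 0.5 the pixels tile the axis, so a well-sampled profile sums to A.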
guaix-ucm/numina
numina/modeling/gaussbox.py
gauss_box_model_deriv
def gauss_box_model_deriv(x, amplitude=1.0, mean=0.0, stddev=1.0, hpix=0.5):
    """Derivative of the integral of a Gaussian profile."""
    z = (x - mean) / stddev
    z2 = z + hpix / stddev
    z1 = z - hpix / stddev

    da = norm.cdf(z2) - norm.cdf(z1)

    fp2 = norm_pdf_t(z2)
    fp1 = norm_pdf_t(z1)

    dl = -amplitude / stddev * (fp2 - fp1)
    ds = -amplitude / stddev * (fp2 * z2 - fp1 * z1)
    dd = amplitude / stddev * (fp2 + fp1)

    return da, dl, ds, dd
python
[ "def", "gauss_box_model_deriv", "(", "x", ",", "amplitude", "=", "1.0", ",", "mean", "=", "0.0", ",", "stddev", "=", "1.0", ",", "hpix", "=", "0.5", ")", ":", "z", "=", "(", "x", "-", "mean", ")", "/", "stddev", "z2", "=", "z", "+", "hpix", "/", "stddev", "z1", "=", "z", "-", "hpix", "/", "stddev", "da", "=", "norm", ".", "cdf", "(", "z2", ")", "-", "norm", ".", "cdf", "(", "z1", ")", "fp2", "=", "norm_pdf_t", "(", "z2", ")", "fp1", "=", "norm_pdf_t", "(", "z1", ")", "dl", "=", "-", "amplitude", "/", "stddev", "*", "(", "fp2", "-", "fp1", ")", "ds", "=", "-", "amplitude", "/", "stddev", "*", "(", "fp2", "*", "z2", "-", "fp1", "*", "z1", ")", "dd", "=", "amplitude", "/", "stddev", "*", "(", "fp2", "+", "fp1", ")", "return", "da", ",", "dl", ",", "ds", ",", "dd" ]
Derivative of the integral of a Gaussian profile.
[ "Derivative", "of", "the", "integral", "of", "a", "Gaussian", "profile", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/modeling/gaussbox.py#L32-L47
train
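The returned tuple (da, dl, ds, dd) holds the partial derivatives of F(x) = A\,[\Phi(z_2) - \Phi(z_1)] with respect to amplitude, mean, stddev and hpix, in that order. With \varphi the standard normal pdf (norm_pdf_t), the chain rule on z_{1,2} = (x - \mu \mp h)/\sigma gives:

\frac{\partial F}{\partial A} = \Phi(z_2) - \Phi(z_1), \qquad
\frac{\partial F}{\partial \mu} = -\frac{A}{\sigma}\,\bigl[\varphi(z_2) - \varphi(z_1)\bigr],

\frac{\partial F}{\partial \sigma} = -\frac{A}{\sigma}\,\bigl[z_2\varphi(z_2) - z_1\varphi(z_1)\bigr], \qquad
\frac{\partial F}{\partial h} = \frac{A}{\sigma}\,\bigl[\varphi(z_2) + \varphi(z_1)\bigr],

matching the code's da, dl, ds and dd term by term.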
guaix-ucm/numina
numina/array/wavecalib/peaks_spectrum.py
find_peaks_spectrum
def find_peaks_spectrum(sx, nwinwidth, threshold=0, debugplot=0):
    """Find peaks in array.

    The algorithm imposes that the signal at both sides of the peak
    decreases monotonically.

    Parameters
    ----------
    sx : 1d numpy array, floats
        Input array.
    nwinwidth : int
        Width of the window where each peak must be found.
    threshold : float
        Minimum signal in the peaks.
    debugplot : int
        Determines whether intermediate computations and/or plots
        are displayed:
        00 : no debug, no plots
        01 : no debug, plots without pauses
        02 : no debug, plots with pauses
        10 : debug, no plots
        11 : debug, plots without pauses
        12 : debug, plots with pauses

    Returns
    -------
    ixpeaks : 1d numpy array, int
        Peak locations, in array coordinates (integers).

    """
    if type(sx) is not np.ndarray:
        raise ValueError("sx=" + str(sx) + " must be a numpy.ndarray")
    elif sx.ndim != 1:
        raise ValueError("sx.ndim=" + str(sx.ndim) + " must be 1")

    sx_shape = sx.shape
    nmed = nwinwidth // 2

    if debugplot >= 10:
        print('find_peaks_spectrum> sx shape......:', sx_shape)
        print('find_peaks_spectrum> nwinwidth.....:', nwinwidth)
        print('find_peaks_spectrum> nmed..........:', nmed)
        print('find_peaks_spectrum> data_threshold:', threshold)
        print('find_peaks_spectrum> the first and last', nmed,
              'pixels will be ignored')

    xpeaks = []  # list to store the peaks

    if sx_shape[0] < nwinwidth:
        print('find_peaks_spectrum> sx shape......:', sx_shape)
        print('find_peaks_spectrum> nwinwidth.....:', nwinwidth)
        raise ValueError('sx.shape < nwinwidth')

    i = nmed
    while i < sx_shape[0] - nmed:
        if sx[i] > threshold:
            peak_ok = True
            # the signal must rise monotonically up to the candidate pixel
            j = 0
            loop = True
            while loop:
                if sx[i - nmed + j] > sx[i - nmed + j + 1]:
                    peak_ok = False
                j += 1
                loop = (j < nmed) and peak_ok
            # and decrease monotonically after it
            if peak_ok:
                j = nmed + 1
                loop = True
                while loop:
                    if sx[i - nmed + j - 1] < sx[i - nmed + j]:
                        peak_ok = False
                    j += 1
                    loop = (j < nwinwidth) and peak_ok
            if peak_ok:
                xpeaks.append(i)
                i += nwinwidth - 1
            else:
                i += 1
        else:
            i += 1

    ixpeaks = np.array(xpeaks)

    if debugplot >= 10:
        print('find_peaks_spectrum> number of peaks found:', len(ixpeaks))
        print(ixpeaks)

    return ixpeaks
python
[ "def", "find_peaks_spectrum", "(", "sx", ",", "nwinwidth", ",", "threshold", "=", "0", ",", "debugplot", "=", "0", ")", ":", "if", "type", "(", "sx", ")", "is", "not", "np", ".", "ndarray", ":", "raise", "ValueError", "(", "\"sx=\"", "+", "str", "(", "sx", ")", "+", "\" must be a numpy.ndarray\"", ")", "elif", "sx", ".", "ndim", "is", "not", "1", ":", "raise", "ValueError", "(", "\"sx.ndim=\"", "+", "str", "(", "sx", ".", "ndim", ")", "+", "\" must be 1\"", ")", "sx_shape", "=", "sx", ".", "shape", "nmed", "=", "nwinwidth", "//", "2", "if", "debugplot", ">=", "10", ":", "print", "(", "'find_peaks_spectrum> sx shape......:'", ",", "sx_shape", ")", "print", "(", "'find_peaks_spectrum> nwinwidth.....:'", ",", "nwinwidth", ")", "print", "(", "'find_peaks_spectrum> nmed..........:'", ",", "nmed", ")", "print", "(", "'find_peaks_spectrum> data_threshold:'", ",", "threshold", ")", "print", "(", "'find_peaks_spectrum> the first and last'", ",", "nmed", ",", "'pixels will be ignored'", ")", "xpeaks", "=", "[", "]", "# list to store the peaks", "if", "sx_shape", "[", "0", "]", "<", "nwinwidth", ":", "print", "(", "'find_peaks_spectrum> sx shape......:'", ",", "sx_shape", ")", "print", "(", "'find_peaks_spectrum> nwinwidth.....:'", ",", "nwinwidth", ")", "raise", "ValueError", "(", "'sx.shape < nwinwidth'", ")", "i", "=", "nmed", "while", "i", "<", "sx_shape", "[", "0", "]", "-", "nmed", ":", "if", "sx", "[", "i", "]", ">", "threshold", ":", "peak_ok", "=", "True", "j", "=", "0", "loop", "=", "True", "while", "loop", ":", "if", "sx", "[", "i", "-", "nmed", "+", "j", "]", ">", "sx", "[", "i", "-", "nmed", "+", "j", "+", "1", "]", ":", "peak_ok", "=", "False", "j", "+=", "1", "loop", "=", "(", "j", "<", "nmed", ")", "and", "peak_ok", "if", "peak_ok", ":", "j", "=", "nmed", "+", "1", "loop", "=", "True", "while", "loop", ":", "if", "sx", "[", "i", "-", "nmed", "+", "j", "-", "1", "]", "<", "sx", "[", "i", "-", "nmed", "+", "j", "]", ":", "peak_ok", "=", "False", "j", "+=", "1", "loop", "=", "(", "j", "<", "nwinwidth", ")", "and", "peak_ok", "if", "peak_ok", ":", "xpeaks", ".", "append", "(", "i", ")", "i", "+=", "nwinwidth", "-", "1", "else", ":", "i", "+=", "1", "else", ":", "i", "+=", "1", "ixpeaks", "=", "np", ".", "array", "(", "xpeaks", ")", "if", "debugplot", ">=", "10", ":", "print", "(", "'find_peaks_spectrum> number of peaks found:'", ",", "len", "(", "ixpeaks", ")", ")", "print", "(", "ixpeaks", ")", "return", "ixpeaks" ]
Find peaks in array.

The algorithm imposes that the signal at both sides of the peak
decreases monotonically.

Parameters
----------
sx : 1d numpy array, floats
    Input array.
nwinwidth : int
    Width of the window where each peak must be found.
threshold : float
    Minimum signal in the peaks.
debugplot : int
    Determines whether intermediate computations and/or plots
    are displayed:
    00 : no debug, no plots
    01 : no debug, plots without pauses
    02 : no debug, plots with pauses
    10 : debug, no plots
    11 : debug, plots without pauses
    12 : debug, plots with pauses

Returns
-------
ixpeaks : 1d numpy array, int
    Peak locations, in array coordinates (integers).
[ "Find", "peaks", "in", "array", "." ]
6c829495df8937f77c2de9383c1038ffb3e713e3
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/peaks_spectrum.py#L19-L106
train
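A self-contained check of the scanning logic on synthetic data (the line positions, widths and threshold are hypothetical, chosen so each line is strictly monotonic on both sides of its maximum within the 5-pixel window, as the algorithm requires):

import numpy as np
from numina.array.wavecalib.peaks_spectrum import find_peaks_spectrum

# Two synthetic Gaussian emission lines at pixels 50 and 130, sigma = 2.
x = np.arange(200, dtype=float)
sx = (np.exp(-0.5 * ((x - 50.0) / 2.0) ** 2)
      + 0.5 * np.exp(-0.5 * ((x - 130.0) / 2.0) ** 2))
print(find_peaks_spectrum(sx, nwinwidth=5, threshold=0.1))
# expected: [ 50 130]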