code | code_sememe | token_type | code_dependency
---|---|---|---
def _format_output(selected_number, raw_data):
"""Format data to get a readable output"""
tmp_data = {}
data = collections.defaultdict(lambda: 0)
balance = raw_data.pop('balance')
for number in raw_data.keys():
tmp_data = dict([(k, int(v) if v is not None else "No limit")
for k, v in raw_data[number].items()])
tmp_data['number'] = number
if selected_number is None or selected_number == number:
data[number] = tmp_data
output = ("""Account Balance
=======
Balance: {:.2f} $
""")
print(output.format(balance))
for number_data in data.values():
_print_number(number_data) | def function[_format_output, parameter[selected_number, raw_data]]:
constant[Format data to get a readable output]
variable[tmp_data] assign[=] dictionary[[], []]
variable[data] assign[=] call[name[collections].defaultdict, parameter[<ast.Lambda object at 0x7da1b179c7f0>]]
variable[balance] assign[=] call[name[raw_data].pop, parameter[constant[balance]]]
for taget[name[number]] in starred[call[name[raw_data].keys, parameter[]]] begin[:]
variable[tmp_data] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da1b179cf40>]]
call[name[tmp_data]][constant[number]] assign[=] name[number]
if <ast.BoolOp object at 0x7da1b179d270> begin[:]
call[name[data]][name[number]] assign[=] name[tmp_data]
variable[output] assign[=] constant[Account Balance
=======

Balance: {:.2f} $
]
call[name[print], parameter[call[name[output].format, parameter[name[balance]]]]]
for taget[name[number_data]] in starred[call[name[data].values, parameter[]]] begin[:]
call[name[_print_number], parameter[name[number_data]]] | keyword[def] identifier[_format_output] ( identifier[selected_number] , identifier[raw_data] ):
literal[string]
identifier[tmp_data] ={}
identifier[data] = identifier[collections] . identifier[defaultdict] ( keyword[lambda] : literal[int] )
identifier[balance] = identifier[raw_data] . identifier[pop] ( literal[string] )
keyword[for] identifier[number] keyword[in] identifier[raw_data] . identifier[keys] ():
identifier[tmp_data] = identifier[dict] ([( identifier[k] , identifier[int] ( identifier[v] ) keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] keyword[else] literal[string] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[raw_data] [ identifier[number] ]. identifier[items] ()])
identifier[tmp_data] [ literal[string] ]= identifier[number]
keyword[if] identifier[selected_number] keyword[is] keyword[None] keyword[or] identifier[selected_number] == identifier[number] :
identifier[data] [ identifier[number] ]= identifier[tmp_data]
identifier[output] =( literal[string] )
identifier[print] ( identifier[output] . identifier[format] ( identifier[balance] ))
keyword[for] identifier[number_data] keyword[in] identifier[data] . identifier[values] ():
identifier[_print_number] ( identifier[number_data] ) | def _format_output(selected_number, raw_data):
"""Format data to get a readable output"""
tmp_data = {}
data = collections.defaultdict(lambda : 0)
balance = raw_data.pop('balance')
for number in raw_data.keys():
tmp_data = dict([(k, int(v) if v is not None else 'No limit') for (k, v) in raw_data[number].items()])
tmp_data['number'] = number
if selected_number is None or selected_number == number:
data[number] = tmp_data # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['number']]
output = 'Account Balance\n=======\n\nBalance: {:.2f} $\n'
print(output.format(balance))
for number_data in data.values():
_print_number(number_data) # depends on [control=['for'], data=['number_data']] |
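# A hedged sketch of the input _format_output expects, inferred from the row
# above: 'balance' is popped first, and every remaining key is treated as a
# number mapping to its limits, with None rendered as "No limit". The keys
# 'data' and 'sms' are illustrative, not from the source.
#
#   raw_data = {
#       'balance': 12.5,
#       '0612345678': {'data': 1024, 'sms': None},
#   }
#   _format_output(None, raw_data)  # prints the balance, then each number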
def fetch_raw(self, **kw):
"""
Fetch the records for this query. This fetch type will return the
results in raw dict format. It is possible to limit the number of
receives on the socket that return results before exiting by providing
max_recv.
This fetch should be used if you want to return only the result records
returned from the query in raw dict format. Any other dict key/values
from the raw query are ignored.
:param int max_recv: max number of socket receive calls before
returning from this query. If you want to wait longer for
results before returning, increase max_recv (default: 0)
:return: list of query results
:rtype: list(dict)
"""
self.sockopt.update(kw)
with SMCSocketProtocol(self, **self.sockopt) as protocol:
for result in protocol.receive():
if 'records' in result and result['records'].get('added'):
yield result['records']['added'] | def function[fetch_raw, parameter[self]]:
constant[
Fetch the records for this query. This fetch type will return the
results in raw dict format. It is possible to limit the number of
receives on the socket that return results before exiting by providing
max_recv.
This fetch should be used if you want to return only the result records
returned from the query in raw dict format. Any other dict key/values
from the raw query are ignored.
:param int max_recv: max number of socket receive calls before
returning from this query. If you want to wait longer for
results before returning, increase max_recv (default: 0)
:return: list of query results
:rtype: list(dict)
]
call[name[self].sockopt.update, parameter[name[kw]]]
with call[name[SMCSocketProtocol], parameter[name[self]]] begin[:]
for taget[name[result]] in starred[call[name[protocol].receive, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b1a2f640> begin[:]
<ast.Yield object at 0x7da1b1a2c7f0> | keyword[def] identifier[fetch_raw] ( identifier[self] ,** identifier[kw] ):
literal[string]
identifier[self] . identifier[sockopt] . identifier[update] ( identifier[kw] )
keyword[with] identifier[SMCSocketProtocol] ( identifier[self] ,** identifier[self] . identifier[sockopt] ) keyword[as] identifier[protocol] :
keyword[for] identifier[result] keyword[in] identifier[protocol] . identifier[receive] ():
keyword[if] literal[string] keyword[in] identifier[result] keyword[and] identifier[result] [ literal[string] ]. identifier[get] ( literal[string] ):
keyword[yield] identifier[result] [ literal[string] ][ literal[string] ] | def fetch_raw(self, **kw):
"""
Fetch the records for this query. This fetch type will return the
results in raw dict format. It is possible to limit the number of
receives on the socket that return results before exiting by providing
max_recv.
This fetch should be used if you want to return only the result records
returned from the query in raw dict format. Any other dict key/values
from the raw query are ignored.
:param int max_recv: max number of socket receive calls before
returning from this query. If you want to wait longer for
results before returning, increase max_recv (default: 0)
:return: list of query results
:rtype: list(dict)
"""
self.sockopt.update(kw)
with SMCSocketProtocol(self, **self.sockopt) as protocol:
for result in protocol.receive():
if 'records' in result and result['records'].get('added'):
yield result['records']['added'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['result']] # depends on [control=['with'], data=['protocol']] |
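# A hedged usage sketch for fetch_raw: it is a generator, so callers iterate
# to stream batches of raw records as the socket yields them. The `query`
# object below is assumed, not shown in this row.
#
#   for records in query.fetch_raw(max_recv=3):
#       for record in records:
#           print(record)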
def update(self, value=0):
"""
Update the value of the progress and update progress bar.
Parameters
-----------
value : integer
The current iteration of the progress
"""
self._deltas.append(time.time())
self.value = value
self._percent = 100.0 * self.value / self.num
if self.bar:
self._bars = self._bar_symbol*int(np.round(self._percent / 100. * self._barsize))
if (len(self._deltas) < 2) or (self._deltas[-1] - self._deltas[-2]) > 1e-1:
self._estimate_time()
self._draw()
if self.value == self.num:
self.end() | def function[update, parameter[self, value]]:
constant[
Update the value of the progress and update progress bar.
Parameters
-----------
value : integer
The current iteration of the progress
]
call[name[self]._deltas.append, parameter[call[name[time].time, parameter[]]]]
name[self].value assign[=] name[value]
name[self]._percent assign[=] binary_operation[binary_operation[constant[100.0] * name[self].value] / name[self].num]
if name[self].bar begin[:]
name[self]._bars assign[=] binary_operation[name[self]._bar_symbol * call[name[int], parameter[call[name[np].round, parameter[binary_operation[binary_operation[name[self]._percent / constant[100.0]] * name[self]._barsize]]]]]]
if <ast.BoolOp object at 0x7da18ede7c40> begin[:]
call[name[self]._estimate_time, parameter[]]
call[name[self]._draw, parameter[]]
if compare[name[self].value equal[==] name[self].num] begin[:]
call[name[self].end, parameter[]] | keyword[def] identifier[update] ( identifier[self] , identifier[value] = literal[int] ):
literal[string]
identifier[self] . identifier[_deltas] . identifier[append] ( identifier[time] . identifier[time] ())
identifier[self] . identifier[value] = identifier[value]
identifier[self] . identifier[_percent] = literal[int] * identifier[self] . identifier[value] / identifier[self] . identifier[num]
keyword[if] identifier[self] . identifier[bar] :
identifier[self] . identifier[_bars] = identifier[self] . identifier[_bar_symbol] * identifier[int] ( identifier[np] . identifier[round] ( identifier[self] . identifier[_percent] / literal[int] * identifier[self] . identifier[_barsize] ))
keyword[if] ( identifier[len] ( identifier[self] . identifier[_deltas] )< literal[int] ) keyword[or] ( identifier[self] . identifier[_deltas] [- literal[int] ]- identifier[self] . identifier[_deltas] [- literal[int] ])> literal[int] :
identifier[self] . identifier[_estimate_time] ()
identifier[self] . identifier[_draw] ()
keyword[if] identifier[self] . identifier[value] == identifier[self] . identifier[num] :
identifier[self] . identifier[end] () | def update(self, value=0):
"""
Update the value of the progress and update progress bar.
Parameters
-----------
value : integer
The current iteration of the progress
"""
self._deltas.append(time.time())
self.value = value
self._percent = 100.0 * self.value / self.num
if self.bar:
self._bars = self._bar_symbol * int(np.round(self._percent / 100.0 * self._barsize)) # depends on [control=['if'], data=[]]
if len(self._deltas) < 2 or self._deltas[-1] - self._deltas[-2] > 0.1:
self._estimate_time()
self._draw() # depends on [control=['if'], data=[]]
if self.value == self.num:
self.end() # depends on [control=['if'], data=[]] |
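# A sketch of the intended driver loop, assuming a progress-bar class whose
# constructor sets `num` (the constructor is not part of this row). update()
# redraws at most every 0.1 s and calls end() once value reaches num.
#
#   bar = ProgressBar(num=100)   # hypothetical constructor
#   for i in range(1, 101):
#       do_work()                # placeholder for the real work
#       bar.update(i)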
def word_literals(self):
"""The list of literals per word in ``words`` layer."""
literals = []
for word_synsets in self.synsets:
word_literals = set()
for synset in word_synsets:
for variant in synset.get(SYN_VARIANTS):
if LITERAL in variant:
word_literals.add(variant[LITERAL])
literals.append(list(sorted(word_literals)))
return literals | def function[word_literals, parameter[self]]:
constant[The list of literals per word in ``words`` layer.]
variable[literals] assign[=] list[[]]
for taget[name[word_synsets]] in starred[name[self].synsets] begin[:]
variable[word_literals] assign[=] call[name[set], parameter[]]
for taget[name[synset]] in starred[name[word_synsets]] begin[:]
for taget[name[variant]] in starred[call[name[synset].get, parameter[name[SYN_VARIANTS]]]] begin[:]
if compare[name[LITERAL] in name[variant]] begin[:]
call[name[word_literals].add, parameter[call[name[variant]][name[LITERAL]]]]
call[name[literals].append, parameter[call[name[list], parameter[call[name[sorted], parameter[name[word_literals]]]]]]]
return[name[literals]] | keyword[def] identifier[word_literals] ( identifier[self] ):
literal[string]
identifier[literals] =[]
keyword[for] identifier[word_synsets] keyword[in] identifier[self] . identifier[synsets] :
identifier[word_literals] = identifier[set] ()
keyword[for] identifier[synset] keyword[in] identifier[word_synsets] :
keyword[for] identifier[variant] keyword[in] identifier[synset] . identifier[get] ( identifier[SYN_VARIANTS] ):
keyword[if] identifier[LITERAL] keyword[in] identifier[variant] :
identifier[word_literals] . identifier[add] ( identifier[variant] [ identifier[LITERAL] ])
identifier[literals] . identifier[append] ( identifier[list] ( identifier[sorted] ( identifier[word_literals] )))
keyword[return] identifier[literals] | def word_literals(self):
"""The list of literals per word in ``words`` layer."""
literals = []
for word_synsets in self.synsets:
word_literals = set()
for synset in word_synsets:
for variant in synset.get(SYN_VARIANTS):
if LITERAL in variant:
word_literals.add(variant[LITERAL]) # depends on [control=['if'], data=['LITERAL', 'variant']] # depends on [control=['for'], data=['variant']] # depends on [control=['for'], data=['synset']]
literals.append(list(sorted(word_literals))) # depends on [control=['for'], data=['word_synsets']]
return literals |
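# An illustrative shape for the data word_literals walks; 'variants' and
# 'literal' stand in for the SYN_VARIANTS and LITERAL constants, which are
# not defined in this row. One sorted list of literals comes back per word.
#
#   self.synsets = [[{'variants': [{'literal': 'kass'},
#                                  {'literal': 'kiisu'}]}]]
#   self.word_literals()  # -> [['kass', 'kiisu']]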
def open_new_window(self, switch_to=True):
""" Opens a new browser tab/window and switches to it by default. """
self.driver.execute_script("window.open('');")
time.sleep(0.01)
if switch_to:
self.switch_to_window(len(self.driver.window_handles) - 1) | def function[open_new_window, parameter[self, switch_to]]:
constant[ Opens a new browser tab/window and switches to it by default. ]
call[name[self].driver.execute_script, parameter[constant[window.open('');]]]
call[name[time].sleep, parameter[constant[0.01]]]
if name[switch_to] begin[:]
call[name[self].switch_to_window, parameter[binary_operation[call[name[len], parameter[name[self].driver.window_handles]] - constant[1]]]] | keyword[def] identifier[open_new_window] ( identifier[self] , identifier[switch_to] = keyword[True] ):
literal[string]
identifier[self] . identifier[driver] . identifier[execute_script] ( literal[string] )
identifier[time] . identifier[sleep] ( literal[int] )
keyword[if] identifier[switch_to] :
identifier[self] . identifier[switch_to_window] ( identifier[len] ( identifier[self] . identifier[driver] . identifier[window_handles] )- literal[int] ) | def open_new_window(self, switch_to=True):
""" Opens a new browser tab/window and switches to it by default. """
self.driver.execute_script("window.open('');")
time.sleep(0.01)
if switch_to:
self.switch_to_window(len(self.driver.window_handles) - 1) # depends on [control=['if'], data=[]] |
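# A minimal usage sketch, assuming a Selenium-backed wrapper that exposes
# `driver` and `switch_to_window` as used above.
#
#   self.open_new_window()       # open a blank tab and switch focus to it
#   self.open_new_window(False)  # open the tab, keep the current handle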
def proto_0201(theABF):
"""protocol: membrane test."""
abf=ABF(theABF)
abf.log.info("analyzing as a membrane test")
plot=ABFplot(abf)
plot.figure_height,plot.figure_width=SQUARESIZE/2,SQUARESIZE/2
plot.figure_sweeps()
# save it
plt.tight_layout()
frameAndSave(abf,"membrane test")
plt.close('all') | def function[proto_0201, parameter[theABF]]:
constant[protocol: membrane test.]
variable[abf] assign[=] call[name[ABF], parameter[name[theABF]]]
call[name[abf].log.info, parameter[constant[analyzing as a membrane test]]]
variable[plot] assign[=] call[name[ABFplot], parameter[name[abf]]]
<ast.Tuple object at 0x7da1b00e61a0> assign[=] tuple[[<ast.BinOp object at 0x7da1b00e68f0>, <ast.BinOp object at 0x7da1b00e5930>]]
call[name[plot].figure_sweeps, parameter[]]
call[name[plt].tight_layout, parameter[]]
call[name[frameAndSave], parameter[name[abf], constant[membrane test]]]
call[name[plt].close, parameter[constant[all]]] | keyword[def] identifier[proto_0201] ( identifier[theABF] ):
literal[string]
identifier[abf] = identifier[ABF] ( identifier[theABF] )
identifier[abf] . identifier[log] . identifier[info] ( literal[string] )
identifier[plot] = identifier[ABFplot] ( identifier[abf] )
identifier[plot] . identifier[figure_height] , identifier[plot] . identifier[figure_width] = identifier[SQUARESIZE] / literal[int] , identifier[SQUARESIZE] / literal[int]
identifier[plot] . identifier[figure_sweeps] ()
identifier[plt] . identifier[tight_layout] ()
identifier[frameAndSave] ( identifier[abf] , literal[string] )
identifier[plt] . identifier[close] ( literal[string] ) | def proto_0201(theABF):
"""protocol: membrane test."""
abf = ABF(theABF)
abf.log.info('analyzing as a membrane test')
plot = ABFplot(abf)
(plot.figure_height, plot.figure_width) = (SQUARESIZE / 2, SQUARESIZE / 2)
plot.figure_sweeps()
# save it
plt.tight_layout()
frameAndSave(abf, 'membrane test')
plt.close('all') |
def set_alpn_protos(self, protos):
"""
Specify the protocols that the client is prepared to speak after the
TLS connection has been negotiated using Application Layer Protocol
Negotiation.
:param protos: A list of the protocols to be offered to the server.
This list should be a Python list of bytestrings representing the
protocols to offer, e.g. ``[b'http/1.1', b'spdy/2']``.
"""
# Take the list of protocols and join them together, prefixing them
# with their lengths.
protostr = b''.join(
chain.from_iterable((int2byte(len(p)), p) for p in protos)
)
# Build a C string from the list. We don't need to save this off
# because OpenSSL immediately copies the data out.
input_str = _ffi.new("unsigned char[]", protostr)
_lib.SSL_CTX_set_alpn_protos(self._context, input_str, len(protostr)) | def function[set_alpn_protos, parameter[self, protos]]:
constant[
Specify the protocols that the client is prepared to speak after the
TLS connection has been negotiated using Application Layer Protocol
Negotiation.
:param protos: A list of the protocols to be offered to the server.
This list should be a Python list of bytestrings representing the
protocols to offer, e.g. ``[b'http/1.1', b'spdy/2']``.
]
variable[protostr] assign[=] call[constant[b''].join, parameter[call[name[chain].from_iterable, parameter[<ast.GeneratorExp object at 0x7da1b020dd20>]]]]
variable[input_str] assign[=] call[name[_ffi].new, parameter[constant[unsigned char[]], name[protostr]]]
call[name[_lib].SSL_CTX_set_alpn_protos, parameter[name[self]._context, name[input_str], call[name[len], parameter[name[protostr]]]]] | keyword[def] identifier[set_alpn_protos] ( identifier[self] , identifier[protos] ):
literal[string]
identifier[protostr] = literal[string] . identifier[join] (
identifier[chain] . identifier[from_iterable] (( identifier[int2byte] ( identifier[len] ( identifier[p] )), identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[protos] )
)
identifier[input_str] = identifier[_ffi] . identifier[new] ( literal[string] , identifier[protostr] )
identifier[_lib] . identifier[SSL_CTX_set_alpn_protos] ( identifier[self] . identifier[_context] , identifier[input_str] , identifier[len] ( identifier[protostr] )) | def set_alpn_protos(self, protos):
"""
Specify the protocols that the client is prepared to speak after the
TLS connection has been negotiated using Application Layer Protocol
Negotiation.
:param protos: A list of the protocols to be offered to the server.
This list should be a Python list of bytestrings representing the
protocols to offer, e.g. ``[b'http/1.1', b'spdy/2']``.
"""
# Take the list of protocols and join them together, prefixing them
# with their lengths.
protostr = b''.join(chain.from_iterable(((int2byte(len(p)), p) for p in protos)))
# Build a C string from the list. We don't need to save this off
# because OpenSSL immediately copies the data out.
input_str = _ffi.new('unsigned char[]', protostr)
_lib.SSL_CTX_set_alpn_protos(self._context, input_str, len(protostr)) |
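# A self-contained worked example of the ALPN wire format assembled above:
# each protocol name is prefixed with its one-byte length, and the entries
# are concatenated into a single byte string.
protos = [b'http/1.1', b'spdy/2']
wire = b''.join(bytes([len(p)]) + p for p in protos)
assert wire == b'\x08http/1.1\x06spdy/2'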
def write_gtfs(feed: "Feed", path: Path, ndigits: int = 6) -> None:
"""
Export the given feed to the given path.
If the path ends in '.zip', then write the feed as a zip archive.
Otherwise assume the path is a directory, and write the feed as a
collection of CSV files to that directory, creating the directory
if it does not exist.
Round all decimals to ``ndigits`` decimal places.
All distances will be in the distance units ``feed.dist_units``.
"""
path = Path(path)
if path.suffix == ".zip":
# Write to temporary directory before zipping
zipped = True
tmp_dir = tempfile.TemporaryDirectory()
new_path = Path(tmp_dir.name)
else:
zipped = False
if not path.exists():
path.mkdir()
new_path = path
for table in cs.GTFS_REF["table"].unique():
f = getattr(feed, table)
if f is None:
continue
f = f.copy()
# Some columns need to be output as integers.
# If there are NaNs in any such column,
# then Pandas will format the column as float, which we don't want.
f_int_cols = set(cs.INT_COLS) & set(f.columns)
for s in f_int_cols:
f[s] = f[s].fillna(-1).astype(int).astype(str).replace("-1", "")
p = new_path / (table + ".txt")
f.to_csv(str(p), index=False, float_format=f"%.{ndigits}f")
# Zip directory
if zipped:
basename = str(path.parent / path.stem)
shutil.make_archive(basename, format="zip", root_dir=tmp_dir.name)
tmp_dir.cleanup() | def function[write_gtfs, parameter[feed, path, ndigits]]:
constant[
Export the given feed to the given path.
If the path ends in '.zip', then write the feed as a zip archive.
Otherwise assume the path is a directory, and write the feed as a
collection of CSV files to that directory, creating the directory
if it does not exist.
Round all decimals to ``ndigits`` decimal places.
All distances will be in the distance units ``feed.dist_units``.
]
variable[path] assign[=] call[name[Path], parameter[name[path]]]
if compare[name[path].suffix equal[==] constant[.zip]] begin[:]
variable[zipped] assign[=] constant[True]
variable[tmp_dir] assign[=] call[name[tempfile].TemporaryDirectory, parameter[]]
variable[new_path] assign[=] call[name[Path], parameter[name[tmp_dir].name]]
for taget[name[table]] in starred[call[call[name[cs].GTFS_REF][constant[table]].unique, parameter[]]] begin[:]
variable[f] assign[=] call[name[getattr], parameter[name[feed], name[table]]]
if compare[name[f] is constant[None]] begin[:]
continue
variable[f] assign[=] call[name[f].copy, parameter[]]
variable[f_int_cols] assign[=] binary_operation[call[name[set], parameter[name[cs].INT_COLS]] <ast.BitAnd object at 0x7da2590d6b60> call[name[set], parameter[name[f].columns]]]
for taget[name[s]] in starred[name[f_int_cols]] begin[:]
call[name[f]][name[s]] assign[=] call[call[call[call[call[name[f]][name[s]].fillna, parameter[<ast.UnaryOp object at 0x7da1b0ebde10>]].astype, parameter[name[int]]].astype, parameter[name[str]]].replace, parameter[constant[-1], constant[]]]
variable[p] assign[=] binary_operation[name[new_path] / binary_operation[name[table] + constant[.txt]]]
call[name[f].to_csv, parameter[call[name[str], parameter[name[p]]]]]
if name[zipped] begin[:]
variable[basename] assign[=] call[name[str], parameter[binary_operation[name[path].parent / name[path].stem]]]
call[name[shutil].make_archive, parameter[name[basename]]]
call[name[tmp_dir].cleanup, parameter[]] | keyword[def] identifier[write_gtfs] ( identifier[feed] : literal[string] , identifier[path] : identifier[Path] , identifier[ndigits] : identifier[int] = literal[int] )-> keyword[None] :
literal[string]
identifier[path] = identifier[Path] ( identifier[path] )
keyword[if] identifier[path] . identifier[suffix] == literal[string] :
identifier[zipped] = keyword[True]
identifier[tmp_dir] = identifier[tempfile] . identifier[TemporaryDirectory] ()
identifier[new_path] = identifier[Path] ( identifier[tmp_dir] . identifier[name] )
keyword[else] :
identifier[zipped] = keyword[False]
keyword[if] keyword[not] identifier[path] . identifier[exists] ():
identifier[path] . identifier[mkdir] ()
identifier[new_path] = identifier[path]
keyword[for] identifier[table] keyword[in] identifier[cs] . identifier[GTFS_REF] [ literal[string] ]. identifier[unique] ():
identifier[f] = identifier[getattr] ( identifier[feed] , identifier[table] )
keyword[if] identifier[f] keyword[is] keyword[None] :
keyword[continue]
identifier[f] = identifier[f] . identifier[copy] ()
identifier[f_int_cols] = identifier[set] ( identifier[cs] . identifier[INT_COLS] )& identifier[set] ( identifier[f] . identifier[columns] )
keyword[for] identifier[s] keyword[in] identifier[f_int_cols] :
identifier[f] [ identifier[s] ]= identifier[f] [ identifier[s] ]. identifier[fillna] (- literal[int] ). identifier[astype] ( identifier[int] ). identifier[astype] ( identifier[str] ). identifier[replace] ( literal[string] , literal[string] )
identifier[p] = identifier[new_path] /( identifier[table] + literal[string] )
identifier[f] . identifier[to_csv] ( identifier[str] ( identifier[p] ), identifier[index] = keyword[False] , identifier[float_format] = literal[string] )
keyword[if] identifier[zipped] :
identifier[basename] = identifier[str] ( identifier[path] . identifier[parent] / identifier[path] . identifier[stem] )
identifier[shutil] . identifier[make_archive] ( identifier[basename] , identifier[format] = literal[string] , identifier[root_dir] = identifier[tmp_dir] . identifier[name] )
identifier[tmp_dir] . identifier[cleanup] () | def write_gtfs(feed: 'Feed', path: Path, ndigits: int=6) -> None:
"""
Export the given feed to the given path.
If the path ends in '.zip', then write the feed as a zip archive.
Otherwise assume the path is a directory, and write the feed as a
collection of CSV files to that directory, creating the directory
if it does not exist.
Round all decimals to ``ndigits`` decimal places.
All distances will be in the distance units ``feed.dist_units``.
"""
path = Path(path)
if path.suffix == '.zip':
# Write to temporary directory before zipping
zipped = True
tmp_dir = tempfile.TemporaryDirectory()
new_path = Path(tmp_dir.name) # depends on [control=['if'], data=[]]
else:
zipped = False
if not path.exists():
path.mkdir() # depends on [control=['if'], data=[]]
new_path = path
for table in cs.GTFS_REF['table'].unique():
f = getattr(feed, table)
if f is None:
continue # depends on [control=['if'], data=[]]
f = f.copy()
# Some columns need to be output as integers.
# If there are NaNs in any such column,
# then Pandas will format the column as float, which we don't want.
f_int_cols = set(cs.INT_COLS) & set(f.columns)
for s in f_int_cols:
f[s] = f[s].fillna(-1).astype(int).astype(str).replace('-1', '') # depends on [control=['for'], data=['s']]
p = new_path / (table + '.txt')
f.to_csv(str(p), index=False, float_format=f'%.{ndigits}f') # depends on [control=['for'], data=['table']]
# Zip directory
if zipped:
basename = str(path.parent / path.stem)
shutil.make_archive(basename, format='zip', root_dir=tmp_dir.name)
tmp_dir.cleanup() # depends on [control=['if'], data=[]] |
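# A self-contained illustration of the integer-column round-trip used above:
# filling NaN with a -1 sentinel, casting to int, then blanking the sentinel
# keeps pandas from rendering the whole column as float in the CSV output.
import pandas as pd

s = pd.Series([1.0, None, 3.0])
out = s.fillna(-1).astype(int).astype(str).replace('-1', '')
assert list(out) == ['1', '', '3']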
def get_description(self, name: str) -> str:
"""
Return the description, or help string, of the variable identified by |name|.
"""
if name not in self._vars:
raise ConfigError(f"{self.name}.{name} not defined.")
return self._vars[name].description | def function[get_description, parameter[self, name]]:
constant[
Return the description, or help string, of the variable identified by |name|.
]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self]._vars] begin[:]
<ast.Raise object at 0x7da18f810bb0>
return[call[name[self]._vars][name[name]].description] | keyword[def] identifier[get_description] ( identifier[self] , identifier[name] : identifier[str] )-> identifier[str] :
literal[string]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[_vars] :
keyword[raise] identifier[ConfigError] ( literal[string] )
keyword[return] identifier[self] . identifier[_vars] [ identifier[name] ]. identifier[description] | def get_description(self, name: str) -> str:
"""
Return the description, or help string, of the variable identified by |name|.
"""
if name not in self._vars:
raise ConfigError(f'{self.name}.{name} not defined.') # depends on [control=['if'], data=['name']]
return self._vars[name].description |
def visit_snippet(self, node):
"""
HTML document generator visit handler
"""
lang = self.highlightlang
linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1
fname = node['filename']
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.highlighter.highlight_block(node.rawsource, lang,
warn=warner,
linenos=linenos,
**highlight_args)
starttag = self.starttag(node, 'div', suffix='',
CLASS='highlight-%s snippet' % lang)
self.body.append(starttag)
self.body.append('<div class="snippet-filename">%s</div>\n''' % (fname,))
self.body.append(highlighted)
self.body.append('</div>\n')
raise nodes.SkipNode | def function[visit_snippet, parameter[self, node]]:
constant[
HTML document generator visit handler
]
variable[lang] assign[=] name[self].highlightlang
variable[linenos] assign[=] compare[call[name[node].rawsource.count, parameter[constant[
]]] greater_or_equal[>=] binary_operation[name[self].highlightlinenothreshold - constant[1]]]
variable[fname] assign[=] call[name[node]][constant[filename]]
variable[highlight_args] assign[=] call[name[node].get, parameter[constant[highlight_args], dictionary[[], []]]]
if compare[constant[language] in name[node]] begin[:]
variable[lang] assign[=] call[name[node]][constant[language]]
call[name[highlight_args]][constant[force]] assign[=] constant[True]
if compare[constant[linenos] in name[node]] begin[:]
variable[linenos] assign[=] call[name[node]][constant[linenos]]
def function[warner, parameter[msg]]:
call[name[self].builder.warn, parameter[name[msg], tuple[[<ast.Attribute object at 0x7da18bc71cf0>, <ast.Attribute object at 0x7da1b0df6c50>]]]]
variable[highlighted] assign[=] call[name[self].highlighter.highlight_block, parameter[name[node].rawsource, name[lang]]]
variable[starttag] assign[=] call[name[self].starttag, parameter[name[node], constant[div]]]
call[name[self].body.append, parameter[name[starttag]]]
call[name[self].body.append, parameter[binary_operation[constant[<div class="snippet-filename">%s</div>
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0df6f50>]]]]]
call[name[self].body.append, parameter[name[highlighted]]]
call[name[self].body.append, parameter[constant[</div>
]]]
<ast.Raise object at 0x7da1b0df49a0> | keyword[def] identifier[visit_snippet] ( identifier[self] , identifier[node] ):
literal[string]
identifier[lang] = identifier[self] . identifier[highlightlang]
identifier[linenos] = identifier[node] . identifier[rawsource] . identifier[count] ( literal[string] )>= identifier[self] . identifier[highlightlinenothreshold] - literal[int]
identifier[fname] = identifier[node] [ literal[string] ]
identifier[highlight_args] = identifier[node] . identifier[get] ( literal[string] ,{})
keyword[if] literal[string] keyword[in] identifier[node] :
identifier[lang] = identifier[node] [ literal[string] ]
identifier[highlight_args] [ literal[string] ]= keyword[True]
keyword[if] literal[string] keyword[in] identifier[node] :
identifier[linenos] = identifier[node] [ literal[string] ]
keyword[def] identifier[warner] ( identifier[msg] ):
identifier[self] . identifier[builder] . identifier[warn] ( identifier[msg] ,( identifier[self] . identifier[builder] . identifier[current_docname] , identifier[node] . identifier[line] ))
identifier[highlighted] = identifier[self] . identifier[highlighter] . identifier[highlight_block] ( identifier[node] . identifier[rawsource] , identifier[lang] ,
identifier[warn] = identifier[warner] ,
identifier[linenos] = identifier[linenos] ,
** identifier[highlight_args] )
identifier[starttag] = identifier[self] . identifier[starttag] ( identifier[node] , literal[string] , identifier[suffix] = literal[string] ,
identifier[CLASS] = literal[string] % identifier[lang] )
identifier[self] . identifier[body] . identifier[append] ( identifier[starttag] )
identifier[self] . identifier[body] . identifier[append] ( literal[string] literal[string] %( identifier[fname] ,))
identifier[self] . identifier[body] . identifier[append] ( identifier[highlighted] )
identifier[self] . identifier[body] . identifier[append] ( literal[string] )
keyword[raise] identifier[nodes] . identifier[SkipNode] | def visit_snippet(self, node):
"""
HTML document generator visit handler
"""
lang = self.highlightlang
linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1
fname = node['filename']
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True # depends on [control=['if'], data=['node']]
if 'linenos' in node:
linenos = node['linenos'] # depends on [control=['if'], data=['node']]
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.highlighter.highlight_block(node.rawsource, lang, warn=warner, linenos=linenos, **highlight_args)
starttag = self.starttag(node, 'div', suffix='', CLASS='highlight-%s snippet' % lang)
self.body.append(starttag)
self.body.append('<div class="snippet-filename">%s</div>\n' % (fname,))
self.body.append(highlighted)
self.body.append('</div>\n')
raise nodes.SkipNode |
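# Illustrative HTML for one snippet node, following the starttag and
# body.append calls above; the filename and language are made up and the
# highlighted code itself is elided.
#
#   <div class="highlight-python snippet">
#   <div class="snippet-filename">example.py</div>
#   ...pygments-highlighted source...
#   </div>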
def _PrintCheckDependencyStatus(
self, dependency, result, status_message, verbose_output=True):
"""Prints the check dependency status.
Args:
dependency (DependencyDefinition): dependency definition.
result (bool): True if the Python module is available and conforms to
the minimum required version, False otherwise.
status_message (str): status message.
verbose_output (Optional[bool]): True if output should be verbose.
"""
if not result or dependency.is_optional:
if dependency.is_optional:
status_indicator = '[OPTIONAL]'
else:
status_indicator = '[FAILURE]'
print('{0:s}\t{1:s}'.format(status_indicator, status_message))
elif verbose_output:
print('[OK]\t\t{0:s}'.format(status_message)) | def function[_PrintCheckDependencyStatus, parameter[self, dependency, result, status_message, verbose_output]]:
constant[Prints the check dependency status.
Args:
dependency (DependencyDefinition): dependency definition.
result (bool): True if the Python module is available and conforms to
the minimum required version, False otherwise.
status_message (str): status message.
verbose_output (Optional[bool]): True if output should be verbose.
]
if <ast.BoolOp object at 0x7da2054a5150> begin[:]
if name[dependency].is_optional begin[:]
variable[status_indicator] assign[=] constant[[OPTIONAL]]
call[name[print], parameter[call[constant[{0:s} {1:s}].format, parameter[name[status_indicator], name[status_message]]]]] | keyword[def] identifier[_PrintCheckDependencyStatus] (
identifier[self] , identifier[dependency] , identifier[result] , identifier[status_message] , identifier[verbose_output] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[result] keyword[or] identifier[dependency] . identifier[is_optional] :
keyword[if] identifier[dependency] . identifier[is_optional] :
identifier[status_indicator] = literal[string]
keyword[else] :
identifier[status_indicator] = literal[string]
identifier[print] ( literal[string] . identifier[format] ( identifier[status_indicator] , identifier[status_message] ))
keyword[elif] identifier[verbose_output] :
identifier[print] ( literal[string] . identifier[format] ( identifier[status_message] )) | def _PrintCheckDependencyStatus(self, dependency, result, status_message, verbose_output=True):
"""Prints the check dependency status.
Args:
dependency (DependencyDefinition): dependency definition.
result (bool): True if the Python module is available and conforms to
the minimum required version, False otherwise.
status_message (str): status message.
verbose_output (Optional[bool]): True if output should be verbose.
"""
if not result or dependency.is_optional:
if dependency.is_optional:
status_indicator = '[OPTIONAL]' # depends on [control=['if'], data=[]]
else:
status_indicator = '[FAILURE]'
print('{0:s}\t{1:s}'.format(status_indicator, status_message)) # depends on [control=['if'], data=[]]
elif verbose_output:
print('[OK]\t\t{0:s}'.format(status_message)) # depends on [control=['if'], data=[]] |
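# Illustrative output of the branches above; the status messages are made
# up, only the tab-separated indicator layout comes from the code.
#
#   [FAILURE]   missing: pyyaml
#   [OPTIONAL]  missing: lzma
#   [OK]        pyparsing version: 2.4.7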
def _find_addresses(self, seed, index, count, security_level, checksum):
# type: (Seed, int, Optional[int], int, bool) -> List[Address]
"""
Find addresses matching the command parameters.
"""
generator = AddressGenerator(seed, security_level, checksum)
if count is None:
# Connect to Tangle and find the first address without any
# transactions.
for addy in generator.create_iterator(start=index):
# We use addy.address here because FindTransactions does
# not work on an address with a checksum
response = FindTransactionsCommand(self.adapter)(
addresses=[addy.address],
)
if not response.get('hashes'):
return [addy]
return generator.get_addresses(start=index, count=count) | def function[_find_addresses, parameter[self, seed, index, count, security_level, checksum]]:
constant[
Find addresses matching the command parameters.
]
variable[generator] assign[=] call[name[AddressGenerator], parameter[name[seed], name[security_level], name[checksum]]]
if compare[name[count] is constant[None]] begin[:]
for taget[name[addy]] in starred[call[name[generator].create_iterator, parameter[]]] begin[:]
variable[response] assign[=] call[call[name[FindTransactionsCommand], parameter[name[self].adapter]], parameter[]]
if <ast.UnaryOp object at 0x7da20cabe380> begin[:]
return[list[[<ast.Name object at 0x7da20c6e7ac0>]]]
return[call[name[generator].get_addresses, parameter[]]] | keyword[def] identifier[_find_addresses] ( identifier[self] , identifier[seed] , identifier[index] , identifier[count] , identifier[security_level] , identifier[checksum] ):
literal[string]
identifier[generator] = identifier[AddressGenerator] ( identifier[seed] , identifier[security_level] , identifier[checksum] )
keyword[if] identifier[count] keyword[is] keyword[None] :
keyword[for] identifier[addy] keyword[in] identifier[generator] . identifier[create_iterator] ( identifier[start] = identifier[index] ):
identifier[response] = identifier[FindTransactionsCommand] ( identifier[self] . identifier[adapter] )(
identifier[addresses] =[ identifier[addy] . identifier[address] ],
)
keyword[if] keyword[not] identifier[response] . identifier[get] ( literal[string] ):
keyword[return] [ identifier[addy] ]
keyword[return] identifier[generator] . identifier[get_addresses] ( identifier[start] = identifier[index] , identifier[count] = identifier[count] ) | def _find_addresses(self, seed, index, count, security_level, checksum):
# type: (Seed, int, Optional[int], int, bool) -> List[Address]
'\n Find addresses matching the command parameters.\n '
generator = AddressGenerator(seed, security_level, checksum)
if count is None:
# Connect to Tangle and find the first address without any
# transactions.
for addy in generator.create_iterator(start=index):
# We use addy.address here because FindTransactions does
# not work on an address with a checksum
response = FindTransactionsCommand(self.adapter)(addresses=[addy.address])
if not response.get('hashes'):
return [addy] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['addy']] # depends on [control=['if'], data=[]]
return generator.get_addresses(start=index, count=count) |
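# The count=None branch above is the usual "first unused address" scan: walk
# the generator from `index` and return the first address with no known
# transactions. A hedged call sketch (instance and argument values are
# illustrative):
#
#   addresses = api._find_addresses(seed, index=0, count=None,
#                                   security_level=2, checksum=False)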
def _send_remote(self, url, data, headers=None, callback=None):
"""
Initialise a Tornado AsyncHTTPClient and send the request to the sentry
server. If the callback is a callable, it will be called with the
response.
"""
if headers is None:
headers = {}
return AsyncHTTPClient().fetch(
url, callback, method="POST", body=data, headers=headers,
validate_cert=self.validate_cert
) | def function[_send_remote, parameter[self, url, data, headers, callback]]:
constant[
Initialise a Tornado AsyncHTTPClient and send the request to the sentry
server. If the callback is a callable, it will be called with the
response.
]
if compare[name[headers] is constant[None]] begin[:]
variable[headers] assign[=] dictionary[[], []]
return[call[call[name[AsyncHTTPClient], parameter[]].fetch, parameter[name[url], name[callback]]]] | keyword[def] identifier[_send_remote] ( identifier[self] , identifier[url] , identifier[data] , identifier[headers] = keyword[None] , identifier[callback] = keyword[None] ):
literal[string]
keyword[if] identifier[headers] keyword[is] keyword[None] :
identifier[headers] ={}
keyword[return] identifier[AsyncHTTPClient] (). identifier[fetch] (
identifier[url] , identifier[callback] , identifier[method] = literal[string] , identifier[body] = identifier[data] , identifier[headers] = identifier[headers] ,
identifier[validate_cert] = identifier[self] . identifier[validate_cert]
) | def _send_remote(self, url, data, headers=None, callback=None):
"""
Initialise a Tornado AsyncHTTPClient and send the request to the sentry
server. If the callback is a callable, it will be called with the
response.
"""
if headers is None:
headers = {} # depends on [control=['if'], data=['headers']]
return AsyncHTTPClient().fetch(url, callback, method='POST', body=data, headers=headers, validate_cert=self.validate_cert) |
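# A hedged usage sketch; note that the callback-style AsyncHTTPClient.fetch
# used above belongs to pre-6.0 Tornado. The URL and payload names are
# illustrative.
#
#   self._send_remote('https://sentry.example/api/store/', data=payload,
#                     callback=lambda resp: print(resp.code))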
def user_can_edit_news(user):
"""
Check if the user has permission to edit any of the registered NewsItem
types.
"""
newsitem_models = [model.get_newsitem_model()
for model in NEWSINDEX_MODEL_CLASSES]
if user.is_active and user.is_superuser:
# admin can edit news iff any news types exist
return bool(newsitem_models)
for NewsItem in newsitem_models:
for perm in format_perms(NewsItem, ['add', 'change', 'delete']):
if user.has_perm(perm):
return True
return False | def function[user_can_edit_news, parameter[user]]:
constant[
Check if the user has permission to edit any of the registered NewsItem
types.
]
variable[newsitem_models] assign[=] <ast.ListComp object at 0x7da1b0f06410>
if <ast.BoolOp object at 0x7da1b0f3a110> begin[:]
return[call[name[bool], parameter[name[newsitem_models]]]]
for taget[name[NewsItem]] in starred[name[newsitem_models]] begin[:]
for taget[name[perm]] in starred[call[name[format_perms], parameter[name[NewsItem], list[[<ast.Constant object at 0x7da2044c0e50>, <ast.Constant object at 0x7da2044c19f0>, <ast.Constant object at 0x7da2044c0c40>]]]]] begin[:]
if call[name[user].has_perm, parameter[name[perm]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[user_can_edit_news] ( identifier[user] ):
literal[string]
identifier[newsitem_models] =[ identifier[model] . identifier[get_newsitem_model] ()
keyword[for] identifier[model] keyword[in] identifier[NEWSINDEX_MODEL_CLASSES] ]
keyword[if] identifier[user] . identifier[is_active] keyword[and] identifier[user] . identifier[is_superuser] :
keyword[return] identifier[bool] ( identifier[newsitem_models] )
keyword[for] identifier[NewsItem] keyword[in] identifier[newsitem_models] :
keyword[for] identifier[perm] keyword[in] identifier[format_perms] ( identifier[NewsItem] ,[ literal[string] , literal[string] , literal[string] ]):
keyword[if] identifier[user] . identifier[has_perm] ( identifier[perm] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def user_can_edit_news(user):
"""
Check if the user has permission to edit any of the registered NewsItem
types.
"""
newsitem_models = [model.get_newsitem_model() for model in NEWSINDEX_MODEL_CLASSES]
if user.is_active and user.is_superuser:
# admin can edit news iff any news types exist
return bool(newsitem_models) # depends on [control=['if'], data=[]]
for NewsItem in newsitem_models:
for perm in format_perms(NewsItem, ['add', 'change', 'delete']):
if user.has_perm(perm):
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['perm']] # depends on [control=['for'], data=['NewsItem']]
return False |
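# A sketch of the permission strings checked above, assuming format_perms
# follows the standard Django "<app_label>.<action>_<model>" convention; the
# app label 'news' is illustrative.
#
#   format_perms(NewsItem, ['add', 'change', 'delete'])
#   # -> ['news.add_newsitem', 'news.change_newsitem', 'news.delete_newsitem']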
def query(cls, url=urljoin(config.API_URL, 'stac/search'), **kwargs):
""" Get request """
logger.debug('Query URL: %s, Body: %s' % (url, json.dumps(kwargs)))
response = requests.post(url, data=json.dumps(kwargs))
# API error
if response.status_code != 200:
raise SatSearchError(response.text)
return response.json() | def function[query, parameter[cls, url]]:
constant[ Get request ]
call[name[logger].debug, parameter[binary_operation[constant[Query URL: %s, Body: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26afbe0>, <ast.Call object at 0x7da1b26ad870>]]]]]
variable[response] assign[=] call[name[requests].post, parameter[name[url]]]
if compare[name[response].status_code not_equal[!=] constant[200]] begin[:]
<ast.Raise object at 0x7da1b26ac130>
return[call[name[response].json, parameter[]]] | keyword[def] identifier[query] ( identifier[cls] , identifier[url] = identifier[urljoin] ( identifier[config] . identifier[API_URL] , literal[string] ),** identifier[kwargs] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] %( identifier[url] , identifier[json] . identifier[dumps] ( identifier[kwargs] )))
identifier[response] = identifier[requests] . identifier[post] ( identifier[url] , identifier[data] = identifier[json] . identifier[dumps] ( identifier[kwargs] ))
keyword[if] identifier[response] . identifier[status_code] != literal[int] :
keyword[raise] identifier[SatSearchError] ( identifier[response] . identifier[text] )
keyword[return] identifier[response] . identifier[json] () | def query(cls, url=urljoin(config.API_URL, 'stac/search'), **kwargs):
""" Get request """
logger.debug('Query URL: %s, Body: %s' % (url, json.dumps(kwargs)))
response = requests.post(url, data=json.dumps(kwargs))
# API error
if response.status_code != 200:
raise SatSearchError(response.text) # depends on [control=['if'], data=[]]
return response.json() |
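# A hedged call sketch, assuming this classmethod lives on a Search-style
# class; keyword arguments are JSON-encoded into the POST body of the STAC
# search request, and the search parameters below are illustrative.
#
#   found = Search.query(bbox=[-110, 39, -105, 41],
#                        datetime='2019-01-01/2019-02-01')
#   print(found.keys())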
def genesis_signing_lockset(genesis, privkey):
"""
in order to avoid a complicated bootstrapping, we define
the genesis_signing_lockset as a lockset with one vote by any validator.
"""
v = VoteBlock(0, 0, genesis.hash)
v.sign(privkey)
ls = LockSet(num_eligible_votes=1)
ls.add(v)
assert ls.has_quorum
return ls | def function[genesis_signing_lockset, parameter[genesis, privkey]]:
constant[
in order to avoid a complicated bootstrapping, we define
the genesis_signing_lockset as a lockset with one vote by any validator.
]
variable[v] assign[=] call[name[VoteBlock], parameter[constant[0], constant[0], name[genesis].hash]]
call[name[v].sign, parameter[name[privkey]]]
variable[ls] assign[=] call[name[LockSet], parameter[]]
call[name[ls].add, parameter[name[v]]]
assert[name[ls].has_quorum]
return[name[ls]] | keyword[def] identifier[genesis_signing_lockset] ( identifier[genesis] , identifier[privkey] ):
literal[string]
identifier[v] = identifier[VoteBlock] ( literal[int] , literal[int] , identifier[genesis] . identifier[hash] )
identifier[v] . identifier[sign] ( identifier[privkey] )
identifier[ls] = identifier[LockSet] ( identifier[num_eligible_votes] = literal[int] )
identifier[ls] . identifier[add] ( identifier[v] )
keyword[assert] identifier[ls] . identifier[has_quorum]
keyword[return] identifier[ls] | def genesis_signing_lockset(genesis, privkey):
"""
in order to avoid a complicated bootstrapping, we define
the genesis_signing_lockset as a lockset with one vote by any validator.
"""
v = VoteBlock(0, 0, genesis.hash)
v.sign(privkey)
ls = LockSet(num_eligible_votes=1)
ls.add(v)
assert ls.has_quorum
return ls |
def child_sequence(self, child):
"""Search for the sequence that contains this child.
:param child: The child node to search sequences for.
:type child: NodeNG
:returns: The sequence containing the given child node.
:rtype: iterable(NodeNG)
:raises AstroidError: If no sequence could be found that contains
the given child.
"""
for field in self._astroid_fields:
node_or_sequence = getattr(self, field)
if node_or_sequence is child:
return [node_or_sequence]
# /!\ compiler.ast Nodes have an __iter__ walking over child nodes
if (
isinstance(node_or_sequence, (tuple, list))
and child in node_or_sequence
):
return node_or_sequence
msg = "Could not find %s in %s's children"
raise exceptions.AstroidError(msg % (repr(child), repr(self))) | def function[child_sequence, parameter[self, child]]:
constant[Search for the sequence that contains this child.
:param child: The child node to search sequences for.
:type child: NodeNG
:returns: The sequence containing the given child node.
:rtype: iterable(NodeNG)
:raises AstroidError: If no sequence could be found that contains
the given child.
]
for taget[name[field]] in starred[name[self]._astroid_fields] begin[:]
variable[node_or_sequence] assign[=] call[name[getattr], parameter[name[self], name[field]]]
if compare[name[node_or_sequence] is name[child]] begin[:]
return[list[[<ast.Name object at 0x7da1b1e74ac0>]]]
if <ast.BoolOp object at 0x7da1b1e74a00> begin[:]
return[name[node_or_sequence]]
variable[msg] assign[=] constant[Could not find %s in %s's children]
<ast.Raise object at 0x7da1b1e77c40> | keyword[def] identifier[child_sequence] ( identifier[self] , identifier[child] ):
literal[string]
keyword[for] identifier[field] keyword[in] identifier[self] . identifier[_astroid_fields] :
identifier[node_or_sequence] = identifier[getattr] ( identifier[self] , identifier[field] )
keyword[if] identifier[node_or_sequence] keyword[is] identifier[child] :
keyword[return] [ identifier[node_or_sequence] ]
keyword[if] (
identifier[isinstance] ( identifier[node_or_sequence] ,( identifier[tuple] , identifier[list] ))
keyword[and] identifier[child] keyword[in] identifier[node_or_sequence]
):
keyword[return] identifier[node_or_sequence]
identifier[msg] = literal[string]
keyword[raise] identifier[exceptions] . identifier[AstroidError] ( identifier[msg] %( identifier[repr] ( identifier[child] ), identifier[repr] ( identifier[self] ))) | def child_sequence(self, child):
"""Search for the sequence that contains this child.
:param child: The child node to search sequences for.
:type child: NodeNG
:returns: The sequence containing the given child node.
:rtype: iterable(NodeNG)
:raises AstroidError: If no sequence could be found that contains
the given child.
"""
for field in self._astroid_fields:
node_or_sequence = getattr(self, field)
if node_or_sequence is child:
return [node_or_sequence] # depends on [control=['if'], data=['node_or_sequence']]
# /!\ compiler.ast Nodes have an __iter__ walking over child nodes
if isinstance(node_or_sequence, (tuple, list)) and child in node_or_sequence:
return node_or_sequence # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']]
msg = "Could not find %s in %s's children"
raise exceptions.AstroidError(msg % (repr(child), repr(self))) |
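# A usage sketch with the astroid library this method belongs to: a Module
# node's `body` list is one of its _astroid_fields, so child_sequence finds
# the list that holds a given statement.
#
#   import astroid
#   module = astroid.parse("x = 1\ny = 2")
#   assign = module.body[0]
#   module.child_sequence(assign)  # -> module.body, the list containing it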
def share_secret(threshold, nshares, secret, identifier, hash_id=Hash.SHA256):
"""
Create nshares of the secret. threshold specifies the number of shares
needed for reconstructing the secret value. A 0-16 byte identifier must
be provided. Optionally the secret is hashed with the algorithm specified
by hash_id, a class attribute of Hash.
This function returns a list of formatted shares or raises a TSSError
exception if anything goes wrong.
"""
if identifier is None:
raise TSSError('an identifier must be provided')
if not Hash.is_valid(hash_id):
raise TSSError('invalid hash algorithm %s' % hash_id)
secret = encode(secret)
identifier = encode(identifier)
if hash_id != Hash.NONE:
secret += Hash.to_func(hash_id)(secret).digest()
shares = generate_shares(threshold, nshares, secret)
header = format_header(identifier, hash_id, threshold, len(secret) + 1)
return [format_share(header, share) for share in shares] | def function[share_secret, parameter[threshold, nshares, secret, identifier, hash_id]]:
constant[
Create nshares of the secret. threshold specifies the number of shares
needed for reconstructing the secret value. A 0-16 byte identifier must
be provided. Optionally the secret is hashed with the algorithm specified
by hash_id, a class attribute of Hash.
This function returns a list of formatted shares or raises a TSSError
exception if anything goes wrong.
]
if compare[name[identifier] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c6a92a0>
if <ast.UnaryOp object at 0x7da20c6a8c10> begin[:]
<ast.Raise object at 0x7da20c6a9750>
variable[secret] assign[=] call[name[encode], parameter[name[secret]]]
variable[identifier] assign[=] call[name[encode], parameter[name[identifier]]]
if compare[name[hash_id] not_equal[!=] name[Hash].NONE] begin[:]
<ast.AugAssign object at 0x7da20c6aa680>
variable[shares] assign[=] call[name[generate_shares], parameter[name[threshold], name[nshares], name[secret]]]
variable[header] assign[=] call[name[format_header], parameter[name[identifier], name[hash_id], name[threshold], binary_operation[call[name[len], parameter[name[secret]]] + constant[1]]]]
return[<ast.ListComp object at 0x7da18c4ccc70>] | keyword[def] identifier[share_secret] ( identifier[threshold] , identifier[nshares] , identifier[secret] , identifier[identifier] , identifier[hash_id] = identifier[Hash] . identifier[SHA256] ):
literal[string]
keyword[if] identifier[identifier] keyword[is] keyword[None] :
keyword[raise] identifier[TSSError] ( literal[string] )
keyword[if] keyword[not] identifier[Hash] . identifier[is_valid] ( identifier[hash_id] ):
keyword[raise] identifier[TSSError] ( literal[string] % identifier[hash_id] )
identifier[secret] = identifier[encode] ( identifier[secret] )
identifier[identifier] = identifier[encode] ( identifier[identifier] )
keyword[if] identifier[hash_id] != identifier[Hash] . identifier[NONE] :
identifier[secret] += identifier[Hash] . identifier[to_func] ( identifier[hash_id] )( identifier[secret] ). identifier[digest] ()
identifier[shares] = identifier[generate_shares] ( identifier[threshold] , identifier[nshares] , identifier[secret] )
identifier[header] = identifier[format_header] ( identifier[identifier] , identifier[hash_id] , identifier[threshold] , identifier[len] ( identifier[secret] )+ literal[int] )
keyword[return] [ identifier[format_share] ( identifier[header] , identifier[share] ) keyword[for] identifier[share] keyword[in] identifier[shares] ] | def share_secret(threshold, nshares, secret, identifier, hash_id=Hash.SHA256):
"""
Create nshares of the secret. threshold specifies the number of shares
needed for reconstructing the secret value. A 0-16 byte identifier must
be provided. Optionally the secret is hashed with the algorithm specified
by hash_id, a class attribute of Hash.
This function returns a list of formatted shares or raises a TSSError
exception if anything goes wrong.
"""
if identifier is None:
raise TSSError('an identifier must be provided') # depends on [control=['if'], data=[]]
if not Hash.is_valid(hash_id):
raise TSSError('invalid hash algorithm %s' % hash_id) # depends on [control=['if'], data=[]]
secret = encode(secret)
identifier = encode(identifier)
if hash_id != Hash.NONE:
secret += Hash.to_func(hash_id)(secret).digest() # depends on [control=['if'], data=['hash_id']]
shares = generate_shares(threshold, nshares, secret)
header = format_header(identifier, hash_id, threshold, len(secret) + 1)
return [format_share(header, share) for share in shares] |
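# A hedged call sketch: split a secret into 5 shares such that any 3 suffice
# to reconstruct it (the reconstruction helpers are outside this row). The
# hash digest appended above lets the recombiner verify the recovered secret.
#
#   shares = share_secret(3, 5, b'correct horse', b'id-1', Hash.SHA256)
#   assert len(shares) == 5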
def _api_model_patch_replace(conn, restApiId, modelName, path, value):
'''
the replace patch operation on a Model resource
'''
response = conn.update_model(restApiId=restApiId, modelName=modelName,
patchOperations=[{'op': 'replace', 'path': path, 'value': value}])
return response | def function[_api_model_patch_replace, parameter[conn, restApiId, modelName, path, value]]:
constant[
the replace patch operation on a Model resource
]
variable[response] assign[=] call[name[conn].update_model, parameter[]]
return[name[response]] | keyword[def] identifier[_api_model_patch_replace] ( identifier[conn] , identifier[restApiId] , identifier[modelName] , identifier[path] , identifier[value] ):
literal[string]
identifier[response] = identifier[conn] . identifier[update_model] ( identifier[restApiId] = identifier[restApiId] , identifier[modelName] = identifier[modelName] ,
identifier[patchOperations] =[{ literal[string] : literal[string] , literal[string] : identifier[path] , literal[string] : identifier[value] }])
keyword[return] identifier[response] | def _api_model_patch_replace(conn, restApiId, modelName, path, value):
"""
the replace patch operation on a Model resource
"""
response = conn.update_model(restApiId=restApiId, modelName=modelName, patchOperations=[{'op': 'replace', 'path': path, 'value': value}])
return response |
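# A hedged call sketch against boto3's API Gateway client: this sends a
# single JSON Patch 'replace' operation through update_model. Identifier
# values are illustrative.
#
#   _api_model_patch_replace(conn, 'a1b2c3', 'Error',
#                            path='/schema', value='{"type": "object"}')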
def __parse_json_data(self, data):
"""Process Json data
:@param data
:@type data: json/dict
:throws TypeError
"""
if isinstance(data, dict) or isinstance(data, list):
self._raw_data = data
self._json_data = copy.deepcopy(self._raw_data)
else:
raise TypeError("Provided Data is not json") | def function[__parse_json_data, parameter[self, data]]:
constant[Process Json data
:@param data
:@type data: json/dict
:throws TypeError
]
if <ast.BoolOp object at 0x7da18eb54e20> begin[:]
name[self]._raw_data assign[=] name[data]
name[self]._json_data assign[=] call[name[copy].deepcopy, parameter[name[self]._raw_data]] | keyword[def] identifier[__parse_json_data] ( identifier[self] , identifier[data] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[dict] ) keyword[or] identifier[isinstance] ( identifier[data] , identifier[list] ):
identifier[self] . identifier[_raw_data] = identifier[data]
identifier[self] . identifier[_json_data] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[_raw_data] )
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] ) | def __parse_json_data(self, data):
"""Process Json data
:@param data
:@type data: json/dict
:throws TypeError
"""
if isinstance(data, dict) or isinstance(data, list):
self._raw_data = data
self._json_data = copy.deepcopy(self._raw_data) # depends on [control=['if'], data=[]]
else:
raise TypeError('Provided data is not JSON')
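A self-contained sketch of the deepcopy-on-ingest pattern the method above relies on: later operations may mutate the working copy while the raw payload stays intact.

import copy

raw = {'users': [{'name': 'alice'}]}
working = copy.deepcopy(raw)
working['users'][0]['name'] = 'bob'        # mutate the working copy only
assert raw['users'][0]['name'] == 'alice'  # the raw data is untouched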
def _read_header(self):
"""
When a CSV file is given, extracts header information from the file.
Otherwise, this header data must be explicitly given when the object
is instantiated.
"""
if not self.filename or self.header_types:
return
rows = csv.reader(open(self.filename))
#header = rows.next()
header = next(rows)
self.header_types = {} # {attr_name:type}
self._class_attr_name = None
self.header_order = [] # [attr_name,...]
for el in header:
matches = ATTR_HEADER_PATTERN.findall(el)
assert matches, "Invalid header element: %s" % (el,)
el_name, el_type, el_mode = matches[0]
el_name = el_name.strip()
self.header_order.append(el_name)
self.header_types[el_name] = el_type
if el_mode == ATTR_MODE_CLASS:
assert self._class_attr_name is None, \
"Multiple class attributes are not supported."
self._class_attr_name = el_name
else:
assert self.header_types[el_name] != ATTR_TYPE_CONTINUOUS, \
"Non-class continuous attributes are not supported."
assert self._class_attr_name, "A class attribute must be specified." | def function[_read_header, parameter[self]]:
constant[
When a CSV file is given, extracts header information from the file.
Otherwise, this header data must be explicitly given when the object
is instantiated.
]
if <ast.BoolOp object at 0x7da1b0f2a8f0> begin[:]
return[None]
variable[rows] assign[=] call[name[csv].reader, parameter[call[name[open], parameter[name[self].filename]]]]
variable[header] assign[=] call[name[next], parameter[name[rows]]]
name[self].header_types assign[=] dictionary[[], []]
name[self]._class_attr_name assign[=] constant[None]
name[self].header_order assign[=] list[[]]
for taget[name[el]] in starred[name[header]] begin[:]
variable[matches] assign[=] call[name[ATTR_HEADER_PATTERN].findall, parameter[name[el]]]
assert[name[matches]]
<ast.Tuple object at 0x7da1b0f2bbb0> assign[=] call[name[matches]][constant[0]]
variable[el_name] assign[=] call[name[el_name].strip, parameter[]]
call[name[self].header_order.append, parameter[name[el_name]]]
call[name[self].header_types][name[el_name]] assign[=] name[el_type]
if compare[name[el_mode] equal[==] name[ATTR_MODE_CLASS]] begin[:]
assert[compare[name[self]._class_attr_name is constant[None]]]
name[self]._class_attr_name assign[=] name[el_name]
assert[name[self]._class_attr_name] | keyword[def] identifier[_read_header] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[filename] keyword[or] identifier[self] . identifier[header_types] :
keyword[return]
identifier[rows] = identifier[csv] . identifier[reader] ( identifier[open] ( identifier[self] . identifier[filename] ))
identifier[header] = identifier[next] ( identifier[rows] )
identifier[self] . identifier[header_types] ={}
identifier[self] . identifier[_class_attr_name] = keyword[None]
identifier[self] . identifier[header_order] =[]
keyword[for] identifier[el] keyword[in] identifier[header] :
identifier[matches] = identifier[ATTR_HEADER_PATTERN] . identifier[findall] ( identifier[el] )
keyword[assert] identifier[matches] , literal[string] %( identifier[el] ,)
identifier[el_name] , identifier[el_type] , identifier[el_mode] = identifier[matches] [ literal[int] ]
identifier[el_name] = identifier[el_name] . identifier[strip] ()
identifier[self] . identifier[header_order] . identifier[append] ( identifier[el_name] )
identifier[self] . identifier[header_types] [ identifier[el_name] ]= identifier[el_type]
keyword[if] identifier[el_mode] == identifier[ATTR_MODE_CLASS] :
keyword[assert] identifier[self] . identifier[_class_attr_name] keyword[is] keyword[None] , literal[string]
identifier[self] . identifier[_class_attr_name] = identifier[el_name]
keyword[else] :
keyword[assert] identifier[self] . identifier[header_types] [ identifier[el_name] ]!= identifier[ATTR_TYPE_CONTINUOUS] , literal[string]
keyword[assert] identifier[self] . identifier[_class_attr_name] , literal[string] | def _read_header(self):
"""
When a CSV file is given, extracts header information from the file.
Otherwise, this header data must be explicitly given when the object
is instantiated.
"""
if not self.filename or self.header_types:
return # depends on [control=['if'], data=[]]
rows = csv.reader(open(self.filename))
#header = rows.next()
header = next(rows)
self.header_types = {} # {attr_name:type}
self._class_attr_name = None
self.header_order = [] # [attr_name,...]
for el in header:
matches = ATTR_HEADER_PATTERN.findall(el)
assert matches, 'Invalid header element: %s' % (el,)
(el_name, el_type, el_mode) = matches[0]
el_name = el_name.strip()
self.header_order.append(el_name)
self.header_types[el_name] = el_type
if el_mode == ATTR_MODE_CLASS:
assert self._class_attr_name is None, 'Multiple class attributes are not supported.'
self._class_attr_name = el_name # depends on [control=['if'], data=[]]
else:
assert self.header_types[el_name] != ATTR_TYPE_CONTINUOUS, 'Non-class continuous attributes are not supported.' # depends on [control=['for'], data=['el']]
assert self._class_attr_name, 'A class attribute must be specified.' |
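To make the header convention concrete, a small sketch follows. The real ATTR_HEADER_PATTERN is not shown in this record, so the regex below is an assumption chosen to produce the same (name, type, mode) triples.

import re

# Assumed shape: "name:type" with an optional ":class" mode suffix.
ATTR_HEADER_PATTERN = re.compile(r'([^:]+):(\w+):?(\w*)')

for el in ('outlook:nominal', 'temperature:continuous', 'play:nominal:class'):
    el_name, el_type, el_mode = ATTR_HEADER_PATTERN.findall(el)[0]
    print(el_name, el_type, el_mode or '<attribute>')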
def _store_equal_samples(self, ncpus):
"""
sample quartets evenly across splits of the starting tree, and fills
in remaining samples with random quartet samples. Uses a hash dict to
not sample the same quartet twice, so for very large trees this can
take a few minutes to find millions of possible quartet samples.
"""
## choose chunker for h5 arr
breaks = 2
if self.params.nquartets < 5000:
breaks = 1
if self.params.nquartets > 100000:
breaks = 4
if self.params.nquartets > 500000:
breaks = 8
self._chunksize = (self.params.nquartets // (breaks * ncpus) + \
(self.params.nquartets % (breaks * ncpus)))
LOGGER.info("nquarts = %s, chunk = %s", self.params.nquartets, self._chunksize)
## create h5 OUT empty arrays
with h5py.File(self.database.output, 'w') as io5:
io5.create_dataset("quartets",
(self.params.nquartets, 4),
dtype=np.uint16,
chunks=(self._chunksize, 4))
io5.create_dataset("qstats",
(self.params.nquartets, 4),
dtype=np.uint32,
chunks=(self._chunksize, 4))
io5.create_group("qboots")
## get starting tree, unroot, randomly resolve, ladderize
tre = ete3.Tree(self.files.guidetreefile, format=0)
#tre = toytree.tree(self.files.guidetreefile, format=0)
tre.tree.unroot()
tre.tree.resolve_polytomy(recursive=True)
tre.tree.ladderize()
## randomly sample all splits of tree and convert tip names to indices
splits = [([self.samples.index(z.name) for z in i],
[self.samples.index(z.name) for z in j]) \
for (i, j) in tre.get_edges()]
## only keep internal splits (no single tips edges)
## this seemed to cause problems with unsampled tips
splits = [i for i in splits if all([len(j) > 1 for j in i])]
## turn each into an iterable split sampler
## if the nquartets for that split is small, then sample all of them
## if it is big, then make it a random sampler from that split
qiters = []
## how many quartets, at minimum, are we going to sample from each split?
squarts = self.params.nquartets // len(splits)
## how many iterators can be sampled to saturation?
nsaturation = 0
for split in splits:
## if small number at this split then sample all possible sets
## we will exhaust this quickly and then switch to random for
## the larger splits.
if n_choose_k(len(split[0]), 2) * n_choose_k(len(split[1]), 2) < squarts*2:
qiter = (i+j for (i, j) in itertools.product(
itertools.combinations(split[0], 2),
itertools.combinations(split[1], 2)))
nsaturation += 1
## else create random sampler across that split, this is slower
## because it can propose the same split repeatedly and so we
## have to check it against the 'sampled' set.
else:
qiter = (random_product(split[0], split[1]) for _ \
in xrange(self.params.nquartets))
nsaturation += 1
## store all iterators into a list
qiters.append(qiter)
#for split in splits:
# print(split)
## make qiters infinitely cycling
qiters = itertools.cycle(qiters)
cycler = itertools.cycle(range(len(splits)))
## store visiting quartets
sampled = set()
## iterate over qiters sampling from each, if one runs out, keep
## sampling from remaining qiters. Keep going until samples is filled
with h5py.File(self.database.input, 'a') as io5:
## create data sets
io5.create_dataset("samples",
(self.params.nquartets, 4),
dtype=np.uint16,
chunks=(self._chunksize, 4),
compression='gzip')
## fill chunksize at a time for efficiency
i = 0
empty = set()
edge_targeted = 0
random_target = 0
## keep filling quartets until nquartets are sampled
while i < self.params.nquartets:
qdat = []
## keep filling this chunk until its full
while len(qdat) < self._chunksize:
## grab the next iterator
qiter = qiters.next()
cycle = cycler.next()
## sample from iterator
try:
qrtsamp = qiter.next()
if tuple(qrtsamp) not in sampled:
qdat.append(qrtsamp)
sampled.add(qrtsamp)
edge_targeted += 1
#else:
# print('repeat')
## unless iterator is empty, then skip it
except StopIteration:
empty.add(cycle)
## break when all edge samplers are empty
if len(empty) == nsaturation:
break
## if array is not full then add random samples
while len(qdat) < self._chunksize:
qrtsamp = random_combination(range(len(self.samples)), 4)
if tuple(qrtsamp) not in sampled:
qdat.append(qrtsamp)
sampled.add(qrtsamp)
random_target += 1
## stick chunk into h5 array
dat = np.array(qdat, dtype=np.uint16)
io5["samples"][i:i+self._chunksize] = dat[:io5["samples"].shape[0] - i]
i += self._chunksize
print(" equal sampling: {} edge quartets, {} random quartets "\
.format(edge_targeted, random_target)) | def function[_store_equal_samples, parameter[self, ncpus]]:
constant[
sample quartets evenly across splits of the starting tree, and fills
in remaining samples with random quartet samples. Uses a hash dict to
not sample the same quartet twice, so for very large trees this can
take a few minutes to find millions of possible quartet samples.
]
variable[breaks] assign[=] constant[2]
if compare[name[self].params.nquartets less[<] constant[5000]] begin[:]
variable[breaks] assign[=] constant[1]
if compare[name[self].params.nquartets greater[>] constant[100000]] begin[:]
variable[breaks] assign[=] constant[4]
if compare[name[self].params.nquartets greater[>] constant[500000]] begin[:]
variable[breaks] assign[=] constant[8]
name[self]._chunksize assign[=] binary_operation[binary_operation[name[self].params.nquartets <ast.FloorDiv object at 0x7da2590d6bc0> binary_operation[name[breaks] * name[ncpus]]] + binary_operation[name[self].params.nquartets <ast.Mod object at 0x7da2590d6920> binary_operation[name[breaks] * name[ncpus]]]]
call[name[LOGGER].info, parameter[constant[nquarts = %s, chunk = %s], name[self].params.nquartets, name[self]._chunksize]]
with call[name[h5py].File, parameter[name[self].database.output, constant[w]]] begin[:]
call[name[io5].create_dataset, parameter[constant[quartets], tuple[[<ast.Attribute object at 0x7da1b004dff0>, <ast.Constant object at 0x7da1b004e2c0>]]]]
call[name[io5].create_dataset, parameter[constant[qstats], tuple[[<ast.Attribute object at 0x7da1b004f160>, <ast.Constant object at 0x7da1b004f490>]]]]
call[name[io5].create_group, parameter[constant[qboots]]]
variable[tre] assign[=] call[name[ete3].Tree, parameter[name[self].files.guidetreefile]]
call[name[tre].tree.unroot, parameter[]]
call[name[tre].tree.resolve_polytomy, parameter[]]
call[name[tre].tree.ladderize, parameter[]]
variable[splits] assign[=] <ast.ListComp object at 0x7da1b00fcc40>
variable[splits] assign[=] <ast.ListComp object at 0x7da1b00fd300>
variable[qiters] assign[=] list[[]]
variable[squarts] assign[=] binary_operation[name[self].params.nquartets <ast.FloorDiv object at 0x7da2590d6bc0> call[name[len], parameter[name[splits]]]]
variable[nsaturation] assign[=] constant[0]
for taget[name[split]] in starred[name[splits]] begin[:]
if compare[binary_operation[call[name[n_choose_k], parameter[call[name[len], parameter[call[name[split]][constant[0]]]], constant[2]]] * call[name[n_choose_k], parameter[call[name[len], parameter[call[name[split]][constant[1]]]], constant[2]]]] less[<] binary_operation[name[squarts] * constant[2]]] begin[:]
variable[qiter] assign[=] <ast.GeneratorExp object at 0x7da1b00fd8d0>
<ast.AugAssign object at 0x7da1b00fd0f0>
call[name[qiters].append, parameter[name[qiter]]]
variable[qiters] assign[=] call[name[itertools].cycle, parameter[name[qiters]]]
variable[cycler] assign[=] call[name[itertools].cycle, parameter[call[name[range], parameter[call[name[len], parameter[name[splits]]]]]]]
variable[sampled] assign[=] call[name[set], parameter[]]
with call[name[h5py].File, parameter[name[self].database.input, constant[a]]] begin[:]
call[name[io5].create_dataset, parameter[constant[samples], tuple[[<ast.Attribute object at 0x7da18eb55fc0>, <ast.Constant object at 0x7da18eb567d0>]]]]
variable[i] assign[=] constant[0]
variable[empty] assign[=] call[name[set], parameter[]]
variable[edge_targeted] assign[=] constant[0]
variable[random_target] assign[=] constant[0]
while compare[name[i] less[<] name[self].params.nquartets] begin[:]
variable[qdat] assign[=] list[[]]
while compare[call[name[len], parameter[name[qdat]]] less[<] name[self]._chunksize] begin[:]
variable[qiter] assign[=] call[name[qiters].next, parameter[]]
variable[cycle] assign[=] call[name[cycler].next, parameter[]]
<ast.Try object at 0x7da18eb55180>
if compare[call[name[len], parameter[name[empty]]] equal[==] name[nsaturation]] begin[:]
break
while compare[call[name[len], parameter[name[qdat]]] less[<] name[self]._chunksize] begin[:]
variable[qrtsamp] assign[=] call[name[random_combination], parameter[call[name[range], parameter[call[name[len], parameter[name[self].samples]]]], constant[4]]]
if compare[call[name[tuple], parameter[name[qrtsamp]]] <ast.NotIn object at 0x7da2590d7190> name[sampled]] begin[:]
call[name[qdat].append, parameter[name[qrtsamp]]]
call[name[sampled].add, parameter[name[qrtsamp]]]
<ast.AugAssign object at 0x7da18eb560b0>
variable[dat] assign[=] call[name[np].array, parameter[name[qdat]]]
call[call[name[io5]][constant[samples]]][<ast.Slice object at 0x7da18eb54a60>] assign[=] call[name[dat]][<ast.Slice object at 0x7da18eb55e40>]
<ast.AugAssign object at 0x7da18eb553c0>
call[name[print], parameter[call[constant[ equal sampling: {} edge quartets, {} random quartets ].format, parameter[name[edge_targeted], name[random_target]]]]] | keyword[def] identifier[_store_equal_samples] ( identifier[self] , identifier[ncpus] ):
literal[string]
identifier[breaks] = literal[int]
keyword[if] identifier[self] . identifier[params] . identifier[nquartets] < literal[int] :
identifier[breaks] = literal[int]
keyword[if] identifier[self] . identifier[params] . identifier[nquartets] > literal[int] :
identifier[breaks] = literal[int]
keyword[if] identifier[self] . identifier[params] . identifier[nquartets] > literal[int] :
identifier[breaks] = literal[int]
identifier[self] . identifier[_chunksize] =( identifier[self] . identifier[params] . identifier[nquartets] //( identifier[breaks] * identifier[ncpus] )+( identifier[self] . identifier[params] . identifier[nquartets] %( identifier[breaks] * identifier[ncpus] )))
identifier[LOGGER] . identifier[info] ( literal[string] , identifier[self] . identifier[params] . identifier[nquartets] , identifier[self] . identifier[_chunksize] )
keyword[with] identifier[h5py] . identifier[File] ( identifier[self] . identifier[database] . identifier[output] , literal[string] ) keyword[as] identifier[io5] :
identifier[io5] . identifier[create_dataset] ( literal[string] ,
( identifier[self] . identifier[params] . identifier[nquartets] , literal[int] ),
identifier[dtype] = identifier[np] . identifier[uint16] ,
identifier[chunks] =( identifier[self] . identifier[_chunksize] , literal[int] ))
identifier[io5] . identifier[create_dataset] ( literal[string] ,
( identifier[self] . identifier[params] . identifier[nquartets] , literal[int] ),
identifier[dtype] = identifier[np] . identifier[uint32] ,
identifier[chunks] =( identifier[self] . identifier[_chunksize] , literal[int] ))
identifier[io5] . identifier[create_group] ( literal[string] )
identifier[tre] = identifier[ete3] . identifier[Tree] ( identifier[self] . identifier[files] . identifier[guidetreefile] , identifier[format] = literal[int] )
identifier[tre] . identifier[tree] . identifier[unroot] ()
identifier[tre] . identifier[tree] . identifier[resolve_polytomy] ( identifier[recursive] = keyword[True] )
identifier[tre] . identifier[tree] . identifier[ladderize] ()
identifier[splits] =[([ identifier[self] . identifier[samples] . identifier[index] ( identifier[z] . identifier[name] ) keyword[for] identifier[z] keyword[in] identifier[i] ],
[ identifier[self] . identifier[samples] . identifier[index] ( identifier[z] . identifier[name] ) keyword[for] identifier[z] keyword[in] identifier[j] ]) keyword[for] ( identifier[i] , identifier[j] ) keyword[in] identifier[tre] . identifier[get_edges] ()]
identifier[splits] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[splits] keyword[if] identifier[all] ([ identifier[len] ( identifier[j] )> literal[int] keyword[for] identifier[j] keyword[in] identifier[i] ])]
identifier[qiters] =[]
identifier[squarts] = identifier[self] . identifier[params] . identifier[nquartets] // identifier[len] ( identifier[splits] )
identifier[nsaturation] = literal[int]
keyword[for] identifier[split] keyword[in] identifier[splits] :
keyword[if] identifier[n_choose_k] ( identifier[len] ( identifier[split] [ literal[int] ]), literal[int] )* identifier[n_choose_k] ( identifier[len] ( identifier[split] [ literal[int] ]), literal[int] )< identifier[squarts] * literal[int] :
identifier[qiter] =( identifier[i] + identifier[j] keyword[for] ( identifier[i] , identifier[j] ) keyword[in] identifier[itertools] . identifier[product] (
identifier[itertools] . identifier[combinations] ( identifier[split] [ literal[int] ], literal[int] ),
identifier[itertools] . identifier[combinations] ( identifier[split] [ literal[int] ], literal[int] )))
identifier[nsaturation] += literal[int]
keyword[else] :
identifier[qiter] =( identifier[random_product] ( identifier[split] [ literal[int] ], identifier[split] [ literal[int] ]) keyword[for] identifier[_] keyword[in] identifier[xrange] ( identifier[self] . identifier[params] . identifier[nquartets] ))
identifier[nsaturation] += literal[int]
identifier[qiters] . identifier[append] ( identifier[qiter] )
identifier[qiters] = identifier[itertools] . identifier[cycle] ( identifier[qiters] )
identifier[cycler] = identifier[itertools] . identifier[cycle] ( identifier[range] ( identifier[len] ( identifier[splits] )))
identifier[sampled] = identifier[set] ()
keyword[with] identifier[h5py] . identifier[File] ( identifier[self] . identifier[database] . identifier[input] , literal[string] ) keyword[as] identifier[io5] :
identifier[io5] . identifier[create_dataset] ( literal[string] ,
( identifier[self] . identifier[params] . identifier[nquartets] , literal[int] ),
identifier[dtype] = identifier[np] . identifier[uint16] ,
identifier[chunks] =( identifier[self] . identifier[_chunksize] , literal[int] ),
identifier[compression] = literal[string] )
identifier[i] = literal[int]
identifier[empty] = identifier[set] ()
identifier[edge_targeted] = literal[int]
identifier[random_target] = literal[int]
keyword[while] identifier[i] < identifier[self] . identifier[params] . identifier[nquartets] :
identifier[qdat] =[]
keyword[while] identifier[len] ( identifier[qdat] )< identifier[self] . identifier[_chunksize] :
identifier[qiter] = identifier[qiters] . identifier[next] ()
identifier[cycle] = identifier[cycler] . identifier[next] ()
keyword[try] :
identifier[qrtsamp] = identifier[qiter] . identifier[next] ()
keyword[if] identifier[tuple] ( identifier[qrtsamp] ) keyword[not] keyword[in] identifier[sampled] :
identifier[qdat] . identifier[append] ( identifier[qrtsamp] )
identifier[sampled] . identifier[add] ( identifier[qrtsamp] )
identifier[edge_targeted] += literal[int]
keyword[except] identifier[StopIteration] :
identifier[empty] . identifier[add] ( identifier[cycle] )
keyword[if] identifier[len] ( identifier[empty] )== identifier[nsaturation] :
keyword[break]
keyword[while] identifier[len] ( identifier[qdat] )< identifier[self] . identifier[_chunksize] :
identifier[qrtsamp] = identifier[random_combination] ( identifier[range] ( identifier[len] ( identifier[self] . identifier[samples] )), literal[int] )
keyword[if] identifier[tuple] ( identifier[qrtsamp] ) keyword[not] keyword[in] identifier[sampled] :
identifier[qdat] . identifier[append] ( identifier[qrtsamp] )
identifier[sampled] . identifier[add] ( identifier[qrtsamp] )
identifier[random_target] += literal[int]
identifier[dat] = identifier[np] . identifier[array] ( identifier[qdat] , identifier[dtype] = identifier[np] . identifier[uint16] )
identifier[io5] [ literal[string] ][ identifier[i] : identifier[i] + identifier[self] . identifier[_chunksize] ]= identifier[dat] [: identifier[io5] [ literal[string] ]. identifier[shape] [ literal[int] ]- identifier[i] ]
identifier[i] += identifier[self] . identifier[_chunksize]
identifier[print] ( literal[string] . identifier[format] ( identifier[edge_targeted] , identifier[random_target] )) | def _store_equal_samples(self, ncpus):
"""
sample quartets evenly across splits of the starting tree, and fills
in remaining samples with random quartet samples. Uses a hash dict to
not sample the same quartet twice, so for very large trees this can
take a few minutes to find millions of possible quartet samples.
"""
## choose chunker for h5 arr
breaks = 2
if self.params.nquartets < 5000:
breaks = 1 # depends on [control=['if'], data=[]]
if self.params.nquartets > 100000:
breaks = 4 # depends on [control=['if'], data=[]]
if self.params.nquartets > 500000:
breaks = 8 # depends on [control=['if'], data=[]]
self._chunksize = self.params.nquartets // (breaks * ncpus) + self.params.nquartets % (breaks * ncpus)
LOGGER.info('nquarts = %s, chunk = %s', self.params.nquartets, self._chunksize)
## create h5 OUT empty arrays
with h5py.File(self.database.output, 'w') as io5:
io5.create_dataset('quartets', (self.params.nquartets, 4), dtype=np.uint16, chunks=(self._chunksize, 4))
io5.create_dataset('qstats', (self.params.nquartets, 4), dtype=np.uint32, chunks=(self._chunksize, 4))
io5.create_group('qboots') # depends on [control=['with'], data=['io5']]
## get starting tree, unroot, randomly resolve, ladderize
tre = ete3.Tree(self.files.guidetreefile, format=0)
#tre = toytree.tree(self.files.guidetreefile, format=0)
tre.tree.unroot()
tre.tree.resolve_polytomy(recursive=True)
tre.tree.ladderize()
## randomly sample all splits of tree and convert tip names to indices
splits = [([self.samples.index(z.name) for z in i], [self.samples.index(z.name) for z in j]) for (i, j) in tre.get_edges()]
## only keep internal splits (no single tips edges)
## this seemed to cause problems with unsampled tips
splits = [i for i in splits if all([len(j) > 1 for j in i])]
## turn each into an iterable split sampler
## if the nquartets for that split is small, then sample all of them
## if it is big, then make it a random sampler from that split
qiters = []
## how many quartets, at minimum, are we going to sample from each split?
squarts = self.params.nquartets // len(splits)
## how many iterators can be sampled to saturation?
nsaturation = 0
for split in splits:
## if small number at this split then sample all possible sets
## we will exhaust this quickly and then switch to random for
## the larger splits.
if n_choose_k(len(split[0]), 2) * n_choose_k(len(split[1]), 2) < squarts * 2:
qiter = (i + j for (i, j) in itertools.product(itertools.combinations(split[0], 2), itertools.combinations(split[1], 2)))
nsaturation += 1 # depends on [control=['if'], data=[]]
else:
## else create random sampler across that split, this is slower
## because it can propose the same split repeatedly and so we
## have to check it against the 'sampled' set.
qiter = (random_product(split[0], split[1]) for _ in xrange(self.params.nquartets))
nsaturation += 1
## store all iterators into a list
qiters.append(qiter) # depends on [control=['for'], data=['split']]
#for split in splits:
# print(split)
## make qiters infinitely cycling
qiters = itertools.cycle(qiters)
cycler = itertools.cycle(range(len(splits)))
## store visiting quartets
sampled = set() ## iterate over qiters sampling from each, if one runs out, keep
## sampling from remaining qiters. Keep going until samples is filled
with h5py.File(self.database.input, 'a') as io5:
## create data sets
io5.create_dataset('samples', (self.params.nquartets, 4), dtype=np.uint16, chunks=(self._chunksize, 4), compression='gzip')
## fill chunksize at a time for efficiency
i = 0
empty = set()
edge_targeted = 0
random_target = 0
## keep filling quartets until nquartets are sampled
while i < self.params.nquartets:
qdat = []
## keep filling this chunk until its full
while len(qdat) < self._chunksize:
## grab the next iterator
qiter = qiters.next()
cycle = cycler.next()
## sample from iterator
try:
qrtsamp = qiter.next()
if tuple(qrtsamp) not in sampled:
qdat.append(qrtsamp)
sampled.add(qrtsamp)
edge_targeted += 1 # depends on [control=['if'], data=['sampled']] # depends on [control=['try'], data=[]]
#else:
# print('repeat')
## unless iterator is empty, then skip it
except StopIteration:
empty.add(cycle) # depends on [control=['except'], data=[]]
## break when all edge samplers are empty
if len(empty) == nsaturation:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
## if array is not full then add random samples
while len(qdat) < self._chunksize:
qrtsamp = random_combination(range(len(self.samples)), 4)
if tuple(qrtsamp) not in sampled:
qdat.append(qrtsamp)
sampled.add(qrtsamp)
random_target += 1 # depends on [control=['if'], data=['sampled']] # depends on [control=['while'], data=[]]
## stick chunk into h5 array
dat = np.array(qdat, dtype=np.uint16)
io5['samples'][i:i + self._chunksize] = dat[:io5['samples'].shape[0] - i]
i += self._chunksize # depends on [control=['while'], data=['i']]
print(' equal sampling: {} edge quartets, {} random quartets '.format(edge_targeted, random_target)) # depends on [control=['with'], data=['io5']] |
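A tiny, self-contained sketch of the per-split quartet enumeration used above: choosing two tips from each side of a bipartition yields one quartet spanning that edge.

import itertools

left, right = [0, 1, 2], [3, 4, 5]
quartets = [i + j for i, j in itertools.product(
    itertools.combinations(left, 2),
    itertools.combinations(right, 2))]
print(len(quartets))  # C(3,2) * C(3,2) = 9 quartets span this split
print(quartets[0])    # (0, 1, 3, 4)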
def _interpolated_template(self, templateid):
"""Return an interpolator for the given template"""
phase, y = self._get_template_by_id(templateid)
# double-check that phase ranges from 0 to 1
assert phase.min() >= 0
assert phase.max() <= 1
# at the start and end points, we need to add ~5 points to make sure
# the spline & derivatives wrap appropriately
phase = np.concatenate([phase[-5:] - 1, phase, phase[:5] + 1])
y = np.concatenate([y[-5:], y, y[:5]])
# Univariate spline allows for derivatives; use this!
return UnivariateSpline(phase, y, s=0, k=5) | def function[_interpolated_template, parameter[self, templateid]]:
constant[Return an interpolator for the given template]
<ast.Tuple object at 0x7da1b05ee410> assign[=] call[name[self]._get_template_by_id, parameter[name[templateid]]]
assert[compare[call[name[phase].min, parameter[]] greater_or_equal[>=] constant[0]]]
assert[compare[call[name[phase].max, parameter[]] less_or_equal[<=] constant[1]]]
variable[phase] assign[=] call[name[np].concatenate, parameter[list[[<ast.BinOp object at 0x7da1b05aa2c0>, <ast.Name object at 0x7da1b05a8730>, <ast.BinOp object at 0x7da1b05a9b10>]]]]
variable[y] assign[=] call[name[np].concatenate, parameter[list[[<ast.Subscript object at 0x7da1b05aa050>, <ast.Name object at 0x7da1b05aa2f0>, <ast.Subscript object at 0x7da1b05aaa40>]]]]
return[call[name[UnivariateSpline], parameter[name[phase], name[y]]]] | keyword[def] identifier[_interpolated_template] ( identifier[self] , identifier[templateid] ):
literal[string]
identifier[phase] , identifier[y] = identifier[self] . identifier[_get_template_by_id] ( identifier[templateid] )
keyword[assert] identifier[phase] . identifier[min] ()>= literal[int]
keyword[assert] identifier[phase] . identifier[max] ()<= literal[int]
identifier[phase] = identifier[np] . identifier[concatenate] ([ identifier[phase] [- literal[int] :]- literal[int] , identifier[phase] , identifier[phase] [: literal[int] ]+ literal[int] ])
identifier[y] = identifier[np] . identifier[concatenate] ([ identifier[y] [- literal[int] :], identifier[y] , identifier[y] [: literal[int] ]])
keyword[return] identifier[UnivariateSpline] ( identifier[phase] , identifier[y] , identifier[s] = literal[int] , identifier[k] = literal[int] ) | def _interpolated_template(self, templateid):
"""Return an interpolator for the given template"""
(phase, y) = self._get_template_by_id(templateid)
# double-check that phase ranges from 0 to 1
assert phase.min() >= 0
assert phase.max() <= 1
# at the start and end points, we need to add ~5 points to make sure
# the spline & derivatives wrap appropriately
phase = np.concatenate([phase[-5:] - 1, phase, phase[:5] + 1])
y = np.concatenate([y[-5:], y, y[:5]])
# Univariate spline allows for derivatives; use this!
return UnivariateSpline(phase, y, s=0, k=5) |
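A self-contained sketch of the wrap-padding trick above: duplicating about five points past each end of the unit phase interval keeps the interpolating spline and its derivatives smooth across the 0/1 boundary.

import numpy as np
from scipy.interpolate import UnivariateSpline

phase = np.linspace(0, 1, 50, endpoint=False)
y = np.sin(2 * np.pi * phase)
# pad both ends so the periodic template wraps smoothly
phase_p = np.concatenate([phase[-5:] - 1, phase, phase[:5] + 1])
y_p = np.concatenate([y[-5:], y, y[:5]])
spl = UnivariateSpline(phase_p, y_p, s=0, k=5)
print(spl(0.0), spl(1.0))  # nearly identical: the spline wraps cleanly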
def _dict_values_sorted_by_key(dictionary):
# This should use `yield from` instead.
"""Internal helper to return the values of a dictionary, sorted by key.
"""
for _, value in sorted(dictionary.iteritems(), key=operator.itemgetter(0)):
yield value | def function[_dict_values_sorted_by_key, parameter[dictionary]]:
constant[Internal helper to return the values of a dictionary, sorted by key.
]
for taget[tuple[[<ast.Name object at 0x7da1b0b810f0>, <ast.Name object at 0x7da1b0b80820>]]] in starred[call[name[sorted], parameter[call[name[dictionary].iteritems, parameter[]]]]] begin[:]
<ast.Yield object at 0x7da1b0b82c50> | keyword[def] identifier[_dict_values_sorted_by_key] ( identifier[dictionary] ):
literal[string]
keyword[for] identifier[_] , identifier[value] keyword[in] identifier[sorted] ( identifier[dictionary] . identifier[iteritems] (), identifier[key] = identifier[operator] . identifier[itemgetter] ( literal[int] )):
keyword[yield] identifier[value] | def _dict_values_sorted_by_key(dictionary):
# This should use `yield from` instead.
'Internal helper to return the values of a dictionary, sorted by key.\n '
for (_, value) in sorted(dictionary.iteritems(), key=operator.itemgetter(0)):
yield value # depends on [control=['for'], data=[]] |
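As the leading comment suggests, a Python 3 rendition with `yield from` is straightforward; a sketch:

def dict_values_sorted_by_key(dictionary):
    """Yield the values of a dictionary, sorted by key."""
    yield from (value for _, value in sorted(dictionary.items()))

print(list(dict_values_sorted_by_key({'b': 2, 'a': 1})))  # [1, 2]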
def get_show_function_result(self, ddoc_id, show_name, doc_id):
"""
Retrieves a formatted document from the specified database
based on the show function provided. Show functions, for example,
are used when you want to access Cloudant directly from a browser,
and need data to be returned in a different format, such as HTML.
For example:
.. code-block:: python
# Assuming that 'show001' exists as part of the
# 'ddoc001' design document in the remote database...
# Retrieve a formatted 'doc001' document where the show function is 'show001'
resp = db.get_show_function_result('ddoc001', 'show001', 'doc001')
# Process resp, the formatted document (in text format).
For more detail on show functions, refer to the
`Cloudant show documentation <https://console.bluemix.net/docs/services/Cloudant/api/
design_documents.html#show-functions>`_.
:param str ddoc_id: Design document id used to get the result.
:param str show_name: Name used in part to identify the
show function.
:param str doc_id: The ID of the document to show.
:return: Formatted document result data in text format
"""
ddoc = DesignDocument(self, ddoc_id)
headers = {'Content-Type': 'application/json'}
resp = get_docs(self.r_session,
'/'.join([ddoc.document_url, '_show', show_name, doc_id]),
self.client.encoder,
headers)
return resp.text | def function[get_show_function_result, parameter[self, ddoc_id, show_name, doc_id]]:
constant[
Retrieves a formatted document from the specified database
based on the show function provided. Show functions, for example,
are used when you want to access Cloudant directly from a browser,
and need data to be returned in a different format, such as HTML.
For example:
.. code-block:: python
# Assuming that 'show001' exists as part of the
# 'ddoc001' design document in the remote database...
# Retrieve a formatted 'doc001' document where the show function is 'show001'
resp = db.get_show_function_result('ddoc001', 'show001', 'doc001')
# Process resp, the formatted document (in text format).
For more detail on show functions, refer to the
`Cloudant show documentation <https://console.bluemix.net/docs/services/Cloudant/api/
design_documents.html#show-functions>`_.
:param str ddoc_id: Design document id used to get the result.
:param str show_name: Name used in part to identify the
show function.
:param str doc_id: The ID of the document to show.
:return: Formatted document result data in text format
]
variable[ddoc] assign[=] call[name[DesignDocument], parameter[name[self], name[ddoc_id]]]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da20c7c8b80>], [<ast.Constant object at 0x7da20c7cb910>]]
variable[resp] assign[=] call[name[get_docs], parameter[name[self].r_session, call[constant[/].join, parameter[list[[<ast.Attribute object at 0x7da20c7cb640>, <ast.Constant object at 0x7da20c7ca4a0>, <ast.Name object at 0x7da20c7c92a0>, <ast.Name object at 0x7da20c7cbb80>]]]], name[self].client.encoder, name[headers]]]
return[name[resp].text] | keyword[def] identifier[get_show_function_result] ( identifier[self] , identifier[ddoc_id] , identifier[show_name] , identifier[doc_id] ):
literal[string]
identifier[ddoc] = identifier[DesignDocument] ( identifier[self] , identifier[ddoc_id] )
identifier[headers] ={ literal[string] : literal[string] }
identifier[resp] = identifier[get_docs] ( identifier[self] . identifier[r_session] ,
literal[string] . identifier[join] ([ identifier[ddoc] . identifier[document_url] , literal[string] , identifier[show_name] , identifier[doc_id] ]),
identifier[self] . identifier[client] . identifier[encoder] ,
identifier[headers] )
keyword[return] identifier[resp] . identifier[text] | def get_show_function_result(self, ddoc_id, show_name, doc_id):
"""
Retrieves a formatted document from the specified database
based on the show function provided. Show functions, for example,
are used when you want to access Cloudant directly from a browser,
and need data to be returned in a different format, such as HTML.
For example:
.. code-block:: python
# Assuming that 'show001' exists as part of the
# 'ddoc001' design document in the remote database...
# Retrieve a formatted 'doc001' document where the show function is 'show001'
resp = db.get_show_function_result('ddoc001', 'show001', 'doc001')
# Process resp, the formatted document (in text format).
For more detail on show functions, refer to the
`Cloudant show documentation <https://console.bluemix.net/docs/services/Cloudant/api/
design_documents.html#show-functions>`_.
:param str ddoc_id: Design document id used to get the result.
:param str show_name: Name used in part to identify the
show function.
:param str doc_id: The ID of the document to show.
:return: Formatted document result data in text format
"""
ddoc = DesignDocument(self, ddoc_id)
headers = {'Content-Type': 'application/json'}
resp = get_docs(self.r_session, '/'.join([ddoc.document_url, '_show', show_name, doc_id]), self.client.encoder, headers)
return resp.text |
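For reference, the call above maps onto the standard CouchDB/Cloudant show endpoint, GET {db}/_design/{ddoc}/_show/{show}/{docid}; the account and credentials in this sketch are placeholders.

import requests

url = ('https://ACCOUNT.cloudant.com/db001'
       '/_design/ddoc001/_show/show001/doc001')
resp = requests.get(url, auth=('USER', 'PASS'))
print(resp.text)  # the formatted document, e.g. HTML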
def density2d(data,
channels=[0,1],
bins=1024,
gate_fraction=0.65,
xscale='logicle',
yscale='logicle',
sigma=10.0,
full_output=False):
"""
Gate that preserves events in the region with highest density.
Gate out all events in `data` but those near regions of highest
density for the two specified channels.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : list of int, list of str, optional
Two channels on which to perform gating.
bins : int or array_like or [int, int] or [array, array], optional
Bins used for gating:
- If None, use ``data.hist_bins`` to obtain bin edges for both
axes. None is not allowed if ``data.hist_bins`` is not
available.
- If int, `bins` specifies the number of bins to use for both
axes. If ``data.hist_bins`` exists, it will be used to generate
a number `bins` of bins.
- If array_like, `bins` directly specifies the bin edges to use
for both axes.
- If [int, int], each element of `bins` specifies the number of
bins for each axis. If ``data.hist_bins`` exists, use it to
generate ``bins[0]`` and ``bins[1]`` bin edges, respectively.
- If [array, array], each element of `bins` directly specifies
the bin edges to use for each axis.
- Any combination of the above, such as [int, array], [None,
int], or [array, int]. In this case, None indicates to generate
bin edges using ``data.hist_bins`` as above, int indicates the
number of bins to generate, and an array directly indicates the
bin edges. Note that None is not allowed if ``data.hist_bins``
does not exist.
gate_fraction : float, optional
Fraction of events to retain after gating. Should be between 0 and
1, inclusive.
xscale : str, optional
Scale of the bins generated for the x axis, either ``linear``,
``log``, or ``logicle``. `xscale` is ignored if `bins` is an array
or a list of arrays.
yscale : str, optional
Scale of the bins generated for the y axis, either ``linear``,
``log``, or ``logicle``. `yscale` is ignored if `bins` is an array
or a list of arrays.
sigma : scalar or sequence of scalars, optional
Standard deviation for Gaussian kernel used by
`scipy.ndimage.filters.gaussian_filter` to smooth 2D histogram
into a density.
full_output : bool, optional
Flag specifying to return additional outputs. If true, the outputs
are given as a namedtuple.
Returns
-------
gated_data : FCSData or numpy array
Gated flow cytometry data of the same format as `data`.
mask : numpy array of bool, only if ``full_output==True``
Boolean gate mask used to gate data such that ``gated_data =
data[mask]``.
contour : list of 2D numpy arrays, only if ``full_output==True``
List of 2D numpy array(s) of x-y coordinates tracing out
the edge of the gated region.
Raises
------
ValueError
If more or less than 2 channels are specified.
ValueError
If `data` has less than 2 dimensions or less than 2 events.
Exception
If an unrecognized matplotlib Path code is encountered when
attempting to generate contours.
Notes
-----
The algorithm for gating based on density works as follows:
1) Calculate 2D histogram of `data` in the specified channels.
2) Map each event from `data` to its histogram bin (implicitly
gating out any events which exist outside specified `bins`).
3) Use `gate_fraction` to determine number of events to retain
(rounded up). Only events which are not implicitly gated out
are considered.
4) Smooth 2D histogram using a 2D Gaussian filter.
5) Normalize smoothed histogram to obtain valid probability mass
function (PMF).
6) Sort bins by probability.
7) Accumulate events (starting with events belonging to bin with
highest probability ("densest") and proceeding to events
belonging to bins with lowest probability) until at least the
desired number of events is achieved. While the algorithm
attempts to get as close to `gate_fraction` fraction of events
as possible, more events may be retained based on how many
events fall into each histogram bin (since entire bins are
retained at a time, not individual events).
"""
# Extract channels in which to gate
if len(channels) != 2:
raise ValueError('2 channels should be specified')
data_ch = data[:,channels]
if data_ch.ndim == 1:
data_ch = data_ch.reshape((-1,1))
# Check gating fraction
if gate_fraction < 0 or gate_fraction > 1:
raise ValueError('gate fraction should be between 0 and 1, inclusive')
# Check dimensions
if data_ch.ndim < 2:
raise ValueError('data should have at least 2 dimensions')
if data_ch.shape[0] <= 1:
raise ValueError('data should have more than one event')
# Build output namedtuple if necessary
if full_output:
Density2dGateOutput = collections.namedtuple(
'Density2dGateOutput',
['gated_data', 'mask', 'contour'])
# If ``data_ch.hist_bins()`` exists, obtain bin edges from it if
# necessary.
if hasattr(data_ch, 'hist_bins') and \
hasattr(data_ch.hist_bins, '__call__'):
# Check whether `bins` contains information for one or two axes
if hasattr(bins, '__iter__') and len(bins)==2:
# `bins` contains separate information for both axes
# If bins for the X axis is not an iterable, get bin edges from
# ``data_ch.hist_bins()``.
if not hasattr(bins[0], '__iter__'):
bins[0] = data_ch.hist_bins(channels=0,
nbins=bins[0],
scale=xscale)
# If bins for the Y axis is not an iterable, get bin edges from
# ``data_ch.hist_bins()``.
if not hasattr(bins[1], '__iter__'):
bins[1] = data_ch.hist_bins(channels=1,
nbins=bins[1],
scale=yscale)
else:
# `bins` contains information for one axis, which will be used
# twice.
# If bins is not an iterable, get bin edges from
# ``data_ch.hist_bins()``.
if not hasattr(bins, '__iter__'):
bins = [data_ch.hist_bins(channels=0,
nbins=bins,
scale=xscale),
data_ch.hist_bins(channels=1,
nbins=bins,
scale=yscale)]
# Make 2D histogram
H,xe,ye = np.histogram2d(data_ch[:,0], data_ch[:,1], bins=bins)
# Map each event to its histogram bin by sorting events into a 2D array of
# lists which mimics the histogram.
#
# Use np.digitize to calculate the histogram bin index for each event
# given the histogram bin edges. Note that the index returned by
# np.digitize is such that bins[i-1] <= x < bins[i], whereas indexing the
# histogram will result in the following: hist[i,j] = bin corresponding to
# xedges[i] <= x < xedges[i+1] and yedges[i] <= y < yedges[i+1].
# Therefore, we need to subtract 1 from the np.digitize result to be able
# to index into the appropriate bin in the histogram.
event_indices = np.arange(data_ch.shape[0])
x_bin_indices = np.digitize(data_ch[:,0], bins=xe) - 1
y_bin_indices = np.digitize(data_ch[:,1], bins=ye) - 1
# In the current version of numpy, there exists a disparity in how
# np.histogram and np.digitize treat the rightmost bin edge (np.digitize
# is not the strict inverse of np.histogram). Specifically, np.histogram
# treats the rightmost bin interval as fully closed (rightmost bin edge is
# included in rightmost bin), whereas np.digitize treats all bins as
# half-open (you can specify which side is closed and which side is open;
# `right` parameter). The expected behavior for this gating function is to
# mimic np.histogram behavior, so we must reconcile this disparity.
x_bin_indices[data_ch[:,0] == xe[-1]] = len(xe)-2
y_bin_indices[data_ch[:,1] == ye[-1]] = len(ye)-2
# Ignore (gate out) events which exist outside specified bins.
# `np.digitize()-1` will assign events less than `bins` to bin "-1" and
# events greater than `bins` to len(bins)-1.
outlier_mask = (
(x_bin_indices == -1) |
(x_bin_indices == len(xe)-1) |
(y_bin_indices == -1) |
(y_bin_indices == len(ye)-1))
event_indices = event_indices[~outlier_mask]
x_bin_indices = x_bin_indices[~outlier_mask]
y_bin_indices = y_bin_indices[~outlier_mask]
# Create a 2D array of lists mimicking the histogram to accumulate events
# associated with each bin.
filler = np.frompyfunc(lambda x: list(), 1, 1)
H_events = np.empty_like(H, dtype=np.object)
filler(H_events, H_events)
for event_idx, x_bin_idx, y_bin_idx in \
zip(event_indices, x_bin_indices, y_bin_indices):
H_events[x_bin_idx, y_bin_idx].append(event_idx)
# Determine number of events to keep. Only consider events which have not
# been thrown out as outliers.
n = int(np.ceil(gate_fraction*float(len(event_indices))))
# n = 0 edge case (e.g. if gate_fraction = 0.0); would be handled incorrectly below, so return early here
if n == 0:
mask = np.zeros(shape=data_ch.shape[0], dtype=bool)
gated_data = data[mask]
if full_output:
return Density2dGateOutput(
gated_data=gated_data, mask=mask, contour=[])
else:
return gated_data
# Smooth 2D histogram
sH = scipy.ndimage.filters.gaussian_filter(
H,
sigma=sigma,
order=0,
mode='constant',
cval=0.0,
truncate=6.0)
# Normalize smoothed histogram to make it a valid probability mass function
D = sH / np.sum(sH)
# Sort bins by density
vD = D.ravel()
vH = H.ravel()
sidx = np.argsort(vD)[::-1]
svH = vH[sidx] # linearized counts array sorted by density
# Find minimum number of accepted bins needed to reach specified number
# of events
csvH = np.cumsum(svH)
Nidx = np.nonzero(csvH >= n)[0][0] # we want to include this index
# Get indices of events to keep
vH_events = H_events.ravel()
accepted_indices = vH_events[sidx[:(Nidx+1)]]
accepted_indices = np.array([item # flatten list of lists
for sublist in accepted_indices
for item in sublist])
accepted_indices = np.sort(accepted_indices)
# Convert list of accepted indices to boolean mask array
mask = np.zeros(shape=data.shape[0], dtype=bool)
mask[accepted_indices] = True
gated_data = data[mask]
if full_output:
# Use scikit-image to find the contour of the gated region
#
# To find the contour of the gated region, values in the 2D probability
# mass function ``D`` are used to trace contours at the level of the
# probability associated with the last accepted bin, ``vD[sidx[Nidx]]``.
# find_contours() specifies contours as collections of row and column
# indices into the density matrix. The row or column index may be
# interpolated (i.e. non-integer) for greater precision.
contours_ij = skimage.measure.find_contours(D, vD[sidx[Nidx]])
# Map contours from indices into density matrix to histogram x and y
# coordinate spaces (assume values in the density matrix are associated
# with histogram bin centers).
xc = (xe[:-1] + xe[1:]) / 2.0 # x-axis bin centers
yc = (ye[:-1] + ye[1:]) / 2.0 # y-axis bin centers
contours = [np.array([np.interp(contour_ij[:,0],
np.arange(len(xc)),
xc),
np.interp(contour_ij[:,1],
np.arange(len(yc)),
yc)]).T
for contour_ij in contours_ij]
return Density2dGateOutput(
gated_data=gated_data, mask=mask, contour=contours)
else:
return gated_data | def function[density2d, parameter[data, channels, bins, gate_fraction, xscale, yscale, sigma, full_output]]:
constant[
Gate that preserves events in the region with highest density.
Gate out all events in `data` but those near regions of highest
density for the two specified channels.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : list of int, list of str, optional
Two channels on which to perform gating.
bins : int or array_like or [int, int] or [array, array], optional
Bins used for gating:
- If None, use ``data.hist_bins`` to obtain bin edges for both
axes. None is not allowed if ``data.hist_bins`` is not
available.
- If int, `bins` specifies the number of bins to use for both
axes. If ``data.hist_bins`` exists, it will be used to generate
a number `bins` of bins.
- If array_like, `bins` directly specifies the bin edges to use
for both axes.
- If [int, int], each element of `bins` specifies the number of
bins for each axis. If ``data.hist_bins`` exists, use it to
generate ``bins[0]`` and ``bins[1]`` bin edges, respectively.
- If [array, array], each element of `bins` directly specifies
the bin edges to use for each axis.
- Any combination of the above, such as [int, array], [None,
int], or [array, int]. In this case, None indicates to generate
bin edges using ``data.hist_bins`` as above, int indicates the
number of bins to generate, and an array directly indicates the
bin edges. Note that None is not allowed if ``data.hist_bins``
does not exist.
gate_fraction : float, optional
Fraction of events to retain after gating. Should be between 0 and
1, inclusive.
xscale : str, optional
Scale of the bins generated for the x axis, either ``linear``,
``log``, or ``logicle``. `xscale` is ignored if `bins` is an array
or a list of arrays.
yscale : str, optional
Scale of the bins generated for the y axis, either ``linear``,
``log``, or ``logicle``. `yscale` is ignored if `bins` is an array
or a list of arrays.
sigma : scalar or sequence of scalars, optional
Standard deviation for Gaussian kernel used by
`scipy.ndimage.filters.gaussian_filter` to smooth 2D histogram
into a density.
full_output : bool, optional
Flag specifying to return additional outputs. If true, the outputs
are given as a namedtuple.
Returns
-------
gated_data : FCSData or numpy array
Gated flow cytometry data of the same format as `data`.
mask : numpy array of bool, only if ``full_output==True``
Boolean gate mask used to gate data such that ``gated_data =
data[mask]``.
contour : list of 2D numpy arrays, only if ``full_output==True``
List of 2D numpy array(s) of x-y coordinates tracing out
the edge of the gated region.
Raises
------
ValueError
If more or less than 2 channels are specified.
ValueError
If `data` has less than 2 dimensions or less than 2 events.
Exception
If an unrecognized matplotlib Path code is encountered when
attempting to generate contours.
Notes
-----
The algorithm for gating based on density works as follows:
1) Calculate 2D histogram of `data` in the specified channels.
2) Map each event from `data` to its histogram bin (implicitly
gating out any events which exist outside specified `bins`).
3) Use `gate_fraction` to determine number of events to retain
(rounded up). Only events which are not implicitly gated out
are considered.
4) Smooth 2D histogram using a 2D Gaussian filter.
5) Normalize smoothed histogram to obtain valid probability mass
function (PMF).
6) Sort bins by probability.
7) Accumulate events (starting with events belonging to bin with
highest probability ("densest") and proceeding to events
belonging to bins with lowest probability) until at least the
desired number of events is achieved. While the algorithm
attempts to get as close to `gate_fraction` fraction of events
as possible, more events may be retained based on how many
events fall into each histogram bin (since entire bins are
retained at a time, not individual events).
]
if compare[call[name[len], parameter[name[channels]]] not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da1b1bfb820>
variable[data_ch] assign[=] call[name[data]][tuple[[<ast.Slice object at 0x7da1b1bf9930>, <ast.Name object at 0x7da1b1bf94e0>]]]
if compare[name[data_ch].ndim equal[==] constant[1]] begin[:]
variable[data_ch] assign[=] call[name[data_ch].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da1b1bf9480>, <ast.Constant object at 0x7da1b1bf87f0>]]]]
if <ast.BoolOp object at 0x7da1b1bf89a0> begin[:]
<ast.Raise object at 0x7da1b1bf86a0>
if compare[name[data_ch].ndim less[<] constant[2]] begin[:]
<ast.Raise object at 0x7da1b1bf8550>
if compare[call[name[data_ch].shape][constant[0]] less_or_equal[<=] constant[1]] begin[:]
<ast.Raise object at 0x7da1b1bf8d90>
if name[full_output] begin[:]
variable[Density2dGateOutput] assign[=] call[name[collections].namedtuple, parameter[constant[Density2dGateOutput], list[[<ast.Constant object at 0x7da1b1bf8f10>, <ast.Constant object at 0x7da1b1bf8dc0>, <ast.Constant object at 0x7da1b1bf8e20>]]]]
if <ast.BoolOp object at 0x7da1b1bf8be0> begin[:]
if <ast.BoolOp object at 0x7da204621960> begin[:]
if <ast.UnaryOp object at 0x7da2046212a0> begin[:]
call[name[bins]][constant[0]] assign[=] call[name[data_ch].hist_bins, parameter[]]
if <ast.UnaryOp object at 0x7da204622f20> begin[:]
call[name[bins]][constant[1]] assign[=] call[name[data_ch].hist_bins, parameter[]]
<ast.Tuple object at 0x7da204621300> assign[=] call[name[np].histogram2d, parameter[call[name[data_ch]][tuple[[<ast.Slice object at 0x7da2046200a0>, <ast.Constant object at 0x7da204622650>]]], call[name[data_ch]][tuple[[<ast.Slice object at 0x7da204623280>, <ast.Constant object at 0x7da204620700>]]]]]
variable[event_indices] assign[=] call[name[np].arange, parameter[call[name[data_ch].shape][constant[0]]]]
variable[x_bin_indices] assign[=] binary_operation[call[name[np].digitize, parameter[call[name[data_ch]][tuple[[<ast.Slice object at 0x7da2046234c0>, <ast.Constant object at 0x7da204620490>]]]]] - constant[1]]
variable[y_bin_indices] assign[=] binary_operation[call[name[np].digitize, parameter[call[name[data_ch]][tuple[[<ast.Slice object at 0x7da204623220>, <ast.Constant object at 0x7da204621de0>]]]]] - constant[1]]
call[name[x_bin_indices]][compare[call[name[data_ch]][tuple[[<ast.Slice object at 0x7da204623b20>, <ast.Constant object at 0x7da204622dd0>]]] equal[==] call[name[xe]][<ast.UnaryOp object at 0x7da18bcc9000>]]] assign[=] binary_operation[call[name[len], parameter[name[xe]]] - constant[2]]
call[name[y_bin_indices]][compare[call[name[data_ch]][tuple[[<ast.Slice object at 0x7da18bcc9330>, <ast.Constant object at 0x7da18bccba00>]]] equal[==] call[name[ye]][<ast.UnaryOp object at 0x7da18bcc9ab0>]]] assign[=] binary_operation[call[name[len], parameter[name[ye]]] - constant[2]]
variable[outlier_mask] assign[=] binary_operation[binary_operation[binary_operation[compare[name[x_bin_indices] equal[==] <ast.UnaryOp object at 0x7da18bcc88e0>] <ast.BitOr object at 0x7da2590d6aa0> compare[name[x_bin_indices] equal[==] binary_operation[call[name[len], parameter[name[xe]]] - constant[1]]]] <ast.BitOr object at 0x7da2590d6aa0> compare[name[y_bin_indices] equal[==] <ast.UnaryOp object at 0x7da18bcc82b0>]] <ast.BitOr object at 0x7da2590d6aa0> compare[name[y_bin_indices] equal[==] binary_operation[call[name[len], parameter[name[ye]]] - constant[1]]]]
variable[event_indices] assign[=] call[name[event_indices]][<ast.UnaryOp object at 0x7da1b1ca2e00>]
variable[x_bin_indices] assign[=] call[name[x_bin_indices]][<ast.UnaryOp object at 0x7da1b1ca2bc0>]
variable[y_bin_indices] assign[=] call[name[y_bin_indices]][<ast.UnaryOp object at 0x7da1b1ca1f60>]
variable[filler] assign[=] call[name[np].frompyfunc, parameter[<ast.Lambda object at 0x7da1b1ca24d0>, constant[1], constant[1]]]
variable[H_events] assign[=] call[name[np].empty_like, parameter[name[H]]]
call[name[filler], parameter[name[H_events], name[H_events]]]
for taget[tuple[[<ast.Name object at 0x7da1b1ca1360>, <ast.Name object at 0x7da1b1ca3310>, <ast.Name object at 0x7da1b1ca1750>]]] in starred[call[name[zip], parameter[name[event_indices], name[x_bin_indices], name[y_bin_indices]]]] begin[:]
call[call[name[H_events]][tuple[[<ast.Name object at 0x7da1b1ca2110>, <ast.Name object at 0x7da1b1ca0c40>]]].append, parameter[name[event_idx]]]
variable[n] assign[=] call[name[int], parameter[call[name[np].ceil, parameter[binary_operation[name[gate_fraction] * call[name[float], parameter[call[name[len], parameter[name[event_indices]]]]]]]]]]
if compare[name[n] equal[==] constant[0]] begin[:]
variable[mask] assign[=] call[name[np].zeros, parameter[]]
variable[gated_data] assign[=] call[name[data]][name[mask]]
if name[full_output] begin[:]
return[call[name[Density2dGateOutput], parameter[]]]
variable[sH] assign[=] call[name[scipy].ndimage.filters.gaussian_filter, parameter[name[H]]]
variable[D] assign[=] binary_operation[name[sH] / call[name[np].sum, parameter[name[sH]]]]
variable[vD] assign[=] call[name[D].ravel, parameter[]]
variable[vH] assign[=] call[name[H].ravel, parameter[]]
variable[sidx] assign[=] call[call[name[np].argsort, parameter[name[vD]]]][<ast.Slice object at 0x7da1b1ca24a0>]
variable[svH] assign[=] call[name[vH]][name[sidx]]
variable[csvH] assign[=] call[name[np].cumsum, parameter[name[svH]]]
variable[Nidx] assign[=] call[call[call[name[np].nonzero, parameter[compare[name[csvH] greater_or_equal[>=] name[n]]]]][constant[0]]][constant[0]]
variable[vH_events] assign[=] call[name[H_events].ravel, parameter[]]
variable[accepted_indices] assign[=] call[name[vH_events]][call[name[sidx]][<ast.Slice object at 0x7da1b1ca1510>]]
variable[accepted_indices] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b1ca1390>]]
variable[accepted_indices] assign[=] call[name[np].sort, parameter[name[accepted_indices]]]
variable[mask] assign[=] call[name[np].zeros, parameter[]]
call[name[mask]][name[accepted_indices]] assign[=] constant[True]
variable[gated_data] assign[=] call[name[data]][name[mask]]
if name[full_output] begin[:]
variable[contours_ij] assign[=] call[name[skimage].measure.find_contours, parameter[name[D], call[name[vD]][call[name[sidx]][name[Nidx]]]]]
variable[xc] assign[=] binary_operation[binary_operation[call[name[xe]][<ast.Slice object at 0x7da18f58ceb0>] + call[name[xe]][<ast.Slice object at 0x7da18f58fe50>]] / constant[2.0]]
variable[yc] assign[=] binary_operation[binary_operation[call[name[ye]][<ast.Slice object at 0x7da18f58d930>] + call[name[ye]][<ast.Slice object at 0x7da18f58edd0>]] / constant[2.0]]
variable[contours] assign[=] <ast.ListComp object at 0x7da18f58cdf0>
return[call[name[Density2dGateOutput], parameter[]]] | keyword[def] identifier[density2d] ( identifier[data] ,
identifier[channels] =[ literal[int] , literal[int] ],
identifier[bins] = literal[int] ,
identifier[gate_fraction] = literal[int] ,
identifier[xscale] = literal[string] ,
identifier[yscale] = literal[string] ,
identifier[sigma] = literal[int] ,
identifier[full_output] = keyword[False] ):
literal[string]
keyword[if] identifier[len] ( identifier[channels] )!= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[data_ch] = identifier[data] [:, identifier[channels] ]
keyword[if] identifier[data_ch] . identifier[ndim] == literal[int] :
identifier[data_ch] = identifier[data_ch] . identifier[reshape] ((- literal[int] , literal[int] ))
keyword[if] identifier[gate_fraction] < literal[int] keyword[or] identifier[gate_fraction] > literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[data_ch] . identifier[ndim] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[data_ch] . identifier[shape] [ literal[int] ]<= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[full_output] :
identifier[Density2dGateOutput] = identifier[collections] . identifier[namedtuple] (
literal[string] ,
[ literal[string] , literal[string] , literal[string] ])
keyword[if] identifier[hasattr] ( identifier[data_ch] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[data_ch] . identifier[hist_bins] , literal[string] ):
keyword[if] identifier[hasattr] ( identifier[bins] , literal[string] ) keyword[and] identifier[len] ( identifier[bins] )== literal[int] :
keyword[if] keyword[not] identifier[hasattr] ( identifier[bins] [ literal[int] ], literal[string] ):
identifier[bins] [ literal[int] ]= identifier[data_ch] . identifier[hist_bins] ( identifier[channels] = literal[int] ,
identifier[nbins] = identifier[bins] [ literal[int] ],
identifier[scale] = identifier[xscale] )
keyword[if] keyword[not] identifier[hasattr] ( identifier[bins] [ literal[int] ], literal[string] ):
identifier[bins] [ literal[int] ]= identifier[data_ch] . identifier[hist_bins] ( identifier[channels] = literal[int] ,
identifier[nbins] = identifier[bins] [ literal[int] ],
identifier[scale] = identifier[yscale] )
keyword[else] :
keyword[if] keyword[not] identifier[hasattr] ( identifier[bins] , literal[string] ):
identifier[bins] =[ identifier[data_ch] . identifier[hist_bins] ( identifier[channels] = literal[int] ,
identifier[nbins] = identifier[bins] ,
identifier[scale] = identifier[xscale] ),
identifier[data_ch] . identifier[hist_bins] ( identifier[channels] = literal[int] ,
identifier[nbins] = identifier[bins] ,
identifier[scale] = identifier[yscale] )]
identifier[H] , identifier[xe] , identifier[ye] = identifier[np] . identifier[histogram2d] ( identifier[data_ch] [:, literal[int] ], identifier[data_ch] [:, literal[int] ], identifier[bins] = identifier[bins] )
identifier[event_indices] = identifier[np] . identifier[arange] ( identifier[data_ch] . identifier[shape] [ literal[int] ])
identifier[x_bin_indices] = identifier[np] . identifier[digitize] ( identifier[data_ch] [:, literal[int] ], identifier[bins] = identifier[xe] )- literal[int]
identifier[y_bin_indices] = identifier[np] . identifier[digitize] ( identifier[data_ch] [:, literal[int] ], identifier[bins] = identifier[ye] )- literal[int]
identifier[x_bin_indices] [ identifier[data_ch] [:, literal[int] ]== identifier[xe] [- literal[int] ]]= identifier[len] ( identifier[xe] )- literal[int]
identifier[y_bin_indices] [ identifier[data_ch] [:, literal[int] ]== identifier[ye] [- literal[int] ]]= identifier[len] ( identifier[ye] )- literal[int]
identifier[outlier_mask] =(
( identifier[x_bin_indices] ==- literal[int] )|
( identifier[x_bin_indices] == identifier[len] ( identifier[xe] )- literal[int] )|
( identifier[y_bin_indices] ==- literal[int] )|
( identifier[y_bin_indices] == identifier[len] ( identifier[ye] )- literal[int] ))
identifier[event_indices] = identifier[event_indices] [~ identifier[outlier_mask] ]
identifier[x_bin_indices] = identifier[x_bin_indices] [~ identifier[outlier_mask] ]
identifier[y_bin_indices] = identifier[y_bin_indices] [~ identifier[outlier_mask] ]
identifier[filler] = identifier[np] . identifier[frompyfunc] ( keyword[lambda] identifier[x] : identifier[list] (), literal[int] , literal[int] )
identifier[H_events] = identifier[np] . identifier[empty_like] ( identifier[H] , identifier[dtype] = identifier[np] . identifier[object] )
identifier[filler] ( identifier[H_events] , identifier[H_events] )
keyword[for] identifier[event_idx] , identifier[x_bin_idx] , identifier[y_bin_idx] keyword[in] identifier[zip] ( identifier[event_indices] , identifier[x_bin_indices] , identifier[y_bin_indices] ):
identifier[H_events] [ identifier[x_bin_idx] , identifier[y_bin_idx] ]. identifier[append] ( identifier[event_idx] )
identifier[n] = identifier[int] ( identifier[np] . identifier[ceil] ( identifier[gate_fraction] * identifier[float] ( identifier[len] ( identifier[event_indices] ))))
keyword[if] identifier[n] == literal[int] :
identifier[mask] = identifier[np] . identifier[zeros] ( identifier[shape] = identifier[data_ch] . identifier[shape] [ literal[int] ], identifier[dtype] = identifier[bool] )
identifier[gated_data] = identifier[data] [ identifier[mask] ]
keyword[if] identifier[full_output] :
keyword[return] identifier[Density2dGateOutput] (
identifier[gated_data] = identifier[gated_data] , identifier[mask] = identifier[mask] , identifier[contour] =[])
keyword[else] :
keyword[return] identifier[gated_data]
identifier[sH] = identifier[scipy] . identifier[ndimage] . identifier[filters] . identifier[gaussian_filter] (
identifier[H] ,
identifier[sigma] = identifier[sigma] ,
identifier[order] = literal[int] ,
identifier[mode] = literal[string] ,
identifier[cval] = literal[int] ,
identifier[truncate] = literal[int] )
identifier[D] = identifier[sH] / identifier[np] . identifier[sum] ( identifier[sH] )
identifier[vD] = identifier[D] . identifier[ravel] ()
identifier[vH] = identifier[H] . identifier[ravel] ()
identifier[sidx] = identifier[np] . identifier[argsort] ( identifier[vD] )[::- literal[int] ]
identifier[svH] = identifier[vH] [ identifier[sidx] ]
identifier[csvH] = identifier[np] . identifier[cumsum] ( identifier[svH] )
identifier[Nidx] = identifier[np] . identifier[nonzero] ( identifier[csvH] >= identifier[n] )[ literal[int] ][ literal[int] ]
identifier[vH_events] = identifier[H_events] . identifier[ravel] ()
identifier[accepted_indices] = identifier[vH_events] [ identifier[sidx] [:( identifier[Nidx] + literal[int] )]]
identifier[accepted_indices] = identifier[np] . identifier[array] ([ identifier[item]
keyword[for] identifier[sublist] keyword[in] identifier[accepted_indices]
keyword[for] identifier[item] keyword[in] identifier[sublist] ])
identifier[accepted_indices] = identifier[np] . identifier[sort] ( identifier[accepted_indices] )
identifier[mask] = identifier[np] . identifier[zeros] ( identifier[shape] = identifier[data] . identifier[shape] [ literal[int] ], identifier[dtype] = identifier[bool] )
identifier[mask] [ identifier[accepted_indices] ]= keyword[True]
identifier[gated_data] = identifier[data] [ identifier[mask] ]
keyword[if] identifier[full_output] :
identifier[contours_ij] = identifier[skimage] . identifier[measure] . identifier[find_contours] ( identifier[D] , identifier[vD] [ identifier[sidx] [ identifier[Nidx] ]])
identifier[xc] =( identifier[xe] [:- literal[int] ]+ identifier[xe] [ literal[int] :])/ literal[int]
identifier[yc] =( identifier[ye] [:- literal[int] ]+ identifier[ye] [ literal[int] :])/ literal[int]
identifier[contours] =[ identifier[np] . identifier[array] ([ identifier[np] . identifier[interp] ( identifier[contour_ij] [:, literal[int] ],
identifier[np] . identifier[arange] ( identifier[len] ( identifier[xc] )),
identifier[xc] ),
identifier[np] . identifier[interp] ( identifier[contour_ij] [:, literal[int] ],
identifier[np] . identifier[arange] ( identifier[len] ( identifier[yc] )),
identifier[yc] )]). identifier[T]
keyword[for] identifier[contour_ij] keyword[in] identifier[contours_ij] ]
keyword[return] identifier[Density2dGateOutput] (
identifier[gated_data] = identifier[gated_data] , identifier[mask] = identifier[mask] , identifier[contour] = identifier[contours] )
keyword[else] :
keyword[return] identifier[gated_data] | def density2d(data, channels=[0, 1], bins=1024, gate_fraction=0.65, xscale='logicle', yscale='logicle', sigma=10.0, full_output=False):
"""
Gate that preserves events in the region with highest density.
    Gate out all events in `data` except those near regions of highest
density for the two specified channels.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : list of int, list of str, optional
Two channels on which to perform gating.
bins : int or array_like or [int, int] or [array, array], optional
Bins used for gating:
- If None, use ``data.hist_bins`` to obtain bin edges for both
axes. None is not allowed if ``data.hist_bins`` is not
available.
- If int, `bins` specifies the number of bins to use for both
axes. If ``data.hist_bins`` exists, it will be used to generate
a number `bins` of bins.
- If array_like, `bins` directly specifies the bin edges to use
for both axes.
- If [int, int], each element of `bins` specifies the number of
bins for each axis. If ``data.hist_bins`` exists, use it to
generate ``bins[0]`` and ``bins[1]`` bin edges, respectively.
- If [array, array], each element of `bins` directly specifies
the bin edges to use for each axis.
- Any combination of the above, such as [int, array], [None,
int], or [array, int]. In this case, None indicates to generate
bin edges using ``data.hist_bins`` as above, int indicates the
number of bins to generate, and an array directly indicates the
bin edges. Note that None is not allowed if ``data.hist_bins``
does not exist.
gate_fraction : float, optional
Fraction of events to retain after gating. Should be between 0 and
1, inclusive.
xscale : str, optional
Scale of the bins generated for the x axis, either ``linear``,
        ``log``, or ``logicle``. `xscale` is ignored if `bins` is an array
or a list of arrays.
yscale : str, optional
Scale of the bins generated for the y axis, either ``linear``,
        ``log``, or ``logicle``. `yscale` is ignored if `bins` is an array
or a list of arrays.
sigma : scalar or sequence of scalars, optional
Standard deviation for Gaussian kernel used by
`scipy.ndimage.filters.gaussian_filter` to smooth 2D histogram
into a density.
full_output : bool, optional
Flag specifying to return additional outputs. If true, the outputs
are given as a namedtuple.
Returns
-------
gated_data : FCSData or numpy array
Gated flow cytometry data of the same format as `data`.
mask : numpy array of bool, only if ``full_output==True``
Boolean gate mask used to gate data such that ``gated_data =
data[mask]``.
contour : list of 2D numpy arrays, only if ``full_output==True``
List of 2D numpy array(s) of x-y coordinates tracing out
the edge of the gated region.
Raises
------
ValueError
If more or less than 2 channels are specified.
ValueError
If `data` has less than 2 dimensions or less than 2 events.
Exception
If an unrecognized matplotlib Path code is encountered when
attempting to generate contours.
Notes
-----
The algorithm for gating based on density works as follows:
1) Calculate 2D histogram of `data` in the specified channels.
2) Map each event from `data` to its histogram bin (implicitly
gating out any events which exist outside specified `bins`).
3) Use `gate_fraction` to determine number of events to retain
(rounded up). Only events which are not implicitly gated out
are considered.
4) Smooth 2D histogram using a 2D Gaussian filter.
5) Normalize smoothed histogram to obtain valid probability mass
function (PMF).
6) Sort bins by probability.
7) Accumulate events (starting with events belonging to bin with
highest probability ("densest") and proceeding to events
belonging to bins with lowest probability) until at least the
desired number of events is achieved. While the algorithm
attempts to get as close to `gate_fraction` fraction of events
as possible, more events may be retained based on how many
events fall into each histogram bin (since entire bins are
retained at a time, not individual events).
"""
# Extract channels in which to gate
if len(channels) != 2:
raise ValueError('2 channels should be specified') # depends on [control=['if'], data=[]]
data_ch = data[:, channels]
if data_ch.ndim == 1:
data_ch = data_ch.reshape((-1, 1)) # depends on [control=['if'], data=[]]
# Check gating fraction
if gate_fraction < 0 or gate_fraction > 1:
raise ValueError('gate fraction should be between 0 and 1, inclusive') # depends on [control=['if'], data=[]]
# Check dimensions
if data_ch.ndim < 2:
raise ValueError('data should have at least 2 dimensions') # depends on [control=['if'], data=[]]
if data_ch.shape[0] <= 1:
raise ValueError('data should have more than one event') # depends on [control=['if'], data=[]]
# Build output namedtuple if necessary
if full_output:
Density2dGateOutput = collections.namedtuple('Density2dGateOutput', ['gated_data', 'mask', 'contour']) # depends on [control=['if'], data=[]]
# If ``data_ch.hist_bins()`` exists, obtain bin edges from it if
# necessary.
if hasattr(data_ch, 'hist_bins') and hasattr(data_ch.hist_bins, '__call__'):
# Check whether `bins` contains information for one or two axes
if hasattr(bins, '__iter__') and len(bins) == 2:
# `bins` contains separate information for both axes
# If bins for the X axis is not an iterable, get bin edges from
# ``data_ch.hist_bins()``.
if not hasattr(bins[0], '__iter__'):
bins[0] = data_ch.hist_bins(channels=0, nbins=bins[0], scale=xscale) # depends on [control=['if'], data=[]]
# If bins for the Y axis is not an iterable, get bin edges from
# ``data_ch.hist_bins()``.
if not hasattr(bins[1], '__iter__'):
bins[1] = data_ch.hist_bins(channels=1, nbins=bins[1], scale=yscale) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# `bins` contains information for one axis, which will be used
# twice.
# If bins is not an iterable, get bin edges from
# ``data_ch.hist_bins()``.
elif not hasattr(bins, '__iter__'):
bins = [data_ch.hist_bins(channels=0, nbins=bins, scale=xscale), data_ch.hist_bins(channels=1, nbins=bins, scale=yscale)] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Make 2D histogram
(H, xe, ye) = np.histogram2d(data_ch[:, 0], data_ch[:, 1], bins=bins)
# Map each event to its histogram bin by sorting events into a 2D array of
# lists which mimics the histogram.
#
# Use np.digitize to calculate the histogram bin index for each event
# given the histogram bin edges. Note that the index returned by
# np.digitize is such that bins[i-1] <= x < bins[i], whereas indexing the
# histogram will result in the following: hist[i,j] = bin corresponding to
# xedges[i] <= x < xedges[i+1] and yedges[i] <= y < yedges[i+1].
# Therefore, we need to subtract 1 from the np.digitize result to be able
# to index into the appropriate bin in the histogram.
event_indices = np.arange(data_ch.shape[0])
x_bin_indices = np.digitize(data_ch[:, 0], bins=xe) - 1
y_bin_indices = np.digitize(data_ch[:, 1], bins=ye) - 1
# In the current version of numpy, there exists a disparity in how
# np.histogram and np.digitize treat the rightmost bin edge (np.digitize
# is not the strict inverse of np.histogram). Specifically, np.histogram
# treats the rightmost bin interval as fully closed (rightmost bin edge is
# included in rightmost bin), whereas np.digitize treats all bins as
# half-open (you can specify which side is closed and which side is open;
# `right` parameter). The expected behavior for this gating function is to
# mimic np.histogram behavior, so we must reconcile this disparity.
x_bin_indices[data_ch[:, 0] == xe[-1]] = len(xe) - 2
y_bin_indices[data_ch[:, 1] == ye[-1]] = len(ye) - 2
# Ignore (gate out) events which exist outside specified bins.
# `np.digitize()-1` will assign events less than `bins` to bin "-1" and
# events greater than `bins` to len(bins)-1.
outlier_mask = (x_bin_indices == -1) | (x_bin_indices == len(xe) - 1) | (y_bin_indices == -1) | (y_bin_indices == len(ye) - 1)
event_indices = event_indices[~outlier_mask]
x_bin_indices = x_bin_indices[~outlier_mask]
y_bin_indices = y_bin_indices[~outlier_mask]
# Create a 2D array of lists mimicking the histogram to accumulate events
# associated with each bin.
filler = np.frompyfunc(lambda x: list(), 1, 1)
H_events = np.empty_like(H, dtype=np.object)
filler(H_events, H_events)
for (event_idx, x_bin_idx, y_bin_idx) in zip(event_indices, x_bin_indices, y_bin_indices):
H_events[x_bin_idx, y_bin_idx].append(event_idx) # depends on [control=['for'], data=[]]
# Determine number of events to keep. Only consider events which have not
# been thrown out as outliers.
n = int(np.ceil(gate_fraction * float(len(event_indices))))
# n = 0 edge case (e.g. if gate_fraction = 0.0); incorrectly handled below
if n == 0:
mask = np.zeros(shape=data_ch.shape[0], dtype=bool)
gated_data = data[mask]
if full_output:
return Density2dGateOutput(gated_data=gated_data, mask=mask, contour=[]) # depends on [control=['if'], data=[]]
else:
return gated_data # depends on [control=['if'], data=[]]
# Smooth 2D histogram
sH = scipy.ndimage.filters.gaussian_filter(H, sigma=sigma, order=0, mode='constant', cval=0.0, truncate=6.0)
# Normalize smoothed histogram to make it a valid probability mass function
D = sH / np.sum(sH)
# Sort bins by density
vD = D.ravel()
vH = H.ravel()
sidx = np.argsort(vD)[::-1]
svH = vH[sidx] # linearized counts array sorted by density
# Find minimum number of accepted bins needed to reach specified number
# of events
csvH = np.cumsum(svH)
Nidx = np.nonzero(csvH >= n)[0][0] # we want to include this index
# Get indices of events to keep
vH_events = H_events.ravel()
accepted_indices = vH_events[sidx[:Nidx + 1]] # flatten list of lists
accepted_indices = np.array([item for sublist in accepted_indices for item in sublist])
accepted_indices = np.sort(accepted_indices)
# Convert list of accepted indices to boolean mask array
mask = np.zeros(shape=data.shape[0], dtype=bool)
mask[accepted_indices] = True
gated_data = data[mask]
if full_output:
# Use scikit-image to find the contour of the gated region
#
# To find the contour of the gated region, values in the 2D probability
# mass function ``D`` are used to trace contours at the level of the
# probability associated with the last accepted bin, ``vD[sidx[Nidx]]``.
# find_contours() specifies contours as collections of row and column
# indices into the density matrix. The row or column index may be
# interpolated (i.e. non-integer) for greater precision.
contours_ij = skimage.measure.find_contours(D, vD[sidx[Nidx]])
# Map contours from indices into density matrix to histogram x and y
# coordinate spaces (assume values in the density matrix are associated
# with histogram bin centers).
xc = (xe[:-1] + xe[1:]) / 2.0 # x-axis bin centers
yc = (ye[:-1] + ye[1:]) / 2.0 # y-axis bin centers
contours = [np.array([np.interp(contour_ij[:, 0], np.arange(len(xc)), xc), np.interp(contour_ij[:, 1], np.arange(len(yc)), yc)]).T for contour_ij in contours_ij]
return Density2dGateOutput(gated_data=gated_data, mask=mask, contour=contours) # depends on [control=['if'], data=[]]
else:
return gated_data |
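A minimal usage sketch for the density gate above. The gamma-distributed events, seed, bin count, and gate fraction are all invented values, and a plain NumPy array is assumed, so the `hist_bins` branch and the `xscale`/`yscale` arguments are bypassed; `density2d` and its `numpy`/`scipy` dependencies must already be importable.

import numpy as np

np.random.seed(0)
# hypothetical two-channel events; real inputs would typically be FCSData
events = np.random.gamma(shape=3.0, scale=100.0, size=(10000, 2))

gated = density2d(events, channels=[0, 1], bins=64,
                  gate_fraction=0.5, sigma=2.0)
# slightly more than half the events survive, since whole bins are retained
print(gated.shape[0] / float(events.shape[0]))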
def prerequisites(self):
"""
        Iterates through the inputs of the pipeline and determines
        all prerequisite pipelines
"""
# Loop through the inputs to the pipeline and add the instancemethods
# for the pipelines to generate each of the processed inputs
prereqs = defaultdict(set)
for input in self.inputs: # @ReservedAssignment
spec = self._study.spec(input)
# Could be an input to the study or optional acquired spec
if spec.is_spec and spec.derived:
prereqs[spec.pipeline_getter].add(input.name)
return prereqs | def function[prerequisites, parameter[self]]:
constant[
        Iterates through the inputs of the pipeline and determines
        all prerequisite pipelines
]
variable[prereqs] assign[=] call[name[defaultdict], parameter[name[set]]]
for taget[name[input]] in starred[name[self].inputs] begin[:]
variable[spec] assign[=] call[name[self]._study.spec, parameter[name[input]]]
if <ast.BoolOp object at 0x7da204564ac0> begin[:]
call[call[name[prereqs]][name[spec].pipeline_getter].add, parameter[name[input].name]]
return[name[prereqs]] | keyword[def] identifier[prerequisites] ( identifier[self] ):
literal[string]
identifier[prereqs] = identifier[defaultdict] ( identifier[set] )
keyword[for] identifier[input] keyword[in] identifier[self] . identifier[inputs] :
identifier[spec] = identifier[self] . identifier[_study] . identifier[spec] ( identifier[input] )
keyword[if] identifier[spec] . identifier[is_spec] keyword[and] identifier[spec] . identifier[derived] :
identifier[prereqs] [ identifier[spec] . identifier[pipeline_getter] ]. identifier[add] ( identifier[input] . identifier[name] )
keyword[return] identifier[prereqs] | def prerequisites(self):
"""
        Iterates through the inputs of the pipeline and determines
        all prerequisite pipelines
"""
# Loop through the inputs to the pipeline and add the instancemethods
# for the pipelines to generate each of the processed inputs
prereqs = defaultdict(set)
for input in self.inputs: # @ReservedAssignment
spec = self._study.spec(input)
# Could be an input to the study or optional acquired spec
if spec.is_spec and spec.derived:
prereqs[spec.pipeline_getter].add(input.name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['input']]
return prereqs |
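The accumulation idiom above, a `defaultdict(set)` keyed by pipeline getter, can be exercised on its own; the getter and input names below are hypothetical:

from collections import defaultdict

prereqs = defaultdict(set)
# (pipeline_getter, input_name) pairs standing in for derived-spec inputs
pairs = [('prep_pipeline', 'x'), ('prep_pipeline', 'y'), ('fit_pipeline', 'x')]
for getter, input_name in pairs:
    prereqs[getter].add(input_name)
print(dict(prereqs))  # {'prep_pipeline': {'x', 'y'}, 'fit_pipeline': {'x'}}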
def _simplify_arguments(arguments):
"""
If positional or keyword arguments are empty return only one or the other.
"""
if len(arguments.args) == 0:
return arguments.kwargs
elif len(arguments.kwargs) == 0:
return arguments.args
else:
return arguments | def function[_simplify_arguments, parameter[arguments]]:
constant[
If positional or keyword arguments are empty return only one or the other.
]
if compare[call[name[len], parameter[name[arguments].args]] equal[==] constant[0]] begin[:]
return[name[arguments].kwargs] | keyword[def] identifier[_simplify_arguments] ( identifier[arguments] ):
literal[string]
keyword[if] identifier[len] ( identifier[arguments] . identifier[args] )== literal[int] :
keyword[return] identifier[arguments] . identifier[kwargs]
keyword[elif] identifier[len] ( identifier[arguments] . identifier[kwargs] )== literal[int] :
keyword[return] identifier[arguments] . identifier[args]
keyword[else] :
keyword[return] identifier[arguments] | def _simplify_arguments(arguments):
"""
If positional or keyword arguments are empty return only one or the other.
"""
if len(arguments.args) == 0:
return arguments.kwargs # depends on [control=['if'], data=[]]
elif len(arguments.kwargs) == 0:
return arguments.args # depends on [control=['if'], data=[]]
else:
return arguments |
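A quick check of all three branches; the `Arguments` namedtuple is a hypothetical stand-in for whatever object carries `.args` and `.kwargs` here:

from collections import namedtuple

Arguments = namedtuple('Arguments', ['args', 'kwargs'])

print(_simplify_arguments(Arguments(args=(1, 2), kwargs={})))      # (1, 2)
print(_simplify_arguments(Arguments(args=(), kwargs={'a': 1})))    # {'a': 1}
print(_simplify_arguments(Arguments(args=(1,), kwargs={'a': 1})))  # unchanged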
def get_relationship_bundle(manager, relationship_id=None, legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j id
:param legacy: Backwards compatibility
:type relationship_id: int
:type legacy: bool
:rtype: dictionary
"""
q = """
MATCH (start)-[r]->(end)
WHERE ID(r) = {relationship_id}
RETURN start, r, end
"""
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single()
if record is None:
raise exceptions.RelationshipNotFound(manager, int(relationship_id))
if legacy:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'].properties['handle_id'],
'end': record['end'].properties['handle_id'],
}
else:
bundle = {
'type': record['r'].type,
'id': int(relationship_id),
'data': record['r'].properties,
'start': record['start'],
'end': record['end'],
}
return bundle | def function[get_relationship_bundle, parameter[manager, relationship_id, legacy]]:
constant[
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j id
:param legacy: Backwards compatibility
:type relationship_id: int
:type legacy: bool
:rtype: dictionary
]
variable[q] assign[=] constant[
MATCH (start)-[r]->(end)
WHERE ID(r) = {relationship_id}
RETURN start, r, end
]
with name[manager].session begin[:]
variable[record] assign[=] call[call[name[s].run, parameter[name[q], dictionary[[<ast.Constant object at 0x7da2044c0dc0>], [<ast.Call object at 0x7da2044c1690>]]]].single, parameter[]]
if compare[name[record] is constant[None]] begin[:]
<ast.Raise object at 0x7da2044c2ef0>
if name[legacy] begin[:]
variable[bundle] assign[=] dictionary[[<ast.Constant object at 0x7da2044c3070>, <ast.Constant object at 0x7da2044c1240>, <ast.Constant object at 0x7da2044c31c0>, <ast.Constant object at 0x7da2044c2800>, <ast.Constant object at 0x7da2044c21a0>], [<ast.Attribute object at 0x7da2044c28c0>, <ast.Call object at 0x7da2044c16c0>, <ast.Attribute object at 0x7da2044c1990>, <ast.Subscript object at 0x7da2044c2380>, <ast.Subscript object at 0x7da2044c0d30>]]
return[name[bundle]] | keyword[def] identifier[get_relationship_bundle] ( identifier[manager] , identifier[relationship_id] = keyword[None] , identifier[legacy] = keyword[True] ):
literal[string]
identifier[q] = literal[string]
keyword[with] identifier[manager] . identifier[session] keyword[as] identifier[s] :
identifier[record] = identifier[s] . identifier[run] ( identifier[q] ,{ literal[string] : identifier[int] ( identifier[relationship_id] )}). identifier[single] ()
keyword[if] identifier[record] keyword[is] keyword[None] :
keyword[raise] identifier[exceptions] . identifier[RelationshipNotFound] ( identifier[manager] , identifier[int] ( identifier[relationship_id] ))
keyword[if] identifier[legacy] :
identifier[bundle] ={
literal[string] : identifier[record] [ literal[string] ]. identifier[type] ,
literal[string] : identifier[int] ( identifier[relationship_id] ),
literal[string] : identifier[record] [ literal[string] ]. identifier[properties] ,
literal[string] : identifier[record] [ literal[string] ]. identifier[properties] [ literal[string] ],
literal[string] : identifier[record] [ literal[string] ]. identifier[properties] [ literal[string] ],
}
keyword[else] :
identifier[bundle] ={
literal[string] : identifier[record] [ literal[string] ]. identifier[type] ,
literal[string] : identifier[int] ( identifier[relationship_id] ),
literal[string] : identifier[record] [ literal[string] ]. identifier[properties] ,
literal[string] : identifier[record] [ literal[string] ],
literal[string] : identifier[record] [ literal[string] ],
}
keyword[return] identifier[bundle] | def get_relationship_bundle(manager, relationship_id=None, legacy=True):
"""
:param manager: Neo4jDBSessionManager
:param relationship_id: Internal Neo4j id
:param legacy: Backwards compatibility
:type relationship_id: int
:type legacy: bool
:rtype: dictionary
"""
q = '\n MATCH (start)-[r]->(end)\n WHERE ID(r) = {relationship_id}\n RETURN start, r, end\n '
with manager.session as s:
record = s.run(q, {'relationship_id': int(relationship_id)}).single() # depends on [control=['with'], data=['s']]
if record is None:
raise exceptions.RelationshipNotFound(manager, int(relationship_id)) # depends on [control=['if'], data=[]]
if legacy:
bundle = {'type': record['r'].type, 'id': int(relationship_id), 'data': record['r'].properties, 'start': record['start'].properties['handle_id'], 'end': record['end'].properties['handle_id']} # depends on [control=['if'], data=[]]
else:
bundle = {'type': record['r'].type, 'id': int(relationship_id), 'data': record['r'].properties, 'start': record['start'], 'end': record['end']}
return bundle |
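For orientation, the legacy-mode return value has this shape; every value below is made up rather than read from a real Neo4j session:

bundle = {
    'type': 'Depends_on',          # relationship type
    'id': 42,                      # internal Neo4j relationship id
    'data': {'created': '2015-01-01'},
    'start': '101',                # handle_id of the start node
    'end': '102',                  # handle_id of the end node
}
print('{start} -[{type}]-> {end}'.format(**bundle))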
def find_mip(self, direction, mechanism, purview, allow_neg=False):
"""Find the ratio minimum information partition for a mechanism
over a purview.
Args:
direction (str): |CAUSE| or |EFFECT|
mechanism (tuple[int]): A mechanism.
purview (tuple[int]): A purview.
Keyword Args:
allow_neg (boolean): If true, ``alpha`` is allowed to be negative.
Otherwise, negative values of ``alpha`` will be treated as if
they were 0.
Returns:
AcRepertoireIrreducibilityAnalysis: The irreducibility analysis for
the mechanism.
"""
alpha_min = float('inf')
probability = self.probability(direction, mechanism, purview)
for partition in mip_partitions(mechanism, purview, self.node_labels):
partitioned_probability = self.partitioned_probability(
direction, partition)
alpha = log2(probability / partitioned_probability)
# First check for 0
# Default: don't count contrary causes and effects
if utils.eq(alpha, 0) or (alpha < 0 and not allow_neg):
return AcRepertoireIrreducibilityAnalysis(
state=self.mechanism_state(direction),
direction=direction,
mechanism=mechanism,
purview=purview,
partition=partition,
probability=probability,
partitioned_probability=partitioned_probability,
node_labels=self.node_labels,
alpha=0.0
)
# Then take closest to 0
if (abs(alpha_min) - abs(alpha)) > constants.EPSILON:
alpha_min = alpha
acria = AcRepertoireIrreducibilityAnalysis(
state=self.mechanism_state(direction),
direction=direction,
mechanism=mechanism,
purview=purview,
partition=partition,
probability=probability,
partitioned_probability=partitioned_probability,
node_labels=self.node_labels,
alpha=alpha_min
)
return acria | def function[find_mip, parameter[self, direction, mechanism, purview, allow_neg]]:
constant[Find the ratio minimum information partition for a mechanism
over a purview.
Args:
direction (str): |CAUSE| or |EFFECT|
mechanism (tuple[int]): A mechanism.
purview (tuple[int]): A purview.
Keyword Args:
allow_neg (boolean): If true, ``alpha`` is allowed to be negative.
Otherwise, negative values of ``alpha`` will be treated as if
they were 0.
Returns:
AcRepertoireIrreducibilityAnalysis: The irreducibility analysis for
the mechanism.
]
variable[alpha_min] assign[=] call[name[float], parameter[constant[inf]]]
variable[probability] assign[=] call[name[self].probability, parameter[name[direction], name[mechanism], name[purview]]]
for taget[name[partition]] in starred[call[name[mip_partitions], parameter[name[mechanism], name[purview], name[self].node_labels]]] begin[:]
variable[partitioned_probability] assign[=] call[name[self].partitioned_probability, parameter[name[direction], name[partition]]]
variable[alpha] assign[=] call[name[log2], parameter[binary_operation[name[probability] / name[partitioned_probability]]]]
if <ast.BoolOp object at 0x7da1b23454e0> begin[:]
return[call[name[AcRepertoireIrreducibilityAnalysis], parameter[]]]
if compare[binary_operation[call[name[abs], parameter[name[alpha_min]]] - call[name[abs], parameter[name[alpha]]]] greater[>] name[constants].EPSILON] begin[:]
variable[alpha_min] assign[=] name[alpha]
variable[acria] assign[=] call[name[AcRepertoireIrreducibilityAnalysis], parameter[]]
return[name[acria]] | keyword[def] identifier[find_mip] ( identifier[self] , identifier[direction] , identifier[mechanism] , identifier[purview] , identifier[allow_neg] = keyword[False] ):
literal[string]
identifier[alpha_min] = identifier[float] ( literal[string] )
identifier[probability] = identifier[self] . identifier[probability] ( identifier[direction] , identifier[mechanism] , identifier[purview] )
keyword[for] identifier[partition] keyword[in] identifier[mip_partitions] ( identifier[mechanism] , identifier[purview] , identifier[self] . identifier[node_labels] ):
identifier[partitioned_probability] = identifier[self] . identifier[partitioned_probability] (
identifier[direction] , identifier[partition] )
identifier[alpha] = identifier[log2] ( identifier[probability] / identifier[partitioned_probability] )
keyword[if] identifier[utils] . identifier[eq] ( identifier[alpha] , literal[int] ) keyword[or] ( identifier[alpha] < literal[int] keyword[and] keyword[not] identifier[allow_neg] ):
keyword[return] identifier[AcRepertoireIrreducibilityAnalysis] (
identifier[state] = identifier[self] . identifier[mechanism_state] ( identifier[direction] ),
identifier[direction] = identifier[direction] ,
identifier[mechanism] = identifier[mechanism] ,
identifier[purview] = identifier[purview] ,
identifier[partition] = identifier[partition] ,
identifier[probability] = identifier[probability] ,
identifier[partitioned_probability] = identifier[partitioned_probability] ,
identifier[node_labels] = identifier[self] . identifier[node_labels] ,
identifier[alpha] = literal[int]
)
keyword[if] ( identifier[abs] ( identifier[alpha_min] )- identifier[abs] ( identifier[alpha] ))> identifier[constants] . identifier[EPSILON] :
identifier[alpha_min] = identifier[alpha]
identifier[acria] = identifier[AcRepertoireIrreducibilityAnalysis] (
identifier[state] = identifier[self] . identifier[mechanism_state] ( identifier[direction] ),
identifier[direction] = identifier[direction] ,
identifier[mechanism] = identifier[mechanism] ,
identifier[purview] = identifier[purview] ,
identifier[partition] = identifier[partition] ,
identifier[probability] = identifier[probability] ,
identifier[partitioned_probability] = identifier[partitioned_probability] ,
identifier[node_labels] = identifier[self] . identifier[node_labels] ,
identifier[alpha] = identifier[alpha_min]
)
keyword[return] identifier[acria] | def find_mip(self, direction, mechanism, purview, allow_neg=False):
"""Find the ratio minimum information partition for a mechanism
over a purview.
Args:
direction (str): |CAUSE| or |EFFECT|
mechanism (tuple[int]): A mechanism.
purview (tuple[int]): A purview.
Keyword Args:
allow_neg (boolean): If true, ``alpha`` is allowed to be negative.
Otherwise, negative values of ``alpha`` will be treated as if
they were 0.
Returns:
AcRepertoireIrreducibilityAnalysis: The irreducibility analysis for
the mechanism.
"""
alpha_min = float('inf')
probability = self.probability(direction, mechanism, purview)
for partition in mip_partitions(mechanism, purview, self.node_labels):
partitioned_probability = self.partitioned_probability(direction, partition)
alpha = log2(probability / partitioned_probability)
# First check for 0
# Default: don't count contrary causes and effects
if utils.eq(alpha, 0) or (alpha < 0 and (not allow_neg)):
return AcRepertoireIrreducibilityAnalysis(state=self.mechanism_state(direction), direction=direction, mechanism=mechanism, purview=purview, partition=partition, probability=probability, partitioned_probability=partitioned_probability, node_labels=self.node_labels, alpha=0.0) # depends on [control=['if'], data=[]]
# Then take closest to 0
if abs(alpha_min) - abs(alpha) > constants.EPSILON:
alpha_min = alpha
acria = AcRepertoireIrreducibilityAnalysis(state=self.mechanism_state(direction), direction=direction, mechanism=mechanism, purview=purview, partition=partition, probability=probability, partitioned_probability=partitioned_probability, node_labels=self.node_labels, alpha=alpha_min) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['partition']]
return acria |
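The irreducibility value above is simply the base-2 log of the ratio between the whole and partitioned probabilities; a worked number with invented probabilities:

from math import log2

probability = 0.40              # hypothetical whole-mechanism probability
partitioned_probability = 0.25  # hypothetical probability under a partition

alpha = log2(probability / partitioned_probability)
print(round(alpha, 4))  # 0.6781 -> this partition loses information
# alpha == 0, or alpha < 0 without allow_neg, would end the search early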
def f_restore_default(self):
""" Restores the default value in all explored parameters and sets the
v_idx property back to -1 and v_crun to None."""
self._idx = -1
self._crun = None
for param in self._explored_parameters.values():
if param is not None:
param._restore_default() | def function[f_restore_default, parameter[self]]:
constant[ Restores the default value in all explored parameters and sets the
v_idx property back to -1 and v_crun to None.]
name[self]._idx assign[=] <ast.UnaryOp object at 0x7da18f720af0>
name[self]._crun assign[=] constant[None]
for taget[name[param]] in starred[call[name[self]._explored_parameters.values, parameter[]]] begin[:]
if compare[name[param] is_not constant[None]] begin[:]
call[name[param]._restore_default, parameter[]] | keyword[def] identifier[f_restore_default] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_idx] =- literal[int]
identifier[self] . identifier[_crun] = keyword[None]
keyword[for] identifier[param] keyword[in] identifier[self] . identifier[_explored_parameters] . identifier[values] ():
keyword[if] identifier[param] keyword[is] keyword[not] keyword[None] :
identifier[param] . identifier[_restore_default] () | def f_restore_default(self):
""" Restores the default value in all explored parameters and sets the
v_idx property back to -1 and v_crun to None."""
self._idx = -1
self._crun = None
for param in self._explored_parameters.values():
if param is not None:
param._restore_default() # depends on [control=['if'], data=['param']] # depends on [control=['for'], data=['param']] |
def authenticateRequest(self, request, service_request, *args, **kwargs):
"""
Authenticates the request against the service.
@param request: The AMF request
@type request: L{Request<pyamf.remoting.Request>}
"""
username = password = None
if 'Credentials' in request.headers:
cred = request.headers['Credentials']
username = cred['userid']
password = cred['password']
return self.gateway.authenticateRequest(service_request, username,
password, *args, **kwargs) | def function[authenticateRequest, parameter[self, request, service_request]]:
constant[
Authenticates the request against the service.
@param request: The AMF request
@type request: L{Request<pyamf.remoting.Request>}
]
variable[username] assign[=] constant[None]
if compare[constant[Credentials] in name[request].headers] begin[:]
variable[cred] assign[=] call[name[request].headers][constant[Credentials]]
variable[username] assign[=] call[name[cred]][constant[userid]]
variable[password] assign[=] call[name[cred]][constant[password]]
return[call[name[self].gateway.authenticateRequest, parameter[name[service_request], name[username], name[password], <ast.Starred object at 0x7da1b14e4e80>]]] | keyword[def] identifier[authenticateRequest] ( identifier[self] , identifier[request] , identifier[service_request] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[username] = identifier[password] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[request] . identifier[headers] :
identifier[cred] = identifier[request] . identifier[headers] [ literal[string] ]
identifier[username] = identifier[cred] [ literal[string] ]
identifier[password] = identifier[cred] [ literal[string] ]
keyword[return] identifier[self] . identifier[gateway] . identifier[authenticateRequest] ( identifier[service_request] , identifier[username] ,
identifier[password] ,* identifier[args] ,** identifier[kwargs] ) | def authenticateRequest(self, request, service_request, *args, **kwargs):
"""
Authenticates the request against the service.
@param request: The AMF request
@type request: L{Request<pyamf.remoting.Request>}
"""
username = password = None
if 'Credentials' in request.headers:
cred = request.headers['Credentials']
username = cred['userid']
password = cred['password'] # depends on [control=['if'], data=[]]
return self.gateway.authenticateRequest(service_request, username, password, *args, **kwargs) |
def _update_imageinfo(self):
"""
        calls get_imageinfo() if image info is missing from the data
"""
missing = self._missing_imageinfo()
deferred = self.flags.get('defer_imageinfo')
continuing = self.data.get('continue')
if missing and not deferred and not continuing:
self.get_imageinfo(show=False) | def function[_update_imageinfo, parameter[self]]:
constant[
        calls get_imageinfo() if image info is missing from the data
]
variable[missing] assign[=] call[name[self]._missing_imageinfo, parameter[]]
variable[deferred] assign[=] call[name[self].flags.get, parameter[constant[defer_imageinfo]]]
variable[continuing] assign[=] call[name[self].data.get, parameter[constant[continue]]]
if <ast.BoolOp object at 0x7da1b12511e0> begin[:]
call[name[self].get_imageinfo, parameter[]] | keyword[def] identifier[_update_imageinfo] ( identifier[self] ):
literal[string]
identifier[missing] = identifier[self] . identifier[_missing_imageinfo] ()
identifier[deferred] = identifier[self] . identifier[flags] . identifier[get] ( literal[string] )
identifier[continuing] = identifier[self] . identifier[data] . identifier[get] ( literal[string] )
keyword[if] identifier[missing] keyword[and] keyword[not] identifier[deferred] keyword[and] keyword[not] identifier[continuing] :
identifier[self] . identifier[get_imageinfo] ( identifier[show] = keyword[False] ) | def _update_imageinfo(self):
"""
        calls get_imageinfo() if image info is missing from the data
"""
missing = self._missing_imageinfo()
deferred = self.flags.get('defer_imageinfo')
continuing = self.data.get('continue')
if missing and (not deferred) and (not continuing):
self.get_imageinfo(show=False) # depends on [control=['if'], data=[]] |
def TriggersPost(self, parameters):
"""
Create a trigger on CommonSense.
If TriggersPost was successful the result, including the trigger_id, can be obtained from getResponse().
@param parameters (dictionary) - Parameters of the trigger to create.
@note
@return (bool) - Boolean indicating whether TriggersPost was successful.
"""
        if self.__SenseApiCall__('/triggers.json', 'POST', parameters=parameters):
return True
else:
self.__error__ = "api call unsuccessful"
return False | def function[TriggersPost, parameter[self, parameters]]:
constant[
Create a trigger on CommonSense.
If TriggersPost was successful the result, including the trigger_id, can be obtained from getResponse().
@param parameters (dictionary) - Parameters of the trigger to create.
@note
@return (bool) - Boolean indicating whether TriggersPost was successful.
]
if call[name[self].__SenseApiCall__, parameter[constant[/triggers.json], constant[POST]]] begin[:]
return[constant[True]] | keyword[def] identifier[TriggersPost] ( identifier[self] , identifier[parameters] ):
literal[string]
keyword[if] identifier[self] . identifier[__SenseApiCall__] ( literal[string] , literal[string] , identifier[parameters] = identifier[parameters] ):
keyword[return] keyword[True]
keyword[else] :
identifier[self] . identifier[__error__] = literal[string]
keyword[return] keyword[False] | def TriggersPost(self, parameters):
"""
Create a trigger on CommonSense.
If TriggersPost was successful the result, including the trigger_id, can be obtained from getResponse().
@param parameters (dictionary) - Parameters of the trigger to create.
@note
@return (bool) - Boolean indicating whether TriggersPost was successful.
"""
if self.__SenseApiCall__('/triggers.json', 'POST', parameters=parameters):
return True # depends on [control=['if'], data=[]]
else:
self.__error__ = 'api call unsuccessful'
return False |
def submit_ham(self, params):
"""For submitting a ham comment to Akismet."""
# Check required params for submit-ham
for required in ['blog', 'user_ip', 'user_agent']:
if required not in params:
raise MissingParams(required)
response = self._request('submit-ham', params)
        if response.status == 200:
return response.read() == "true"
return False | def function[submit_ham, parameter[self, params]]:
constant[For submitting a ham comment to Akismet.]
for taget[name[required]] in starred[list[[<ast.Constant object at 0x7da204960820>, <ast.Constant object at 0x7da204960c70>, <ast.Constant object at 0x7da204961360>]]] begin[:]
if compare[name[required] <ast.NotIn object at 0x7da2590d7190> name[params]] begin[:]
<ast.Raise object at 0x7da204963e50>
variable[response] assign[=] call[name[self]._request, parameter[constant[submit-ham], name[params]]]
    if compare[name[response].status equal[==] constant[200]] begin[:]
return[compare[call[name[response].read, parameter[]] equal[==] constant[true]]]
return[constant[False]] | keyword[def] identifier[submit_ham] ( identifier[self] , identifier[params] ):
literal[string]
keyword[for] identifier[required] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[if] identifier[required] keyword[not] keyword[in] identifier[params] :
keyword[raise] identifier[MissingParams] ( identifier[required] )
identifier[response] = identifier[self] . identifier[_request] ( literal[string] , identifier[params] )
keyword[if] identifier[response] . identifier[status] == literal[int] :
keyword[return] identifier[response] . identifier[read] ()== literal[string]
keyword[return] keyword[False] | def submit_ham(self, params):
"""For submitting a ham comment to Akismet."""
# Check required params for submit-ham
for required in ['blog', 'user_ip', 'user_agent']:
if required not in params:
raise MissingParams(required) # depends on [control=['if'], data=['required']] # depends on [control=['for'], data=['required']]
response = self._request('submit-ham', params)
        if response.status == 200:
return response.read() == 'true' # depends on [control=['if'], data=[]]
return False |
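The required-parameter check above can be illustrated standalone; the parameter values are invented:

required = ['blog', 'user_ip', 'user_agent']
params = {'blog': 'http://example.com', 'user_ip': '192.0.2.1'}

missing = [key for key in required if key not in params]
print(missing)  # ['user_agent'] -> submit_ham would raise MissingParams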
def _expandGLQ(self, zeros, lmax, lmax_calc):
"""Evaluate the coefficients on a Gauss-Legendre quadrature grid."""
if self.normalization == '4pi':
norm = 1
elif self.normalization == 'schmidt':
norm = 2
elif self.normalization == 'unnorm':
norm = 3
elif self.normalization == 'ortho':
norm = 4
else:
raise ValueError(
"Normalization must be '4pi', 'ortho', 'schmidt', or " +
"'unnorm'. Input value was {:s}"
.format(repr(self.normalization)))
if zeros is None:
zeros, weights = _shtools.SHGLQ(self.lmax)
data = _shtools.MakeGridGLQC(self.coeffs, zeros, norm=norm,
csphase=self.csphase, lmax=lmax,
lmax_calc=lmax_calc)
gridout = SHGrid.from_array(data, grid='GLQ', copy=False)
return gridout | def function[_expandGLQ, parameter[self, zeros, lmax, lmax_calc]]:
constant[Evaluate the coefficients on a Gauss-Legendre quadrature grid.]
if compare[name[self].normalization equal[==] constant[4pi]] begin[:]
variable[norm] assign[=] constant[1]
if compare[name[zeros] is constant[None]] begin[:]
<ast.Tuple object at 0x7da18ede5000> assign[=] call[name[_shtools].SHGLQ, parameter[name[self].lmax]]
variable[data] assign[=] call[name[_shtools].MakeGridGLQC, parameter[name[self].coeffs, name[zeros]]]
variable[gridout] assign[=] call[name[SHGrid].from_array, parameter[name[data]]]
return[name[gridout]] | keyword[def] identifier[_expandGLQ] ( identifier[self] , identifier[zeros] , identifier[lmax] , identifier[lmax_calc] ):
literal[string]
keyword[if] identifier[self] . identifier[normalization] == literal[string] :
identifier[norm] = literal[int]
keyword[elif] identifier[self] . identifier[normalization] == literal[string] :
identifier[norm] = literal[int]
keyword[elif] identifier[self] . identifier[normalization] == literal[string] :
identifier[norm] = literal[int]
keyword[elif] identifier[self] . identifier[normalization] == literal[string] :
identifier[norm] = literal[int]
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] +
literal[string]
. identifier[format] ( identifier[repr] ( identifier[self] . identifier[normalization] )))
keyword[if] identifier[zeros] keyword[is] keyword[None] :
identifier[zeros] , identifier[weights] = identifier[_shtools] . identifier[SHGLQ] ( identifier[self] . identifier[lmax] )
identifier[data] = identifier[_shtools] . identifier[MakeGridGLQC] ( identifier[self] . identifier[coeffs] , identifier[zeros] , identifier[norm] = identifier[norm] ,
identifier[csphase] = identifier[self] . identifier[csphase] , identifier[lmax] = identifier[lmax] ,
identifier[lmax_calc] = identifier[lmax_calc] )
identifier[gridout] = identifier[SHGrid] . identifier[from_array] ( identifier[data] , identifier[grid] = literal[string] , identifier[copy] = keyword[False] )
keyword[return] identifier[gridout] | def _expandGLQ(self, zeros, lmax, lmax_calc):
"""Evaluate the coefficients on a Gauss-Legendre quadrature grid."""
if self.normalization == '4pi':
norm = 1 # depends on [control=['if'], data=[]]
elif self.normalization == 'schmidt':
norm = 2 # depends on [control=['if'], data=[]]
elif self.normalization == 'unnorm':
norm = 3 # depends on [control=['if'], data=[]]
elif self.normalization == 'ortho':
norm = 4 # depends on [control=['if'], data=[]]
else:
raise ValueError("Normalization must be '4pi', 'ortho', 'schmidt', or " + "'unnorm'. Input value was {:s}".format(repr(self.normalization)))
if zeros is None:
(zeros, weights) = _shtools.SHGLQ(self.lmax) # depends on [control=['if'], data=['zeros']]
data = _shtools.MakeGridGLQC(self.coeffs, zeros, norm=norm, csphase=self.csphase, lmax=lmax, lmax_calc=lmax_calc)
gridout = SHGrid.from_array(data, grid='GLQ', copy=False)
return gridout |
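The if/elif ladder above maps normalization names to the integer codes expected by the SHTOOLS-style backend; a dict-based sketch of the same mapping, not the library's own implementation:

NORM_CODES = {'4pi': 1, 'schmidt': 2, 'unnorm': 3, 'ortho': 4}

def norm_code(normalization):
    try:
        return NORM_CODES[normalization]
    except KeyError:
        raise ValueError("Normalization must be '4pi', 'ortho', 'schmidt', "
                         "or 'unnorm'. Input value was {!r}"
                         .format(normalization))

print(norm_code('schmidt'))  # 2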
def log_download(self, log_num, filename):
'''download a log file'''
print("Downloading log %u as %s" % (log_num, filename))
self.download_lognum = log_num
self.download_file = open(filename, "wb")
self.master.mav.log_request_data_send(self.target_system,
self.target_component,
log_num, 0, 0xFFFFFFFF)
self.download_filename = filename
self.download_set = set()
self.download_start = time.time()
self.download_last_timestamp = time.time()
self.download_ofs = 0
self.retries = 0 | def function[log_download, parameter[self, log_num, filename]]:
constant[download a log file]
call[name[print], parameter[binary_operation[constant[Downloading log %u as %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2345bd0>, <ast.Name object at 0x7da1b2344490>]]]]]
name[self].download_lognum assign[=] name[log_num]
name[self].download_file assign[=] call[name[open], parameter[name[filename], constant[wb]]]
call[name[self].master.mav.log_request_data_send, parameter[name[self].target_system, name[self].target_component, name[log_num], constant[0], constant[4294967295]]]
name[self].download_filename assign[=] name[filename]
name[self].download_set assign[=] call[name[set], parameter[]]
name[self].download_start assign[=] call[name[time].time, parameter[]]
name[self].download_last_timestamp assign[=] call[name[time].time, parameter[]]
name[self].download_ofs assign[=] constant[0]
name[self].retries assign[=] constant[0] | keyword[def] identifier[log_download] ( identifier[self] , identifier[log_num] , identifier[filename] ):
literal[string]
identifier[print] ( literal[string] %( identifier[log_num] , identifier[filename] ))
identifier[self] . identifier[download_lognum] = identifier[log_num]
identifier[self] . identifier[download_file] = identifier[open] ( identifier[filename] , literal[string] )
identifier[self] . identifier[master] . identifier[mav] . identifier[log_request_data_send] ( identifier[self] . identifier[target_system] ,
identifier[self] . identifier[target_component] ,
identifier[log_num] , literal[int] , literal[int] )
identifier[self] . identifier[download_filename] = identifier[filename]
identifier[self] . identifier[download_set] = identifier[set] ()
identifier[self] . identifier[download_start] = identifier[time] . identifier[time] ()
identifier[self] . identifier[download_last_timestamp] = identifier[time] . identifier[time] ()
identifier[self] . identifier[download_ofs] = literal[int]
identifier[self] . identifier[retries] = literal[int] | def log_download(self, log_num, filename):
"""download a log file"""
print('Downloading log %u as %s' % (log_num, filename))
self.download_lognum = log_num
self.download_file = open(filename, 'wb')
self.master.mav.log_request_data_send(self.target_system, self.target_component, log_num, 0, 4294967295)
self.download_filename = filename
self.download_set = set()
self.download_start = time.time()
self.download_last_timestamp = time.time()
self.download_ofs = 0
self.retries = 0 |
def feedback_summaries(self):
"""
Access the feedback_summaries
:returns: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryList
:rtype: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryList
"""
if self._feedback_summaries is None:
self._feedback_summaries = FeedbackSummaryList(
self._version,
account_sid=self._solution['account_sid'],
)
return self._feedback_summaries | def function[feedback_summaries, parameter[self]]:
constant[
Access the feedback_summaries
:returns: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryList
:rtype: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryList
]
if compare[name[self]._feedback_summaries is constant[None]] begin[:]
name[self]._feedback_summaries assign[=] call[name[FeedbackSummaryList], parameter[name[self]._version]]
return[name[self]._feedback_summaries] | keyword[def] identifier[feedback_summaries] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_feedback_summaries] keyword[is] keyword[None] :
identifier[self] . identifier[_feedback_summaries] = identifier[FeedbackSummaryList] (
identifier[self] . identifier[_version] ,
identifier[account_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
)
keyword[return] identifier[self] . identifier[_feedback_summaries] | def feedback_summaries(self):
"""
Access the feedback_summaries
:returns: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryList
:rtype: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryList
"""
if self._feedback_summaries is None:
self._feedback_summaries = FeedbackSummaryList(self._version, account_sid=self._solution['account_sid']) # depends on [control=['if'], data=[]]
return self._feedback_summaries |
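The accessor above is the usual cache-on-first-access pattern; a minimal standalone sketch, with `object()` standing in for the `FeedbackSummaryList` construction:

class LazyHolder(object):
    def __init__(self):
        self._summaries = None

    @property
    def summaries(self):
        # build the child object on first access, then reuse it
        if self._summaries is None:
            self._summaries = object()  # stand-in for FeedbackSummaryList
        return self._summaries

holder = LazyHolder()
assert holder.summaries is holder.summaries  # same instance every time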
def write(self):
"""
Create or update a Wiki Page on Assembla
"""
if not hasattr(self, 'space'):
raise AttributeError("A WikiPage must have a 'space' attribute before you can write it to Assembla.")
self.api = self.space.api
if self.get('id'): # We are modifying an existing wiki page
return self.api._put_json(
self,
space=self.space,
rel_path=self.space._build_rel_path('wiki_pages'),
id_field='id'
)
else: # Creating a new wiki page
return self.api._post_json(
self,
space=self.space,
rel_path=self.space._build_rel_path('wiki_pages'),
) | def function[write, parameter[self]]:
constant[
Create or update a Wiki Page on Assembla
]
if <ast.UnaryOp object at 0x7da2044c0e80> begin[:]
<ast.Raise object at 0x7da2044c1270>
name[self].api assign[=] name[self].space.api
if call[name[self].get, parameter[constant[id]]] begin[:]
return[call[name[self].api._put_json, parameter[name[self]]]] | keyword[def] identifier[write] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[raise] identifier[AttributeError] ( literal[string] )
identifier[self] . identifier[api] = identifier[self] . identifier[space] . identifier[api]
keyword[if] identifier[self] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[api] . identifier[_put_json] (
identifier[self] ,
identifier[space] = identifier[self] . identifier[space] ,
identifier[rel_path] = identifier[self] . identifier[space] . identifier[_build_rel_path] ( literal[string] ),
identifier[id_field] = literal[string]
)
keyword[else] :
keyword[return] identifier[self] . identifier[api] . identifier[_post_json] (
identifier[self] ,
identifier[space] = identifier[self] . identifier[space] ,
identifier[rel_path] = identifier[self] . identifier[space] . identifier[_build_rel_path] ( literal[string] ),
) | def write(self):
"""
Create or update a Wiki Page on Assembla
"""
if not hasattr(self, 'space'):
raise AttributeError("A WikiPage must have a 'space' attribute before you can write it to Assembla.") # depends on [control=['if'], data=[]]
self.api = self.space.api
if self.get('id'): # We are modifying an existing wiki page
return self.api._put_json(self, space=self.space, rel_path=self.space._build_rel_path('wiki_pages'), id_field='id') # depends on [control=['if'], data=[]]
else: # Creating a new wiki page
return self.api._post_json(self, space=self.space, rel_path=self.space._build_rel_path('wiki_pages')) |
def start(debug=False, host='127.0.0.1'):
""" starts a nago agent (daemon) process """
if debug:
debug = True
nago.protocols.httpserver.app.run(debug=debug, host=host) | def function[start, parameter[debug, host]]:
constant[ starts a nago agent (daemon) process ]
if name[debug] begin[:]
variable[debug] assign[=] constant[True]
call[name[nago].protocols.httpserver.app.run, parameter[]] | keyword[def] identifier[start] ( identifier[debug] = keyword[False] , identifier[host] = literal[string] ):
literal[string]
keyword[if] identifier[debug] :
identifier[debug] = keyword[True]
identifier[nago] . identifier[protocols] . identifier[httpserver] . identifier[app] . identifier[run] ( identifier[debug] = identifier[debug] , identifier[host] = identifier[host] ) | def start(debug=False, host='127.0.0.1'):
""" starts a nago agent (daemon) process """
if debug:
debug = True # depends on [control=['if'], data=[]]
nago.protocols.httpserver.app.run(debug=debug, host=host) |
def handle_editor_command(self, cli, document):
"""
Editor command is any query that is prefixed or suffixed
        by a '\e'. A while loop is used because a user
        might edit a query multiple times.
        For example:
"select * from \e"<enter> to edit it in vim, then come
back to the prompt with the edited query "select * from
blah where q = 'abc'\e" to edit it again.
:param cli: CommandLineInterface
:param document: Document
:return: Document
"""
# FIXME: using application.pre_run_callables like this here is not the best solution.
# It's internal api of prompt_toolkit that may change. This was added to fix
# https://github.com/dbcli/pgcli/issues/668. We may find a better way to do it in the future.
saved_callables = cli.application.pre_run_callables
while special.editor_command(document.text):
filename = special.get_filename(document.text)
query = (special.get_editor_query(document.text) or
self.get_last_query())
sql, message = special.open_external_editor(filename, sql=query)
if message:
# Something went wrong. Raise an exception and bail.
raise RuntimeError(message)
cli.current_buffer.document = Document(sql, cursor_position=len(sql))
cli.application.pre_run_callables = []
document = cli.run()
continue
cli.application.pre_run_callables = saved_callables
return document | def function[handle_editor_command, parameter[self, cli, document]]:
constant[
Editor command is any query that is prefixed or suffixed
by a '\e'. The reason for a while loop is because a user
might edit a query multiple times.
    For example:
"select * from \e"<enter> to edit it in vim, then come
back to the prompt with the edited query "select * from
blah where q = 'abc'\e" to edit it again.
:param cli: CommandLineInterface
:param document: Document
:return: Document
]
variable[saved_callables] assign[=] name[cli].application.pre_run_callables
while call[name[special].editor_command, parameter[name[document].text]] begin[:]
variable[filename] assign[=] call[name[special].get_filename, parameter[name[document].text]]
variable[query] assign[=] <ast.BoolOp object at 0x7da18bc71870>
<ast.Tuple object at 0x7da18bc70580> assign[=] call[name[special].open_external_editor, parameter[name[filename]]]
if name[message] begin[:]
<ast.Raise object at 0x7da18bc72170>
name[cli].current_buffer.document assign[=] call[name[Document], parameter[name[sql]]]
name[cli].application.pre_run_callables assign[=] list[[]]
variable[document] assign[=] call[name[cli].run, parameter[]]
continue
name[cli].application.pre_run_callables assign[=] name[saved_callables]
return[name[document]] | keyword[def] identifier[handle_editor_command] ( identifier[self] , identifier[cli] , identifier[document] ):
literal[string]
identifier[saved_callables] = identifier[cli] . identifier[application] . identifier[pre_run_callables]
keyword[while] identifier[special] . identifier[editor_command] ( identifier[document] . identifier[text] ):
identifier[filename] = identifier[special] . identifier[get_filename] ( identifier[document] . identifier[text] )
identifier[query] =( identifier[special] . identifier[get_editor_query] ( identifier[document] . identifier[text] ) keyword[or]
identifier[self] . identifier[get_last_query] ())
identifier[sql] , identifier[message] = identifier[special] . identifier[open_external_editor] ( identifier[filename] , identifier[sql] = identifier[query] )
keyword[if] identifier[message] :
keyword[raise] identifier[RuntimeError] ( identifier[message] )
identifier[cli] . identifier[current_buffer] . identifier[document] = identifier[Document] ( identifier[sql] , identifier[cursor_position] = identifier[len] ( identifier[sql] ))
identifier[cli] . identifier[application] . identifier[pre_run_callables] =[]
identifier[document] = identifier[cli] . identifier[run] ()
keyword[continue]
identifier[cli] . identifier[application] . identifier[pre_run_callables] = identifier[saved_callables]
keyword[return] identifier[document] | def handle_editor_command(self, cli, document):
"""
Editor command is any query that is prefixed or suffixed
by a '\\e'. The reason for a while loop is because a user
might edit a query multiple times.
    For example:
"select * from \\e"<enter> to edit it in vim, then come
back to the prompt with the edited query "select * from
blah where q = 'abc'\\e" to edit it again.
:param cli: CommandLineInterface
:param document: Document
:return: Document
"""
# FIXME: using application.pre_run_callables like this here is not the best solution.
# It's internal api of prompt_toolkit that may change. This was added to fix
# https://github.com/dbcli/pgcli/issues/668. We may find a better way to do it in the future.
saved_callables = cli.application.pre_run_callables
while special.editor_command(document.text):
filename = special.get_filename(document.text)
query = special.get_editor_query(document.text) or self.get_last_query()
(sql, message) = special.open_external_editor(filename, sql=query)
if message:
# Something went wrong. Raise an exception and bail.
raise RuntimeError(message) # depends on [control=['if'], data=[]]
cli.current_buffer.document = Document(sql, cursor_position=len(sql))
cli.application.pre_run_callables = []
document = cli.run()
continue # depends on [control=['while'], data=[]]
cli.application.pre_run_callables = saved_callables
return document |
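The loop above keys off special.editor_command(); a plausible minimal detector for the \e prefix/suffix convention described in the docstring (pgcli's real helper may differ):

def editor_command(text):
    # True when the query starts or ends with \e, per the docstring's convention.
    text = text.strip()
    return text.startswith('\\e') or text.endswith('\\e')

assert editor_command(r'select * from \e')
assert editor_command(r'\e my_query.sql')
assert not editor_command('select 1')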
def check_status(self, ignore=(), status=None):
"""
Checks status of each collection and shard to make sure that:
a) Cluster state is active
b) Number of docs matches across replicas for a given shard.
Returns a dict of results for custom alerting.
"""
self.SHARD_CHECKS = [
{'check_msg': 'Bad Core Count Check', 'f': self._check_shard_count},
{'check_msg': 'Bad Shard Cluster Status', 'f': self._check_shard_status}
]
if status is None:
status = self.clusterstatus()
out = {}
for collection in status:
out[collection] = {}
out[collection]['coll_status'] = True # Means it's fine
out[collection]['coll_messages'] = []
for shard in status[collection]:
self.logger.debug("Checking {}/{}".format(collection, shard))
s_dict = status[collection][shard]
for check in self.SHARD_CHECKS:
if check['check_msg'] in ignore:
continue
res = check['f'](s_dict)
if not res:
out[collection]['coll_status'] = False
if check['check_msg'] not in out[collection]['coll_messages']:
out[collection]['coll_messages'].append(check['check_msg'])
self.logger.debug(s_dict)
return out | def function[check_status, parameter[self, ignore, status]]:
constant[
Checks status of each collection and shard to make sure that:
a) Cluster state is active
b) Number of docs matches across replicas for a given shard.
Returns a dict of results for custom alerting.
]
name[self].SHARD_CHECKS assign[=] list[[<ast.Dict object at 0x7da18dc9bc10>, <ast.Dict object at 0x7da18dc9a500>]]
if compare[name[status] is constant[None]] begin[:]
variable[status] assign[=] call[name[self].clusterstatus, parameter[]]
variable[out] assign[=] dictionary[[], []]
for taget[name[collection]] in starred[name[status]] begin[:]
call[name[out]][name[collection]] assign[=] dictionary[[], []]
call[call[name[out]][name[collection]]][constant[coll_status]] assign[=] constant[True]
call[call[name[out]][name[collection]]][constant[coll_messages]] assign[=] list[[]]
for taget[name[shard]] in starred[call[name[status]][name[collection]]] begin[:]
call[name[self].logger.debug, parameter[call[constant[Checking {}/{}].format, parameter[name[collection], name[shard]]]]]
variable[s_dict] assign[=] call[call[name[status]][name[collection]]][name[shard]]
for taget[name[check]] in starred[name[self].SHARD_CHECKS] begin[:]
if compare[call[name[check]][constant[check_msg]] in name[ignore]] begin[:]
continue
variable[res] assign[=] call[call[name[check]][constant[f]], parameter[name[s_dict]]]
if <ast.UnaryOp object at 0x7da1b0f12440> begin[:]
call[call[name[out]][name[collection]]][constant[coll_status]] assign[=] constant[False]
if compare[call[name[check]][constant[check_msg]] <ast.NotIn object at 0x7da2590d7190> call[call[name[out]][name[collection]]][constant[coll_messages]]] begin[:]
call[call[call[name[out]][name[collection]]][constant[coll_messages]].append, parameter[call[name[check]][constant[check_msg]]]]
call[name[self].logger.debug, parameter[name[s_dict]]]
return[name[out]] | keyword[def] identifier[check_status] ( identifier[self] , identifier[ignore] =(), identifier[status] = keyword[None] ):
literal[string]
identifier[self] . identifier[SHARD_CHECKS] =[
{ literal[string] : literal[string] , literal[string] : identifier[self] . identifier[_check_shard_count] },
{ literal[string] : literal[string] , literal[string] : identifier[self] . identifier[_check_shard_status] }
]
keyword[if] identifier[status] keyword[is] keyword[None] :
identifier[status] = identifier[self] . identifier[clusterstatus] ()
identifier[out] ={}
keyword[for] identifier[collection] keyword[in] identifier[status] :
identifier[out] [ identifier[collection] ]={}
identifier[out] [ identifier[collection] ][ literal[string] ]= keyword[True]
identifier[out] [ identifier[collection] ][ literal[string] ]=[]
keyword[for] identifier[shard] keyword[in] identifier[status] [ identifier[collection] ]:
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[collection] , identifier[shard] ))
identifier[s_dict] = identifier[status] [ identifier[collection] ][ identifier[shard] ]
keyword[for] identifier[check] keyword[in] identifier[self] . identifier[SHARD_CHECKS] :
keyword[if] identifier[check] [ literal[string] ] keyword[in] identifier[ignore] :
keyword[continue]
identifier[res] = identifier[check] [ literal[string] ]( identifier[s_dict] )
keyword[if] keyword[not] identifier[res] :
identifier[out] [ identifier[collection] ][ literal[string] ]= keyword[False]
keyword[if] identifier[check] [ literal[string] ] keyword[not] keyword[in] identifier[out] [ identifier[collection] ][ literal[string] ]:
identifier[out] [ identifier[collection] ][ literal[string] ]. identifier[append] ( identifier[check] [ literal[string] ])
identifier[self] . identifier[logger] . identifier[debug] ( identifier[s_dict] )
keyword[return] identifier[out] | def check_status(self, ignore=(), status=None):
"""
Checks status of each collection and shard to make sure that:
a) Cluster state is active
b) Number of docs matches across replicas for a given shard.
Returns a dict of results for custom alerting.
"""
self.SHARD_CHECKS = [{'check_msg': 'Bad Core Count Check', 'f': self._check_shard_count}, {'check_msg': 'Bad Shard Cluster Status', 'f': self._check_shard_status}]
if status is None:
status = self.clusterstatus() # depends on [control=['if'], data=['status']]
out = {}
for collection in status:
out[collection] = {}
out[collection]['coll_status'] = True # Means it's fine
out[collection]['coll_messages'] = []
for shard in status[collection]:
self.logger.debug('Checking {}/{}'.format(collection, shard))
s_dict = status[collection][shard]
for check in self.SHARD_CHECKS:
if check['check_msg'] in ignore:
continue # depends on [control=['if'], data=[]]
res = check['f'](s_dict)
if not res:
out[collection]['coll_status'] = False
if check['check_msg'] not in out[collection]['coll_messages']:
out[collection]['coll_messages'].append(check['check_msg']) # depends on [control=['if'], data=[]]
self.logger.debug(s_dict) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['check']] # depends on [control=['for'], data=['shard']] # depends on [control=['for'], data=['collection']]
return out |
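The returned dict maps each collection to coll_status and coll_messages, so downstream alerting can filter on it directly; a small consumer sketch built only on that shape:

def failing_collections(report):
    # Keep only collections whose checks did not all pass.
    return {name: info['coll_messages']
            for name, info in report.items()
            if not info['coll_status']}

report = {
    'logs': {'coll_status': True, 'coll_messages': []},
    'users': {'coll_status': False, 'coll_messages': ['Bad Core Count Check']},
}
assert failing_collections(report) == {'users': ['Bad Core Count Check']}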
def _create_figure(kwargs: Mapping[str, Any]) -> dict:
"""Create basic dictionary object with figure properties."""
return {
"$schema": "https://vega.github.io/schema/vega/v3.json",
"width": kwargs.pop("width", DEFAULT_WIDTH),
"height": kwargs.pop("height", DEFAULT_HEIGHT),
"padding": kwargs.pop("padding", DEFAULT_PADDING)
} | def function[_create_figure, parameter[kwargs]]:
constant[Create basic dictionary object with figure properties.]
return[dictionary[[<ast.Constant object at 0x7da18f8105b0>, <ast.Constant object at 0x7da18f8110c0>, <ast.Constant object at 0x7da18f811480>, <ast.Constant object at 0x7da18f811a80>], [<ast.Constant object at 0x7da18f8130d0>, <ast.Call object at 0x7da18f8119c0>, <ast.Call object at 0x7da18f8129e0>, <ast.Call object at 0x7da18f813d00>]]] | keyword[def] identifier[_create_figure] ( identifier[kwargs] : identifier[Mapping] [ identifier[str] , identifier[Any] ])-> identifier[dict] :
literal[string]
keyword[return] {
literal[string] : literal[string] ,
literal[string] : identifier[kwargs] . identifier[pop] ( literal[string] , identifier[DEFAULT_WIDTH] ),
literal[string] : identifier[kwargs] . identifier[pop] ( literal[string] , identifier[DEFAULT_HEIGHT] ),
literal[string] : identifier[kwargs] . identifier[pop] ( literal[string] , identifier[DEFAULT_PADDING] )
} | def _create_figure(kwargs: Mapping[str, Any]) -> dict:
"""Create basic dictionary object with figure properties."""
return {'$schema': 'https://vega.github.io/schema/vega/v3.json', 'width': kwargs.pop('width', DEFAULT_WIDTH), 'height': kwargs.pop('height', DEFAULT_HEIGHT), 'padding': kwargs.pop('padding', DEFAULT_PADDING)} |
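Because the helper pops width, height, and padding out of kwargs, the caller's remaining keyword arguments stay available for later spec sections (marks, scales, and so on). A quick demonstration with assumed default values:

DEFAULT_WIDTH, DEFAULT_HEIGHT, DEFAULT_PADDING = 400, 300, 5   # assumed defaults

kwargs = {'width': 640, 'color': 'steelblue'}
figure = {
    '$schema': 'https://vega.github.io/schema/vega/v3.json',
    'width': kwargs.pop('width', DEFAULT_WIDTH),
    'height': kwargs.pop('height', DEFAULT_HEIGHT),
    'padding': kwargs.pop('padding', DEFAULT_PADDING),
}
assert figure['width'] == 640 and figure['height'] == 300
assert kwargs == {'color': 'steelblue'}   # consumed keys are gone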
def in_domain(self, points):
"""
Returns ``True`` if all of the given points are in the domain,
``False`` otherwise.
:param np.ndarray points: An `np.ndarray` of type `self.dtype`.
:rtype: `bool`
"""
array_view = self.to_regular_array(points)
non_negative = np.all(np.greater_equal(array_view, 0))
correct_sum = np.all(np.sum(array_view, axis=-1) == self.n_meas)
return non_negative and correct_sum | def function[in_domain, parameter[self, points]]:
constant[
Returns ``True`` if all of the given points are in the domain,
``False`` otherwise.
:param np.ndarray points: An `np.ndarray` of type `self.dtype`.
:rtype: `bool`
]
variable[array_view] assign[=] call[name[self].to_regular_array, parameter[name[points]]]
variable[non_negative] assign[=] call[name[np].all, parameter[call[name[np].greater_equal, parameter[name[array_view], constant[0]]]]]
variable[correct_sum] assign[=] call[name[np].all, parameter[compare[call[name[np].sum, parameter[name[array_view]]] equal[==] name[self].n_meas]]]
return[<ast.BoolOp object at 0x7da1b0d330d0>] | keyword[def] identifier[in_domain] ( identifier[self] , identifier[points] ):
literal[string]
identifier[array_view] = identifier[self] . identifier[to_regular_array] ( identifier[points] )
identifier[non_negative] = identifier[np] . identifier[all] ( identifier[np] . identifier[greater_equal] ( identifier[array_view] , literal[int] ))
identifier[correct_sum] = identifier[np] . identifier[all] ( identifier[np] . identifier[sum] ( identifier[array_view] , identifier[axis] =- literal[int] )== identifier[self] . identifier[n_meas] )
keyword[return] identifier[non_negative] keyword[and] identifier[correct_sum] | def in_domain(self, points):
"""
Returns ``True`` if all of the given points are in the domain,
``False`` otherwise.
:param np.ndarray points: An `np.ndarray` of type `self.dtype`.
:rtype: `bool`
"""
array_view = self.to_regular_array(points)
non_negative = np.all(np.greater_equal(array_view, 0))
correct_sum = np.all(np.sum(array_view, axis=-1) == self.n_meas)
return non_negative and correct_sum |
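In effect the method tests membership in the discrete simplex {x : x_i >= 0 and sum_i x_i = n_meas}. A standalone numpy version, with to_regular_array() replaced by a plain array view:

import numpy as np

def in_simplex(points, n_meas):
    points = np.asarray(points)
    non_negative = np.all(points >= 0)                    # every count is >= 0
    correct_sum = np.all(points.sum(axis=-1) == n_meas)   # each row sums to n_meas
    return bool(non_negative and correct_sum)

assert in_simplex([[2, 1, 0], [1, 1, 1]], n_meas=3)
assert not in_simplex([[2, 2, 0]], n_meas=3)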
def filterbank_log(sr, n_freq, n_bins=84, bins_per_octave=12,
fmin=None, spread=0.125): # pragma: no cover
"""[np] Approximate a constant-Q filter bank for a fixed-window STFT.
Each filter is a log-normal window centered at the corresponding frequency.
Note: `logfrequency` in librosa 0.4 (deprecated), so copy-and-pasted,
`tuning` was removed, `n_freq` instead of `n_fft`.
Parameters
----------
sr : number > 0 [scalar]
audio sampling rate
n_freq : int > 0 [scalar]
number of frequency bins
n_bins : int > 0 [scalar]
Number of bins. Defaults to 84 (7 octaves).
bins_per_octave : int > 0 [scalar]
Number of bins per octave. Defaults to 12 (semitones).
fmin : float > 0 [scalar]
Minimum frequency bin. Defaults to `C1 ~= 32.70`
spread : float > 0 [scalar]
Spread of each filter, as a fraction of a bin.
Returns
-------
C : np.ndarray [shape=(n_bins, 1 + n_fft/2)]
log-frequency filter bank.
"""
if fmin is None:
fmin = 32.70319566
# What's the shape parameter for our log-normal filters?
sigma = float(spread) / bins_per_octave
# Construct the output matrix
basis = np.zeros((n_bins, n_freq))
# Get log frequencies of bins
log_freqs = np.log2(librosa.fft_frequencies(sr, (n_freq - 1) * 2)[1:])
for i in range(n_bins):
# What's the center (median) frequency of this filter?
c_freq = fmin * (2.0 ** (float(i) / bins_per_octave))
# Place a log-normal window around c_freq
basis[i, 1:] = np.exp(-0.5 * ((log_freqs - np.log2(c_freq)) / sigma) ** 2
- np.log2(sigma) - log_freqs)
# Normalize the filters
basis = librosa.util.normalize(basis, norm=1, axis=1)
return basis.astype(K.floatx()) | def function[filterbank_log, parameter[sr, n_freq, n_bins, bins_per_octave, fmin, spread]]:
constant[[np] Approximate a constant-Q filter bank for a fixed-window STFT.
Each filter is a log-normal window centered at the corresponding frequency.
Note: `logfrequency` in librosa 0.4 (deprecated), so copy-and-pasted,
`tuning` was removed, `n_freq` instead of `n_fft`.
Parameters
----------
sr : number > 0 [scalar]
audio sampling rate
n_freq : int > 0 [scalar]
number of frequency bins
n_bins : int > 0 [scalar]
Number of bins. Defaults to 84 (7 octaves).
bins_per_octave : int > 0 [scalar]
Number of bins per octave. Defaults to 12 (semitones).
fmin : float > 0 [scalar]
Minimum frequency bin. Defaults to `C1 ~= 32.70`
spread : float > 0 [scalar]
Spread of each filter, as a fraction of a bin.
Returns
-------
C : np.ndarray [shape=(n_bins, 1 + n_fft/2)]
log-frequency filter bank.
]
if compare[name[fmin] is constant[None]] begin[:]
variable[fmin] assign[=] constant[32.70319566]
variable[sigma] assign[=] binary_operation[call[name[float], parameter[name[spread]]] / name[bins_per_octave]]
variable[basis] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b19eef80>, <ast.Name object at 0x7da1b19eefe0>]]]]
variable[log_freqs] assign[=] call[name[np].log2, parameter[call[call[name[librosa].fft_frequencies, parameter[name[sr], binary_operation[binary_operation[name[n_freq] - constant[1]] * constant[2]]]]][<ast.Slice object at 0x7da1b19eca00>]]]
for taget[name[i]] in starred[call[name[range], parameter[name[n_bins]]]] begin[:]
variable[c_freq] assign[=] binary_operation[name[fmin] * binary_operation[constant[2.0] ** binary_operation[call[name[float], parameter[name[i]]] / name[bins_per_octave]]]]
call[name[basis]][tuple[[<ast.Name object at 0x7da1b19ee2f0>, <ast.Slice object at 0x7da1b19ec970>]]] assign[=] call[name[np].exp, parameter[binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b19eeaa0> * binary_operation[binary_operation[binary_operation[name[log_freqs] - call[name[np].log2, parameter[name[c_freq]]]] / name[sigma]] ** constant[2]]] - call[name[np].log2, parameter[name[sigma]]]] - name[log_freqs]]]]
variable[basis] assign[=] call[name[librosa].util.normalize, parameter[name[basis]]]
return[call[name[basis].astype, parameter[call[name[K].floatx, parameter[]]]]] | keyword[def] identifier[filterbank_log] ( identifier[sr] , identifier[n_freq] , identifier[n_bins] = literal[int] , identifier[bins_per_octave] = literal[int] ,
identifier[fmin] = keyword[None] , identifier[spread] = literal[int] ):
literal[string]
keyword[if] identifier[fmin] keyword[is] keyword[None] :
identifier[fmin] = literal[int]
identifier[sigma] = identifier[float] ( identifier[spread] )/ identifier[bins_per_octave]
identifier[basis] = identifier[np] . identifier[zeros] (( identifier[n_bins] , identifier[n_freq] ))
identifier[log_freqs] = identifier[np] . identifier[log2] ( identifier[librosa] . identifier[fft_frequencies] ( identifier[sr] ,( identifier[n_freq] - literal[int] )* literal[int] )[ literal[int] :])
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n_bins] ):
identifier[c_freq] = identifier[fmin] *( literal[int] **( identifier[float] ( identifier[i] )/ identifier[bins_per_octave] ))
identifier[basis] [ identifier[i] , literal[int] :]= identifier[np] . identifier[exp] (- literal[int] *(( identifier[log_freqs] - identifier[np] . identifier[log2] ( identifier[c_freq] ))/ identifier[sigma] )** literal[int]
- identifier[np] . identifier[log2] ( identifier[sigma] )- identifier[log_freqs] )
identifier[basis] = identifier[librosa] . identifier[util] . identifier[normalize] ( identifier[basis] , identifier[norm] = literal[int] , identifier[axis] = literal[int] )
keyword[return] identifier[basis] . identifier[astype] ( identifier[K] . identifier[floatx] ()) | def filterbank_log(sr, n_freq, n_bins=84, bins_per_octave=12, fmin=None, spread=0.125): # pragma: no cover
'[np] Approximate a constant-Q filter bank for a fixed-window STFT.\n\n Each filter is a log-normal window centered at the corresponding frequency.\n\n Note: `logfrequency` in librosa 0.4 (deprecated), so copy-and-pasted,\n `tuning` was removed, `n_freq` instead of `n_fft`.\n\n Parameters\n ----------\n sr : number > 0 [scalar]\n audio sampling rate\n\n n_freq : int > 0 [scalar]\n number of frequency bins\n\n n_bins : int > 0 [scalar]\n Number of bins. Defaults to 84 (7 octaves).\n\n bins_per_octave : int > 0 [scalar]\n Number of bins per octave. Defaults to 12 (semitones).\n\n fmin : float > 0 [scalar]\n Minimum frequency bin. Defaults to `C1 ~= 32.70`\n\n spread : float > 0 [scalar]\n Spread of each filter, as a fraction of a bin.\n\n Returns\n -------\n C : np.ndarray [shape=(n_bins, 1 + n_fft/2)]\n log-frequency filter bank.\n '
if fmin is None:
fmin = 32.70319566 # depends on [control=['if'], data=['fmin']]
# What's the shape parameter for our log-normal filters?
sigma = float(spread) / bins_per_octave
# Construct the output matrix
basis = np.zeros((n_bins, n_freq))
# Get log frequencies of bins
log_freqs = np.log2(librosa.fft_frequencies(sr, (n_freq - 1) * 2)[1:])
for i in range(n_bins):
# What's the center (median) frequency of this filter?
c_freq = fmin * 2.0 ** (float(i) / bins_per_octave)
# Place a log-normal window around c_freq
basis[i, 1:] = np.exp(-0.5 * ((log_freqs - np.log2(c_freq)) / sigma) ** 2 - np.log2(sigma) - log_freqs) # depends on [control=['for'], data=['i']]
# Normalize the filters
basis = librosa.util.normalize(basis, norm=1, axis=1)
return basis.astype(K.floatx()) |
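A single filter row can be built in isolation with numpy only; librosa's fft_frequencies and L1 normalization are inlined below (fft_frequencies(sr, n_fft) is equivalent to np.linspace(0, sr/2, 1 + n_fft/2)):

import numpy as np

sr, n_freq = 22050, 1025
freqs = np.linspace(0, sr / 2.0, n_freq)[1:]   # FFT bin centers, DC bin dropped
log_freqs = np.log2(freqs)

sigma = 0.125 / 12             # spread / bins_per_octave
c_freq = 32.70319566           # fmin, i.e. the center frequency of filter 0

row = np.exp(-0.5 * ((log_freqs - np.log2(c_freq)) / sigma) ** 2
             - np.log2(sigma) - log_freqs)
row /= np.abs(row).sum()       # L1 normalization, as librosa.util.normalize(norm=1)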
def language_callback(lexer, match):
"""Parse the content of a $-string using a lexer
The lexer is chosen looking for a nearby LANGUAGE or assumed as
plpgsql if inside a DO statement and no LANGUAGE has been found.
"""
l = None
m = language_re.match(lexer.text[match.end():match.end()+100])
if m is not None:
l = lexer._get_lexer(m.group(1))
else:
m = list(language_re.finditer(
lexer.text[max(0, match.start()-100):match.start()]))
if m:
l = lexer._get_lexer(m[-1].group(1))
else:
m = list(do_re.finditer(
lexer.text[max(0, match.start()-25):match.start()]))
if m:
l = lexer._get_lexer('plpgsql')
# 1 = $, 2 = delimiter, 3 = $
yield (match.start(1), String, match.group(1))
yield (match.start(2), String.Delimiter, match.group(2))
yield (match.start(3), String, match.group(3))
# 4 = string contents
if l:
for x in l.get_tokens_unprocessed(match.group(4)):
yield x
else:
yield (match.start(4), String, match.group(4))
# 5 = $, 6 = delimiter, 7 = $
yield (match.start(5), String, match.group(5))
yield (match.start(6), String.Delimiter, match.group(6))
yield (match.start(7), String, match.group(7)) | def function[language_callback, parameter[lexer, match]]:
constant[Parse the content of a $-string using a lexer
The lexer is chosen looking for a nearby LANGUAGE or assumed as
plpgsql if inside a DO statement and no LANGUAGE has been found.
]
variable[l] assign[=] constant[None]
variable[m] assign[=] call[name[language_re].match, parameter[call[name[lexer].text][<ast.Slice object at 0x7da1b120a290>]]]
if compare[name[m] is_not constant[None]] begin[:]
variable[l] assign[=] call[name[lexer]._get_lexer, parameter[call[name[m].group, parameter[constant[1]]]]]
<ast.Yield object at 0x7da18bc70c10>
<ast.Yield object at 0x7da18bc72ad0>
<ast.Yield object at 0x7da18bc72aa0>
if name[l] begin[:]
for taget[name[x]] in starred[call[name[l].get_tokens_unprocessed, parameter[call[name[match].group, parameter[constant[4]]]]]] begin[:]
<ast.Yield object at 0x7da18bc73010>
<ast.Yield object at 0x7da18bc73520>
<ast.Yield object at 0x7da18bc71390>
<ast.Yield object at 0x7da1b11f4af0> | keyword[def] identifier[language_callback] ( identifier[lexer] , identifier[match] ):
literal[string]
identifier[l] = keyword[None]
identifier[m] = identifier[language_re] . identifier[match] ( identifier[lexer] . identifier[text] [ identifier[match] . identifier[end] (): identifier[match] . identifier[end] ()+ literal[int] ])
keyword[if] identifier[m] keyword[is] keyword[not] keyword[None] :
identifier[l] = identifier[lexer] . identifier[_get_lexer] ( identifier[m] . identifier[group] ( literal[int] ))
keyword[else] :
identifier[m] = identifier[list] ( identifier[language_re] . identifier[finditer] (
identifier[lexer] . identifier[text] [ identifier[max] ( literal[int] , identifier[match] . identifier[start] ()- literal[int] ): identifier[match] . identifier[start] ()]))
keyword[if] identifier[m] :
identifier[l] = identifier[lexer] . identifier[_get_lexer] ( identifier[m] [- literal[int] ]. identifier[group] ( literal[int] ))
keyword[else] :
identifier[m] = identifier[list] ( identifier[do_re] . identifier[finditer] (
identifier[lexer] . identifier[text] [ identifier[max] ( literal[int] , identifier[match] . identifier[start] ()- literal[int] ): identifier[match] . identifier[start] ()]))
keyword[if] identifier[m] :
identifier[l] = identifier[lexer] . identifier[_get_lexer] ( literal[string] )
keyword[yield] ( identifier[match] . identifier[start] ( literal[int] ), identifier[String] , identifier[match] . identifier[group] ( literal[int] ))
keyword[yield] ( identifier[match] . identifier[start] ( literal[int] ), identifier[String] . identifier[Delimiter] , identifier[match] . identifier[group] ( literal[int] ))
keyword[yield] ( identifier[match] . identifier[start] ( literal[int] ), identifier[String] , identifier[match] . identifier[group] ( literal[int] ))
keyword[if] identifier[l] :
keyword[for] identifier[x] keyword[in] identifier[l] . identifier[get_tokens_unprocessed] ( identifier[match] . identifier[group] ( literal[int] )):
keyword[yield] identifier[x]
keyword[else] :
keyword[yield] ( identifier[match] . identifier[start] ( literal[int] ), identifier[String] , identifier[match] . identifier[group] ( literal[int] ))
keyword[yield] ( identifier[match] . identifier[start] ( literal[int] ), identifier[String] , identifier[match] . identifier[group] ( literal[int] ))
keyword[yield] ( identifier[match] . identifier[start] ( literal[int] ), identifier[String] . identifier[Delimiter] , identifier[match] . identifier[group] ( literal[int] ))
keyword[yield] ( identifier[match] . identifier[start] ( literal[int] ), identifier[String] , identifier[match] . identifier[group] ( literal[int] )) | def language_callback(lexer, match):
"""Parse the content of a $-string using a lexer
The lexer is chosen looking for a nearby LANGUAGE or assumed as
plpgsql if inside a DO statement and no LANGUAGE has been found.
"""
l = None
m = language_re.match(lexer.text[match.end():match.end() + 100])
if m is not None:
l = lexer._get_lexer(m.group(1)) # depends on [control=['if'], data=['m']]
else:
m = list(language_re.finditer(lexer.text[max(0, match.start() - 100):match.start()]))
if m:
l = lexer._get_lexer(m[-1].group(1)) # depends on [control=['if'], data=[]]
else:
m = list(do_re.finditer(lexer.text[max(0, match.start() - 25):match.start()]))
if m:
l = lexer._get_lexer('plpgsql') # depends on [control=['if'], data=[]]
# 1 = $, 2 = delimiter, 3 = $
yield (match.start(1), String, match.group(1))
yield (match.start(2), String.Delimiter, match.group(2))
yield (match.start(3), String, match.group(3))
# 4 = string contents
if l:
for x in l.get_tokens_unprocessed(match.group(4)):
yield x # depends on [control=['for'], data=['x']] # depends on [control=['if'], data=[]]
else:
yield (match.start(4), String, match.group(4))
# 5 = $, 6 = delimiter, 7 = $
yield (match.start(5), String, match.group(5))
yield (match.start(6), String.Delimiter, match.group(6))
yield (match.start(7), String, match.group(7)) |
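The seven match groups consumed above (1=$, 2=tag, 3=$, 4=body, 5=$, 6=tag, 7=$) correspond to PostgreSQL dollar quoting. A simplified regex that makes the grouping concrete; the lexer's actual pattern is more permissive:

import re

dollar_quote = re.compile(r'(\$)([^$]*)(\$)(.*?)(\$)(\2)(\$)', re.S)
m = dollar_quote.match('$fn$select 1$fn$')
assert m.group(2) == 'fn' and m.group(4) == 'select 1'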
def compute_message_authenticator(radius_packet, packed_req_authenticator,
shared_secret):
"""
Computes the "Message-Authenticator" of a given RADIUS packet.
"""
data = prepare_packed_data(radius_packet, packed_req_authenticator)
radius_hmac = hmac.new(shared_secret, data, hashlib.md5)
return radius_hmac.digest() | def function[compute_message_authenticator, parameter[radius_packet, packed_req_authenticator, shared_secret]]:
constant[
Computes the "Message-Authenticator" of a given RADIUS packet.
]
variable[data] assign[=] call[name[prepare_packed_data], parameter[name[radius_packet], name[packed_req_authenticator]]]
variable[radius_hmac] assign[=] call[name[hmac].new, parameter[name[shared_secret], name[data], name[hashlib].md5]]
return[call[name[radius_hmac].digest, parameter[]]] | keyword[def] identifier[compute_message_authenticator] ( identifier[radius_packet] , identifier[packed_req_authenticator] ,
identifier[shared_secret] ):
literal[string]
identifier[data] = identifier[prepare_packed_data] ( identifier[radius_packet] , identifier[packed_req_authenticator] )
identifier[radius_hmac] = identifier[hmac] . identifier[new] ( identifier[shared_secret] , identifier[data] , identifier[hashlib] . identifier[md5] )
keyword[return] identifier[radius_hmac] . identifier[digest] () | def compute_message_authenticator(radius_packet, packed_req_authenticator, shared_secret):
"""
Computes the "Message-Authenticator" of a given RADIUS packet.
"""
data = prepare_packed_data(radius_packet, packed_req_authenticator)
radius_hmac = hmac.new(shared_secret, data, hashlib.md5)
return radius_hmac.digest() |
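The digest itself is plain HMAC-MD5 over the packed packet bytes. Since prepare_packed_data() belongs to the surrounding module, a fixed byte string stands in for its output here:

import hashlib
import hmac

shared_secret = b'radius-secret'               # illustrative secret
packed = b'\x01\x05\x00\x26' + b'\x00' * 34    # hypothetical packed packet bytes
mac = hmac.new(shared_secret, packed, hashlib.md5).digest()
assert len(mac) == 16   # the Message-Authenticator attribute is 16 octets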
def _Rzderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2phi/dR/dz
HISTORY:
2013-08-28 - Written - Bovy (IAS)
"""
sqrtbz= nu.sqrt(self._b2+z**2.)
asqrtbz= self._a+sqrtbz
if isinstance(R,float) and sqrtbz == asqrtbz:
return -(3.*R*z/(R**2.+asqrtbz**2.)**2.5)
else:
return -(3.*R*z*asqrtbz
/sqrtbz/(R**2.+asqrtbz**2.)**2.5) | def function[_Rzderiv, parameter[self, R, z, phi, t]]:
constant[
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2phi/dR/dz
HISTORY:
2013-08-28 - Written - Bovy (IAS)
]
variable[sqrtbz] assign[=] call[name[nu].sqrt, parameter[binary_operation[name[self]._b2 + binary_operation[name[z] ** constant[2.0]]]]]
variable[asqrtbz] assign[=] binary_operation[name[self]._a + name[sqrtbz]]
if <ast.BoolOp object at 0x7da1b0cb5840> begin[:]
return[<ast.UnaryOp object at 0x7da1b0cb53f0>] | keyword[def] identifier[_Rzderiv] ( identifier[self] , identifier[R] , identifier[z] , identifier[phi] = literal[int] , identifier[t] = literal[int] ):
literal[string]
identifier[sqrtbz] = identifier[nu] . identifier[sqrt] ( identifier[self] . identifier[_b2] + identifier[z] ** literal[int] )
identifier[asqrtbz] = identifier[self] . identifier[_a] + identifier[sqrtbz]
keyword[if] identifier[isinstance] ( identifier[R] , identifier[float] ) keyword[and] identifier[sqrtbz] == identifier[asqrtbz] :
keyword[return] -( literal[int] * identifier[R] * identifier[z] /( identifier[R] ** literal[int] + identifier[asqrtbz] ** literal[int] )** literal[int] )
keyword[else] :
keyword[return] -( literal[int] * identifier[R] * identifier[z] * identifier[asqrtbz]
/ identifier[sqrtbz] /( identifier[R] ** literal[int] + identifier[asqrtbz] ** literal[int] )** literal[int] ) | def _Rzderiv(self, R, z, phi=0.0, t=0.0):
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2phi/dR/dz
HISTORY:
2013-08-28 - Written - Bovy (IAS)
"""
sqrtbz = nu.sqrt(self._b2 + z ** 2.0)
asqrtbz = self._a + sqrtbz
if isinstance(R, float) and sqrtbz == asqrtbz:
return -(3.0 * R * z / (R ** 2.0 + asqrtbz ** 2.0) ** 2.5) # depends on [control=['if'], data=[]]
else:
return -(3.0 * R * z * asqrtbz / sqrtbz / (R ** 2.0 + asqrtbz ** 2.0) ** 2.5) |
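A quick symbolic check of the closed form returned above, assuming galpy's convention that GM is absorbed into the amplitude: with S = sqrt(z^2 + b^2), the Miyamoto-Nagai potential is Phi = -1/sqrt(R^2 + (a + S)^2), and its mixed R,z derivative matches the expression in the code:

import sympy as sp

R, z, a, b = sp.symbols('R z a b', positive=True)
S = sp.sqrt(z**2 + b**2)
Phi = -1 / sp.sqrt(R**2 + (a + S)**2)    # amplitude (GM) set to 1
mixed = sp.diff(Phi, R, z)               # d2(Phi)/dR/dz
closed_form = -3*R*z*(a + S) / (S * (R**2 + (a + S)**2)**sp.Rational(5, 2))

point = {R: 1.3, z: 0.7, a: 3.0, b: 0.28}
assert abs(float((mixed - closed_form).subs(point))) < 1e-12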
def mill(it, label='', hide=None, expected_size=None, every=1):
"""Progress iterator. Prints a mill while iterating over the items."""
def _mill_char(_i):
if _i >= count:
return ' '
else:
return MILL_CHARS[(_i // every) % len(MILL_CHARS)]
def _show(_i):
if not hide:
if ((_i % every) == 0 or # True every "every" updates
(_i == count)): # And when we're done
STREAM.write(MILL_TEMPLATE % (
label, _mill_char(_i), _i, count))
STREAM.flush()
count = len(it) if expected_size is None else expected_size
if count:
_show(0)
for i, item in enumerate(it):
yield item
_show(i + 1)
if not hide:
STREAM.write('\n')
STREAM.flush() | def function[mill, parameter[it, label, hide, expected_size, every]]:
constant[Progress iterator. Prints a mill while iterating over the items.]
def function[_mill_char, parameter[_i]]:
if compare[name[_i] greater_or_equal[>=] name[count]] begin[:]
return[constant[ ]]
def function[_show, parameter[_i]]:
if <ast.UnaryOp object at 0x7da1b1d8bc10> begin[:]
if <ast.BoolOp object at 0x7da1b1d8ace0> begin[:]
call[name[STREAM].write, parameter[binary_operation[name[MILL_TEMPLATE] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1d89690>, <ast.Call object at 0x7da1b1d88610>, <ast.Name object at 0x7da1b1d89390>, <ast.Name object at 0x7da1b1d892d0>]]]]]
call[name[STREAM].flush, parameter[]]
variable[count] assign[=] <ast.IfExp object at 0x7da1b1d88100>
if name[count] begin[:]
call[name[_show], parameter[constant[0]]]
for taget[tuple[[<ast.Name object at 0x7da1b1d5f940>, <ast.Name object at 0x7da1b1d5d330>]]] in starred[call[name[enumerate], parameter[name[it]]]] begin[:]
<ast.Yield object at 0x7da1b1d5ff70>
call[name[_show], parameter[binary_operation[name[i] + constant[1]]]]
if <ast.UnaryOp object at 0x7da1b1d5c640> begin[:]
call[name[STREAM].write, parameter[constant[
]]]
call[name[STREAM].flush, parameter[]] | keyword[def] identifier[mill] ( identifier[it] , identifier[label] = literal[string] , identifier[hide] = keyword[None] , identifier[expected_size] = keyword[None] , identifier[every] = literal[int] ):
literal[string]
keyword[def] identifier[_mill_char] ( identifier[_i] ):
keyword[if] identifier[_i] >= identifier[count] :
keyword[return] literal[string]
keyword[else] :
keyword[return] identifier[MILL_CHARS] [( identifier[_i] // identifier[every] )% identifier[len] ( identifier[MILL_CHARS] )]
keyword[def] identifier[_show] ( identifier[_i] ):
keyword[if] keyword[not] identifier[hide] :
keyword[if] (( identifier[_i] % identifier[every] )== literal[int] keyword[or]
( identifier[_i] == identifier[count] )):
identifier[STREAM] . identifier[write] ( identifier[MILL_TEMPLATE] %(
identifier[label] , identifier[_mill_char] ( identifier[_i] ), identifier[_i] , identifier[count] ))
identifier[STREAM] . identifier[flush] ()
identifier[count] = identifier[len] ( identifier[it] ) keyword[if] identifier[expected_size] keyword[is] keyword[None] keyword[else] identifier[expected_size]
keyword[if] identifier[count] :
identifier[_show] ( literal[int] )
keyword[for] identifier[i] , identifier[item] keyword[in] identifier[enumerate] ( identifier[it] ):
keyword[yield] identifier[item]
identifier[_show] ( identifier[i] + literal[int] )
keyword[if] keyword[not] identifier[hide] :
identifier[STREAM] . identifier[write] ( literal[string] )
identifier[STREAM] . identifier[flush] () | def mill(it, label='', hide=None, expected_size=None, every=1):
"""Progress iterator. Prints a mill while iterating over the items."""
def _mill_char(_i):
if _i >= count:
return ' ' # depends on [control=['if'], data=[]]
else:
return MILL_CHARS[_i // every % len(MILL_CHARS)]
def _show(_i):
if not hide:
if _i % every == 0 or _i == count: # True every "every" updates
# And when we're done
STREAM.write(MILL_TEMPLATE % (label, _mill_char(_i), _i, count))
STREAM.flush() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
count = len(it) if expected_size is None else expected_size
if count:
_show(0) # depends on [control=['if'], data=[]]
for (i, item) in enumerate(it):
yield item
_show(i + 1) # depends on [control=['for'], data=[]]
if not hide:
STREAM.write('\n')
STREAM.flush() # depends on [control=['if'], data=[]] |
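Usage sketch: mill() wraps any iterable and spins the indicator as items are consumed. STREAM, MILL_CHARS, and MILL_TEMPLATE are module globals in the source; plausible values are assumed below, so pasted next to the mill() definition above this runs end to end:

import sys

STREAM = sys.stderr
MILL_CHARS = ['|', '/', '-', '\\']
MILL_TEMPLATE = '%s %s %i/%i\r'   # label, mill char, current, total

for item in mill(range(50), label='working '):
    pass   # real per-item work goes here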
def p_NonAnyType_interface(p):
"""NonAnyType : IDENTIFIER TypeSuffix"""
p[0] = helper.unwrapTypeSuffix(model.InterfaceType(name=p[1]), p[2]) | def function[p_NonAnyType_interface, parameter[p]]:
constant[NonAnyType : IDENTIFIER TypeSuffix]
call[name[p]][constant[0]] assign[=] call[name[helper].unwrapTypeSuffix, parameter[call[name[model].InterfaceType, parameter[]], call[name[p]][constant[2]]]] | keyword[def] identifier[p_NonAnyType_interface] ( identifier[p] ):
literal[string]
identifier[p] [ literal[int] ]= identifier[helper] . identifier[unwrapTypeSuffix] ( identifier[model] . identifier[InterfaceType] ( identifier[name] = identifier[p] [ literal[int] ]), identifier[p] [ literal[int] ]) | def p_NonAnyType_interface(p):
"""NonAnyType : IDENTIFIER TypeSuffix"""
p[0] = helper.unwrapTypeSuffix(model.InterfaceType(name=p[1]), p[2]) |
def rollback(self):
"""Implementation of NAPALM method rollback."""
commands = []
commands.append('configure replace flash:rollback-0')
commands.append('write memory')
self.device.run_commands(commands) | def function[rollback, parameter[self]]:
constant[Implementation of NAPALM method rollback.]
variable[commands] assign[=] list[[]]
call[name[commands].append, parameter[constant[configure replace flash:rollback-0]]]
call[name[commands].append, parameter[constant[write memory]]]
call[name[self].device.run_commands, parameter[name[commands]]] | keyword[def] identifier[rollback] ( identifier[self] ):
literal[string]
identifier[commands] =[]
identifier[commands] . identifier[append] ( literal[string] )
identifier[commands] . identifier[append] ( literal[string] )
identifier[self] . identifier[device] . identifier[run_commands] ( identifier[commands] ) | def rollback(self):
"""Implementation of NAPALM method rollback."""
commands = []
commands.append('configure replace flash:rollback-0')
commands.append('write memory')
self.device.run_commands(commands) |
def _incr_executions(self):
"""Increment the number of executions for the current connection."""
self._pool_manager.get_connection(self.pid, self._conn).executions += 1 | def function[_incr_executions, parameter[self]]:
constant[Increment the number of executions for the current connection.]
<ast.AugAssign object at 0x7da2041d9450> | keyword[def] identifier[_incr_executions] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_pool_manager] . identifier[get_connection] ( identifier[self] . identifier[pid] , identifier[self] . identifier[_conn] ). identifier[executions] += literal[int] | def _incr_executions(self):
"""Increment the number of executions for the current connection."""
self._pool_manager.get_connection(self.pid, self._conn).executions += 1 |
def transformer_tall_pretrain_lm_tpu_adafactor():
"""Hparams for transformer on LM pretraining (with 64k vocab) on TPU."""
hparams = transformer_tall_pretrain_lm()
update_hparams_for_tpu(hparams)
hparams.max_length = 1024
# For multi-problem on TPU we need it in absolute examples.
hparams.batch_size = 8
hparams.multiproblem_vocab_size = 2**16
return hparams | def function[transformer_tall_pretrain_lm_tpu_adafactor, parameter[]]:
constant[Hparams for transformer on LM pretraining (with 64k vocab) on TPU.]
variable[hparams] assign[=] call[name[transformer_tall_pretrain_lm], parameter[]]
call[name[update_hparams_for_tpu], parameter[name[hparams]]]
name[hparams].max_length assign[=] constant[1024]
name[hparams].batch_size assign[=] constant[8]
name[hparams].multiproblem_vocab_size assign[=] binary_operation[constant[2] ** constant[16]]
return[name[hparams]] | keyword[def] identifier[transformer_tall_pretrain_lm_tpu_adafactor] ():
literal[string]
identifier[hparams] = identifier[transformer_tall_pretrain_lm] ()
identifier[update_hparams_for_tpu] ( identifier[hparams] )
identifier[hparams] . identifier[max_length] = literal[int]
identifier[hparams] . identifier[batch_size] = literal[int]
identifier[hparams] . identifier[multiproblem_vocab_size] = literal[int] ** literal[int]
keyword[return] identifier[hparams] | def transformer_tall_pretrain_lm_tpu_adafactor():
"""Hparams for transformer on LM pretraining (with 64k vocab) on TPU."""
hparams = transformer_tall_pretrain_lm()
update_hparams_for_tpu(hparams)
hparams.max_length = 1024
# For multi-problem on TPU we need it in absolute examples.
hparams.batch_size = 8
hparams.multiproblem_vocab_size = 2 ** 16
return hparams |
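The registration pattern here is base-config-plus-overrides. A dependency-free namespace shows the same mechanics; Tensor2Tensor's real hparams object is richer and the baseline values below are assumed:

from types import SimpleNamespace

def base_hparams():
    # Assumed baseline, for illustration only.
    return SimpleNamespace(max_length=256, batch_size=4096,
                           multiproblem_vocab_size=0)

def tpu_adafactor_hparams():
    hparams = base_hparams()
    hparams.max_length = 1024
    hparams.batch_size = 8                     # absolute examples per batch on TPU
    hparams.multiproblem_vocab_size = 2 ** 16  # 64k shared vocab
    return hparams

assert tpu_adafactor_hparams().multiproblem_vocab_size == 65536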
def make_hash(keys, **kwargs):
"""
Creates a perfect hash function from the given keys. For a
description of the keyword arguments see :py:func:`hash_parameters`.
    >>> l = (0, 3, 4, 7, 10, 13, 15, 18, 19, 21, 22, 24, 26, 29, 30, 34)
>>> hf = make_hash(l)
>>> hf(19)
1
>>> hash_parameters(l).slots[1]
19
"""
params = hash_parameters(keys, **kwargs)
t = params.t
r = params.r
offset = params.offset
to_int = params.to_int if params.to_int else __identity
def perfect_hash(x):
val = to_int(x) + offset
x = val % t
y = val // t
return x + r[y]
# Undocumented properties, but used in make_dict()...
perfect_hash.length = len(params.slots)
perfect_hash.slots = params.slots
return perfect_hash | def function[make_hash, parameter[keys]]:
constant[
Creates a perfect hash function from the given keys. For a
description of the keyword arguments see :py:func:`hash_parameters`.
    >>> l = (0, 3, 4, 7, 10, 13, 15, 18, 19, 21, 22, 24, 26, 29, 30, 34)
>>> hf = make_hash(l)
>>> hf(19)
1
>>> hash_parameters(l).slots[1]
19
]
variable[params] assign[=] call[name[hash_parameters], parameter[name[keys]]]
variable[t] assign[=] name[params].t
variable[r] assign[=] name[params].r
variable[offset] assign[=] name[params].offset
variable[to_int] assign[=] <ast.IfExp object at 0x7da2041dbd90>
def function[perfect_hash, parameter[x]]:
variable[val] assign[=] binary_operation[call[name[to_int], parameter[name[x]]] + name[offset]]
variable[x] assign[=] binary_operation[name[val] <ast.Mod object at 0x7da2590d6920> name[t]]
variable[y] assign[=] binary_operation[name[val] <ast.FloorDiv object at 0x7da2590d6bc0> name[t]]
return[binary_operation[name[x] + call[name[r]][name[y]]]]
name[perfect_hash].length assign[=] call[name[len], parameter[name[params].slots]]
name[perfect_hash].slots assign[=] name[params].slots
return[name[perfect_hash]] | keyword[def] identifier[make_hash] ( identifier[keys] ,** identifier[kwargs] ):
literal[string]
identifier[params] = identifier[hash_parameters] ( identifier[keys] ,** identifier[kwargs] )
identifier[t] = identifier[params] . identifier[t]
identifier[r] = identifier[params] . identifier[r]
identifier[offset] = identifier[params] . identifier[offset]
identifier[to_int] = identifier[params] . identifier[to_int] keyword[if] identifier[params] . identifier[to_int] keyword[else] identifier[__identity]
keyword[def] identifier[perfect_hash] ( identifier[x] ):
identifier[val] = identifier[to_int] ( identifier[x] )+ identifier[offset]
identifier[x] = identifier[val] % identifier[t]
identifier[y] = identifier[val] // identifier[t]
keyword[return] identifier[x] + identifier[r] [ identifier[y] ]
identifier[perfect_hash] . identifier[length] = identifier[len] ( identifier[params] . identifier[slots] )
identifier[perfect_hash] . identifier[slots] = identifier[params] . identifier[slots]
keyword[return] identifier[perfect_hash] | def make_hash(keys, **kwargs):
"""
Creates a perfect hash function from the given keys. For a
description of the keyword arguments see :py:func:`hash_parameters`.
    >>> l = (0, 3, 4, 7, 10, 13, 15, 18, 19, 21, 22, 24, 26, 29, 30, 34)
>>> hf = make_hash(l)
>>> hf(19)
1
>>> hash_parameters(l).slots[1]
19
"""
params = hash_parameters(keys, **kwargs)
t = params.t
r = params.r
offset = params.offset
to_int = params.to_int if params.to_int else __identity
def perfect_hash(x):
val = to_int(x) + offset
x = val % t
y = val // t
return x + r[y]
# Undocumented properties, but used in make_dict()...
perfect_hash.length = len(params.slots)
perfect_hash.slots = params.slots
return perfect_hash |
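The closure implements a displaced-table hash: val = to_int(x) + offset is split into a column (val % t) and a row (val // t), and the row's displacement r[row] is added back. A worked check with hypothetical parameters:

t, r, offset = 4, (0, 3, 1, 6), 0   # hypothetical displacement table

def hf(x):
    val = x + offset
    return val % t + r[val // t]

# x = 9 falls in row 2 (9 // 4), column 1 (9 % 4); r[2] = 1 gives slot 2.
assert hf(9) == 2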
def _CreateOutputModule(self, options):
"""Creates the output module.
Args:
options (argparse.Namespace): command line arguments.
Returns:
OutputModule: output module.
Raises:
RuntimeError: if the output module cannot be created.
"""
formatter_mediator = formatters_mediator.FormatterMediator(
data_location=self._data_location)
try:
formatter_mediator.SetPreferredLanguageIdentifier(
self._preferred_language)
except (KeyError, TypeError) as exception:
raise RuntimeError(exception)
mediator = output_mediator.OutputMediator(
self._knowledge_base, formatter_mediator,
preferred_encoding=self.preferred_encoding)
mediator.SetTimezone(self._preferred_time_zone)
try:
output_module = output_manager.OutputManager.NewOutputModule(
self._output_format, mediator)
except (KeyError, ValueError) as exception:
raise RuntimeError(
'Unable to create output module with error: {0!s}'.format(
exception))
if output_manager.OutputManager.IsLinearOutputModule(self._output_format):
output_file_object = open(self._output_filename, 'wb')
output_writer = tools.FileObjectOutputWriter(output_file_object)
output_module.SetOutputWriter(output_writer)
helpers_manager.ArgumentHelperManager.ParseOptions(options, output_module)
# Check if there are parameters that have not been defined and need to
# in order for the output module to continue. Prompt user to supply
# those that may be missing.
missing_parameters = output_module.GetMissingArguments()
while missing_parameters:
for parameter in missing_parameters:
value = self._PromptUserForInput(
'Missing parameter {0:s} for output module'.format(parameter))
if value is None:
logger.warning(
'Unable to set the missing parameter for: {0:s}'.format(
parameter))
continue
setattr(options, parameter, value)
helpers_manager.ArgumentHelperManager.ParseOptions(
options, output_module)
missing_parameters = output_module.GetMissingArguments()
return output_module | def function[_CreateOutputModule, parameter[self, options]]:
constant[Creates the output module.
Args:
options (argparse.Namespace): command line arguments.
Returns:
OutputModule: output module.
Raises:
RuntimeError: if the output module cannot be created.
]
variable[formatter_mediator] assign[=] call[name[formatters_mediator].FormatterMediator, parameter[]]
<ast.Try object at 0x7da2041daa40>
variable[mediator] assign[=] call[name[output_mediator].OutputMediator, parameter[name[self]._knowledge_base, name[formatter_mediator]]]
call[name[mediator].SetTimezone, parameter[name[self]._preferred_time_zone]]
<ast.Try object at 0x7da18eb55180>
if call[name[output_manager].OutputManager.IsLinearOutputModule, parameter[name[self]._output_format]] begin[:]
variable[output_file_object] assign[=] call[name[open], parameter[name[self]._output_filename, constant[wb]]]
variable[output_writer] assign[=] call[name[tools].FileObjectOutputWriter, parameter[name[output_file_object]]]
call[name[output_module].SetOutputWriter, parameter[name[output_writer]]]
call[name[helpers_manager].ArgumentHelperManager.ParseOptions, parameter[name[options], name[output_module]]]
variable[missing_parameters] assign[=] call[name[output_module].GetMissingArguments, parameter[]]
while name[missing_parameters] begin[:]
for taget[name[parameter]] in starred[name[missing_parameters]] begin[:]
variable[value] assign[=] call[name[self]._PromptUserForInput, parameter[call[constant[Missing parameter {0:s} for output module].format, parameter[name[parameter]]]]]
if compare[name[value] is constant[None]] begin[:]
call[name[logger].warning, parameter[call[constant[Unable to set the missing parameter for: {0:s}].format, parameter[name[parameter]]]]]
continue
call[name[setattr], parameter[name[options], name[parameter], name[value]]]
call[name[helpers_manager].ArgumentHelperManager.ParseOptions, parameter[name[options], name[output_module]]]
variable[missing_parameters] assign[=] call[name[output_module].GetMissingArguments, parameter[]]
return[name[output_module]] | keyword[def] identifier[_CreateOutputModule] ( identifier[self] , identifier[options] ):
literal[string]
identifier[formatter_mediator] = identifier[formatters_mediator] . identifier[FormatterMediator] (
identifier[data_location] = identifier[self] . identifier[_data_location] )
keyword[try] :
identifier[formatter_mediator] . identifier[SetPreferredLanguageIdentifier] (
identifier[self] . identifier[_preferred_language] )
keyword[except] ( identifier[KeyError] , identifier[TypeError] ) keyword[as] identifier[exception] :
keyword[raise] identifier[RuntimeError] ( identifier[exception] )
identifier[mediator] = identifier[output_mediator] . identifier[OutputMediator] (
identifier[self] . identifier[_knowledge_base] , identifier[formatter_mediator] ,
identifier[preferred_encoding] = identifier[self] . identifier[preferred_encoding] )
identifier[mediator] . identifier[SetTimezone] ( identifier[self] . identifier[_preferred_time_zone] )
keyword[try] :
identifier[output_module] = identifier[output_manager] . identifier[OutputManager] . identifier[NewOutputModule] (
identifier[self] . identifier[_output_format] , identifier[mediator] )
keyword[except] ( identifier[KeyError] , identifier[ValueError] ) keyword[as] identifier[exception] :
keyword[raise] identifier[RuntimeError] (
literal[string] . identifier[format] (
identifier[exception] ))
keyword[if] identifier[output_manager] . identifier[OutputManager] . identifier[IsLinearOutputModule] ( identifier[self] . identifier[_output_format] ):
identifier[output_file_object] = identifier[open] ( identifier[self] . identifier[_output_filename] , literal[string] )
identifier[output_writer] = identifier[tools] . identifier[FileObjectOutputWriter] ( identifier[output_file_object] )
identifier[output_module] . identifier[SetOutputWriter] ( identifier[output_writer] )
identifier[helpers_manager] . identifier[ArgumentHelperManager] . identifier[ParseOptions] ( identifier[options] , identifier[output_module] )
identifier[missing_parameters] = identifier[output_module] . identifier[GetMissingArguments] ()
keyword[while] identifier[missing_parameters] :
keyword[for] identifier[parameter] keyword[in] identifier[missing_parameters] :
identifier[value] = identifier[self] . identifier[_PromptUserForInput] (
literal[string] . identifier[format] ( identifier[parameter] ))
keyword[if] identifier[value] keyword[is] keyword[None] :
identifier[logger] . identifier[warning] (
literal[string] . identifier[format] (
identifier[parameter] ))
keyword[continue]
identifier[setattr] ( identifier[options] , identifier[parameter] , identifier[value] )
identifier[helpers_manager] . identifier[ArgumentHelperManager] . identifier[ParseOptions] (
identifier[options] , identifier[output_module] )
identifier[missing_parameters] = identifier[output_module] . identifier[GetMissingArguments] ()
keyword[return] identifier[output_module] | def _CreateOutputModule(self, options):
"""Creates the output module.
Args:
options (argparse.Namespace): command line arguments.
Returns:
OutputModule: output module.
Raises:
RuntimeError: if the output module cannot be created.
"""
formatter_mediator = formatters_mediator.FormatterMediator(data_location=self._data_location)
try:
formatter_mediator.SetPreferredLanguageIdentifier(self._preferred_language) # depends on [control=['try'], data=[]]
except (KeyError, TypeError) as exception:
raise RuntimeError(exception) # depends on [control=['except'], data=['exception']]
mediator = output_mediator.OutputMediator(self._knowledge_base, formatter_mediator, preferred_encoding=self.preferred_encoding)
mediator.SetTimezone(self._preferred_time_zone)
try:
output_module = output_manager.OutputManager.NewOutputModule(self._output_format, mediator) # depends on [control=['try'], data=[]]
except (KeyError, ValueError) as exception:
raise RuntimeError('Unable to create output module with error: {0!s}'.format(exception)) # depends on [control=['except'], data=['exception']]
if output_manager.OutputManager.IsLinearOutputModule(self._output_format):
output_file_object = open(self._output_filename, 'wb')
output_writer = tools.FileObjectOutputWriter(output_file_object)
output_module.SetOutputWriter(output_writer) # depends on [control=['if'], data=[]]
helpers_manager.ArgumentHelperManager.ParseOptions(options, output_module)
# Check if there are parameters that have not been defined and need to
# in order for the output module to continue. Prompt user to supply
# those that may be missing.
missing_parameters = output_module.GetMissingArguments()
while missing_parameters:
for parameter in missing_parameters:
value = self._PromptUserForInput('Missing parameter {0:s} for output module'.format(parameter))
if value is None:
logger.warning('Unable to set the missing parameter for: {0:s}'.format(parameter))
continue # depends on [control=['if'], data=[]]
setattr(options, parameter, value) # depends on [control=['for'], data=['parameter']]
helpers_manager.ArgumentHelperManager.ParseOptions(options, output_module)
missing_parameters = output_module.GetMissingArguments() # depends on [control=['while'], data=[]]
return output_module |
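The prompt-until-complete loop at the end is worth isolating. A stub output module that reports one missing parameter shows the control flow; plaso's real modules consume an options object, simplified to a dict here:

class StubOutputModule:
    def __init__(self):
        self.options = {}

    def GetMissingArguments(self):
        # Mirrors the interface used above: an empty list means fully configured.
        return [] if 'server' in self.options else ['server']

module = StubOutputModule()
answers = iter(['localhost'])        # stands in for _PromptUserForInput()

missing = module.GetMissingArguments()
while missing:
    for parameter in missing:
        module.options[parameter] = next(answers)
    missing = module.GetMissingArguments()

assert module.options == {'server': 'localhost'}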
def from_hdf5(hdf5, anno_id=None):
"""
    Converts an HDF5 file to a RAMON object. Returns an object that is a
    child-class of RAMON (though it's determined at run-time what type is returned).
    Accessing multiple IDs from the same file is not supported, because it's
    not dramatically faster to access each item in the hdf5 file at the same
    time. It's semantically and computationally easier to run this function
    several times on the same file.
Arguments:
hdf5 (h5py.File): A h5py File object that holds RAMON data
anno_id (int): The ID of the RAMON obj to extract from the file. This
defaults to the first one (sorted) if none is specified.
Returns:
ndio.RAMON object
"""
if anno_id is None:
# The user just wants the first item we find, so... Yeah.
return from_hdf5(hdf5, list(hdf5.keys())[0])
# First, get the actual object we're going to download.
anno_id = str(anno_id)
if anno_id not in list(hdf5.keys()):
raise ValueError("ID {} is not in this file. Options are: {}".format(
anno_id,
", ".join(list(hdf5.keys()))
))
anno = hdf5[anno_id]
# anno now holds just the RAMON of interest
# This is the most complicated line in here: It creates an object whose
# type is conditional on the ANNOTATION_TYPE of the hdf5 object.
try:
r = AnnotationType.get_class(anno['ANNOTATION_TYPE'][0])()
    except Exception:
raise InvalidRAMONError("This is not a valid RAMON type.")
# All RAMON types definitely have these attributes:
metadata = anno['METADATA']
r.author = metadata['AUTHOR'][0]
r.confidence = metadata['CONFIDENCE'][0]
r.status = metadata['STATUS'][0]
r.id = anno_id
# These are a little tougher, some RAMON types have special attributes:
if type(r) in [RAMONNeuron, RAMONSynapse]:
r.segments = metadata['SEGMENTS'][()]
if 'KVPAIRS' in metadata:
kvs = metadata['KVPAIRS'][()][0].split()
if len(kvs) != 0:
for i in kvs:
k, v = str(i).split(',')
r.kvpairs[str(k)] = str(v)
else:
r.kvpairs = {}
if issubclass(type(r), RAMONVolume):
if 'CUTOUT' in anno:
r.cutout = anno['CUTOUT'][()]
        if 'XYZOFFSET' in anno:
            r.xyz_offset = anno['XYZOFFSET'][()]
        if 'RESOLUTION' in anno:
            r.resolution = anno['RESOLUTION'][()]
if type(r) is RAMONSynapse:
r.synapse_type = metadata['SYNAPSE_TYPE'][0]
r.weight = metadata['WEIGHT'][0]
if type(r) is RAMONSegment:
if 'NEURON' in metadata:
r.neuron = metadata['NEURON'][0]
if 'PARENTSEED' in metadata:
r.parent_seed = metadata['PARENTSEED'][0]
if 'SEGMENTCLASS' in metadata:
r.segmentclass = metadata['SEGMENTCLASS'][0]
if 'SYNAPSES' in metadata:
r.synapses = metadata['SYNAPSES'][()]
if 'ORGANELLES' in metadata:
r.organelles = metadata['ORGANELLES'][()]
if type(r) is RAMONOrganelle:
r.organelle_class = metadata['ORGANELLECLASS'][0]
return r | def function[from_hdf5, parameter[hdf5, anno_id]]:
constant[
    Converts an HDF5 file to a RAMON object. Returns an object that is a
    child-class of RAMON (though it's determined at run-time what type is returned).
    Accessing multiple IDs from the same file is not supported, because it's
    not dramatically faster to access each item in the hdf5 file at the same
    time. It's semantically and computationally easier to run this function
    several times on the same file.
Arguments:
hdf5 (h5py.File): A h5py File object that holds RAMON data
anno_id (int): The ID of the RAMON obj to extract from the file. This
defaults to the first one (sorted) if none is specified.
Returns:
ndio.RAMON object
]
if compare[name[anno_id] is constant[None]] begin[:]
return[call[name[from_hdf5], parameter[name[hdf5], call[call[name[list], parameter[call[name[hdf5].keys, parameter[]]]]][constant[0]]]]]
variable[anno_id] assign[=] call[name[str], parameter[name[anno_id]]]
if compare[name[anno_id] <ast.NotIn object at 0x7da2590d7190> call[name[list], parameter[call[name[hdf5].keys, parameter[]]]]] begin[:]
<ast.Raise object at 0x7da1b0292170>
variable[anno] assign[=] call[name[hdf5]][name[anno_id]]
<ast.Try object at 0x7da1b0290640>
variable[metadata] assign[=] call[name[anno]][constant[METADATA]]
name[r].author assign[=] call[call[name[metadata]][constant[AUTHOR]]][constant[0]]
name[r].confidence assign[=] call[call[name[metadata]][constant[CONFIDENCE]]][constant[0]]
name[r].status assign[=] call[call[name[metadata]][constant[STATUS]]][constant[0]]
name[r].id assign[=] name[anno_id]
if compare[call[name[type], parameter[name[r]]] in list[[<ast.Name object at 0x7da1b0290910>, <ast.Name object at 0x7da1b0290eb0>]]] begin[:]
name[r].segments assign[=] call[call[name[metadata]][constant[SEGMENTS]]][tuple[[]]]
if compare[constant[KVPAIRS] in name[metadata]] begin[:]
variable[kvs] assign[=] call[call[call[call[name[metadata]][constant[KVPAIRS]]][tuple[[]]]][constant[0]].split, parameter[]]
if compare[call[name[len], parameter[name[kvs]]] not_equal[!=] constant[0]] begin[:]
for taget[name[i]] in starred[name[kvs]] begin[:]
<ast.Tuple object at 0x7da1b02133d0> assign[=] call[call[name[str], parameter[name[i]]].split, parameter[constant[,]]]
call[name[r].kvpairs][call[name[str], parameter[name[k]]]] assign[=] call[name[str], parameter[name[v]]]
if call[name[issubclass], parameter[call[name[type], parameter[name[r]]], name[RAMONVolume]]] begin[:]
if compare[constant[CUTOUT] in name[anno]] begin[:]
name[r].cutout assign[=] call[call[name[anno]][constant[CUTOUT]]][tuple[[]]]
if compare[constant[XYZOFFSET] in name[anno]] begin[:]
name[r].xyz_offset assign[=] call[call[name[anno]][constant[XYZOFFSET]]][tuple[[]]]
if compare[constant[RESOLUTION] in name[anno]] begin[:]
name[r].resolution assign[=] call[call[name[anno]][constant[RESOLUTION]]][tuple[[]]]
if compare[call[name[type], parameter[name[r]]] is name[RAMONSynapse]] begin[:]
name[r].synapse_type assign[=] call[call[name[metadata]][constant[SYNAPSE_TYPE]]][constant[0]]
name[r].weight assign[=] call[call[name[metadata]][constant[WEIGHT]]][constant[0]]
if compare[call[name[type], parameter[name[r]]] is name[RAMONSegment]] begin[:]
if compare[constant[NEURON] in name[metadata]] begin[:]
name[r].neuron assign[=] call[call[name[metadata]][constant[NEURON]]][constant[0]]
if compare[constant[PARENTSEED] in name[metadata]] begin[:]
name[r].parent_seed assign[=] call[call[name[metadata]][constant[PARENTSEED]]][constant[0]]
if compare[constant[SEGMENTCLASS] in name[metadata]] begin[:]
name[r].segmentclass assign[=] call[call[name[metadata]][constant[SEGMENTCLASS]]][constant[0]]
if compare[constant[SYNAPSES] in name[metadata]] begin[:]
name[r].synapses assign[=] call[call[name[metadata]][constant[SYNAPSES]]][tuple[[]]]
if compare[constant[ORGANELLES] in name[metadata]] begin[:]
name[r].organelles assign[=] call[call[name[metadata]][constant[ORGANELLES]]][tuple[[]]]
if compare[call[name[type], parameter[name[r]]] is name[RAMONOrganelle]] begin[:]
name[r].organelle_class assign[=] call[call[name[metadata]][constant[ORGANELLECLASS]]][constant[0]]
return[name[r]] | keyword[def] identifier[from_hdf5] ( identifier[hdf5] , identifier[anno_id] = keyword[None] ):
literal[string]
keyword[if] identifier[anno_id] keyword[is] keyword[None] :
keyword[return] identifier[from_hdf5] ( identifier[hdf5] , identifier[list] ( identifier[hdf5] . identifier[keys] ())[ literal[int] ])
identifier[anno_id] = identifier[str] ( identifier[anno_id] )
keyword[if] identifier[anno_id] keyword[not] keyword[in] identifier[list] ( identifier[hdf5] . identifier[keys] ()):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[anno_id] ,
literal[string] . identifier[join] ( identifier[list] ( identifier[hdf5] . identifier[keys] ()))
))
identifier[anno] = identifier[hdf5] [ identifier[anno_id] ]
keyword[try] :
identifier[r] = identifier[AnnotationType] . identifier[get_class] ( identifier[anno] [ literal[string] ][ literal[int] ])()
keyword[except] :
keyword[raise] identifier[InvalidRAMONError] ( literal[string] )
identifier[metadata] = identifier[anno] [ literal[string] ]
identifier[r] . identifier[author] = identifier[metadata] [ literal[string] ][ literal[int] ]
identifier[r] . identifier[confidence] = identifier[metadata] [ literal[string] ][ literal[int] ]
identifier[r] . identifier[status] = identifier[metadata] [ literal[string] ][ literal[int] ]
identifier[r] . identifier[id] = identifier[anno_id]
keyword[if] identifier[type] ( identifier[r] ) keyword[in] [ identifier[RAMONNeuron] , identifier[RAMONSynapse] ]:
identifier[r] . identifier[segments] = identifier[metadata] [ literal[string] ][()]
keyword[if] literal[string] keyword[in] identifier[metadata] :
identifier[kvs] = identifier[metadata] [ literal[string] ][()][ literal[int] ]. identifier[split] ()
keyword[if] identifier[len] ( identifier[kvs] )!= literal[int] :
keyword[for] identifier[i] keyword[in] identifier[kvs] :
identifier[k] , identifier[v] = identifier[str] ( identifier[i] ). identifier[split] ( literal[string] )
identifier[r] . identifier[kvpairs] [ identifier[str] ( identifier[k] )]= identifier[str] ( identifier[v] )
keyword[else] :
identifier[r] . identifier[kvpairs] ={}
keyword[if] identifier[issubclass] ( identifier[type] ( identifier[r] ), identifier[RAMONVolume] ):
keyword[if] literal[string] keyword[in] identifier[anno] :
identifier[r] . identifier[cutout] = identifier[anno] [ literal[string] ][()]
keyword[if] literal[string] keyword[in] identifier[anno] :
identifier[r] . identifier[xyz_offset] = identifier[anno] [ literal[string] ][()]
keyword[if] literal[string] keyword[in] identifier[anno] :
identifier[r] . identifier[resolution] = identifier[anno] [ literal[string] ][()]
keyword[if] identifier[type] ( identifier[r] ) keyword[is] identifier[RAMONSynapse] :
identifier[r] . identifier[synapse_type] = identifier[metadata] [ literal[string] ][ literal[int] ]
identifier[r] . identifier[weight] = identifier[metadata] [ literal[string] ][ literal[int] ]
keyword[if] identifier[type] ( identifier[r] ) keyword[is] identifier[RAMONSegment] :
keyword[if] literal[string] keyword[in] identifier[metadata] :
identifier[r] . identifier[neuron] = identifier[metadata] [ literal[string] ][ literal[int] ]
keyword[if] literal[string] keyword[in] identifier[metadata] :
identifier[r] . identifier[parent_seed] = identifier[metadata] [ literal[string] ][ literal[int] ]
keyword[if] literal[string] keyword[in] identifier[metadata] :
identifier[r] . identifier[segmentclass] = identifier[metadata] [ literal[string] ][ literal[int] ]
keyword[if] literal[string] keyword[in] identifier[metadata] :
identifier[r] . identifier[synapses] = identifier[metadata] [ literal[string] ][()]
keyword[if] literal[string] keyword[in] identifier[metadata] :
identifier[r] . identifier[organelles] = identifier[metadata] [ literal[string] ][()]
keyword[if] identifier[type] ( identifier[r] ) keyword[is] identifier[RAMONOrganelle] :
identifier[r] . identifier[organelle_class] = identifier[metadata] [ literal[string] ][ literal[int] ]
keyword[return] identifier[r] | def from_hdf5(hdf5, anno_id=None):
"""
Converts an HDF5 file to a RAMON object. Returns an object that is a child-
-class of RAMON (though it's determined at run-time what type is returned).
Accessing multiple IDs from the same file is not supported, because it's
not dramatically faster to access each item in the hdf5 file at the same
    time. It's semantically and computationally easier to run this function
several times on the same file.
Arguments:
hdf5 (h5py.File): A h5py File object that holds RAMON data
anno_id (int): The ID of the RAMON obj to extract from the file. This
defaults to the first one (sorted) if none is specified.
Returns:
ndio.RAMON object
"""
if anno_id is None:
# The user just wants the first item we find, so... Yeah.
return from_hdf5(hdf5, list(hdf5.keys())[0]) # depends on [control=['if'], data=[]]
# First, get the actual object we're going to download.
anno_id = str(anno_id)
if anno_id not in list(hdf5.keys()):
raise ValueError('ID {} is not in this file. Options are: {}'.format(anno_id, ', '.join(list(hdf5.keys())))) # depends on [control=['if'], data=['anno_id']]
anno = hdf5[anno_id]
# anno now holds just the RAMON of interest
# This is the most complicated line in here: It creates an object whose
# type is conditional on the ANNOTATION_TYPE of the hdf5 object.
try:
r = AnnotationType.get_class(anno['ANNOTATION_TYPE'][0])() # depends on [control=['try'], data=[]]
except:
raise InvalidRAMONError('This is not a valid RAMON type.') # depends on [control=['except'], data=[]]
# All RAMON types definitely have these attributes:
metadata = anno['METADATA']
r.author = metadata['AUTHOR'][0]
r.confidence = metadata['CONFIDENCE'][0]
r.status = metadata['STATUS'][0]
r.id = anno_id
# These are a little tougher, some RAMON types have special attributes:
if type(r) in [RAMONNeuron, RAMONSynapse]:
r.segments = metadata['SEGMENTS'][()] # depends on [control=['if'], data=[]]
if 'KVPAIRS' in metadata:
kvs = metadata['KVPAIRS'][()][0].split()
if len(kvs) != 0:
for i in kvs:
(k, v) = str(i).split(',')
r.kvpairs[str(k)] = str(v) # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
else:
r.kvpairs = {} # depends on [control=['if'], data=['metadata']]
if issubclass(type(r), RAMONVolume):
if 'CUTOUT' in anno:
r.cutout = anno['CUTOUT'][()] # depends on [control=['if'], data=['anno']]
if 'XYZOFFSET' in anno:
            r.xyz_offset = anno['XYZOFFSET'][()] # depends on [control=['if'], data=['anno']]
if 'RESOLUTION' in anno:
            r.resolution = anno['RESOLUTION'][()] # depends on [control=['if'], data=['anno']] # depends on [control=['if'], data=[]]
if type(r) is RAMONSynapse:
r.synapse_type = metadata['SYNAPSE_TYPE'][0]
r.weight = metadata['WEIGHT'][0] # depends on [control=['if'], data=[]]
if type(r) is RAMONSegment:
if 'NEURON' in metadata:
r.neuron = metadata['NEURON'][0] # depends on [control=['if'], data=['metadata']]
if 'PARENTSEED' in metadata:
r.parent_seed = metadata['PARENTSEED'][0] # depends on [control=['if'], data=['metadata']]
if 'SEGMENTCLASS' in metadata:
r.segmentclass = metadata['SEGMENTCLASS'][0] # depends on [control=['if'], data=['metadata']]
if 'SYNAPSES' in metadata:
r.synapses = metadata['SYNAPSES'][()] # depends on [control=['if'], data=['metadata']]
if 'ORGANELLES' in metadata:
r.organelles = metadata['ORGANELLES'][()] # depends on [control=['if'], data=['metadata']] # depends on [control=['if'], data=[]]
if type(r) is RAMONOrganelle:
r.organelle_class = metadata['ORGANELLECLASS'][0] # depends on [control=['if'], data=[]]
return r |
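A minimal usage sketch for from_hdf5 above. The file name and annotation ID are placeholders, and the layout (one top-level HDF5 group per RAMON ID) is inferred from the parsing logic rather than taken from the original docs:
import h5py

with h5py.File('annotations.hdf5', 'r') as f:
    ramon = from_hdf5(f, anno_id=3)        # or from_hdf5(f) for the first stored ID
    print(type(ramon).__name__, ramon.author, ramon.confidence)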
def remove_link(self, obj, attr=None):
"""
removes link from obj.attr
"""
name = repr(self)
if not name:
return self
l = self.__class__._get_links()
v = WeakAttrLink(None, obj) if attr is None else WeakAttrLink(obj, attr)
if name in l:
if v in l[name]:
l[name].remove(v)
if not l[name]:
l.pop(name)
return self | def function[remove_link, parameter[self, obj, attr]]:
constant[
removes link from obj.attr
]
variable[name] assign[=] call[name[repr], parameter[name[self]]]
if <ast.UnaryOp object at 0x7da20c7c9270> begin[:]
return[name[self]]
variable[l] assign[=] call[name[self].__class__._get_links, parameter[]]
variable[v] assign[=] <ast.IfExp object at 0x7da18f813190>
if compare[name[name] in name[l]] begin[:]
if compare[name[v] in call[name[l]][name[name]]] begin[:]
call[call[name[l]][name[name]].remove, parameter[name[v]]]
if <ast.UnaryOp object at 0x7da20cabec50> begin[:]
call[name[l].pop, parameter[name[name]]]
return[name[self]] | keyword[def] identifier[remove_link] ( identifier[self] , identifier[obj] , identifier[attr] = keyword[None] ):
literal[string]
identifier[name] = identifier[repr] ( identifier[self] )
keyword[if] keyword[not] identifier[name] :
keyword[return] identifier[self]
identifier[l] = identifier[self] . identifier[__class__] . identifier[_get_links] ()
identifier[v] = identifier[WeakAttrLink] ( keyword[None] , identifier[obj] ) keyword[if] identifier[attr] keyword[is] keyword[None] keyword[else] identifier[WeakAttrLink] ( identifier[obj] , identifier[attr] )
keyword[if] identifier[name] keyword[in] identifier[l] :
keyword[if] identifier[v] keyword[in] identifier[l] [ identifier[name] ]:
identifier[l] [ identifier[name] ]. identifier[remove] ( identifier[v] )
keyword[if] keyword[not] identifier[l] [ identifier[name] ]:
identifier[l] . identifier[pop] ( identifier[name] )
keyword[return] identifier[self] | def remove_link(self, obj, attr=None):
"""
removes link from obj.attr
"""
name = repr(self)
if not name:
return self # depends on [control=['if'], data=[]]
l = self.__class__._get_links()
v = WeakAttrLink(None, obj) if attr is None else WeakAttrLink(obj, attr)
if name in l:
if v in l[name]:
l[name].remove(v) # depends on [control=['if'], data=['v']]
if not l[name]:
l.pop(name) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['name', 'l']]
return self |
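A hedged illustration of the intended call pattern for remove_link. `Signal` and `window` are hypothetical stand-ins for whatever class mixes in remove_link()/_get_links() and the object it was linked to:
sig = Signal('resize')                  # hypothetical class using this mixin
sig.remove_link(window, 'on_resize')    # drop the (window, 'on_resize') attribute link
sig.remove_link(window)                 # drop a bare object link (the attr=None form)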
def crosstalk_correction(pathway_definitions,
random_seed=2015,
gene_set=set(),
all_genes=True,
max_iters=1000):
"""A wrapper function around the maximum impact estimation algorithm.
Parameters
-----------
pathway_definitions : dict(str -> set(str))
The original pathway definitions.
A pathway (key) is defined by a set of genes (value).
random_seed : int (default=2015)
Sets the numpy random seed
gene_set : set(str) (default=set())
Donato et al. (2013) uses this algorithm to remove crosstalk from
definitions in case-control studies. Here, `gene_set` is equivalent
to the DE (differentially expressed) genes they refer to in their
paper. Because crosstalk removal is a preprocessing step
applicable to pathway analyses in general, we keep the variable
name nonspecific.
all_genes : bool (default=True)
This value is checked if `gene_set` is not empty.
If False, crosstalk correction is only applied to annotations in
the `gene_set`.
max_iters : int (default=1000)
The maximum number of expectation-maximization steps to take in the
maximum impact estimation algorithm.
Returns
----------
dict(str -> tup(set(str), set(str))), where the (str) keys are the
pathway names.
tup[0] : crosstalk-correction applied to genes in the pathway
definition that are also in `gene_set`
tup[1] :
- `all_genes` is True.
Correction is applied to genes outside of `gene_set`.
- `all_genes` is False.
The second element in the tuple is all genes
remaining in the original definition (definition - `gene_set`).
"""
np.random.seed(seed=random_seed)
genes_in_pathway_definitions = set.union(*pathway_definitions.values())
pathway_column_names = index_element_map(pathway_definitions.keys())
corrected_pathway_defns = {}
if gene_set:
gene_set = gene_set & genes_in_pathway_definitions
if not gene_set and not all_genes:
print("`gene_set` parameter was {0}, returning original"
"pathway definitions".format(gene_set))
for pathway, definition in pathway_definitions.items():
corrected_pathway_defns[pathway] = (set(), definition)
        return corrected_pathway_defns
corrected_pathway_defns = _apply_correction_on_genes(
gene_set, pathway_column_names, pathway_definitions)
# crosstalk correction is _only_ applied to `gene_set`
if not all_genes:
for pathway, definition in pathway_definitions.items():
if pathway not in corrected_pathway_defns:
corrected_pathway_defns[pathway] = set()
gene_set_defn = corrected_pathway_defns[pathway]
remaining_defn = definition - gene_set
corrected_pathway_defns[pathway] = (
gene_set_defn, remaining_defn)
return corrected_pathway_defns
remaining_genes = genes_in_pathway_definitions - gene_set
if not remaining_genes:
for pathway, definition in corrected_pathway_defns.items():
corrected_pathway_defns[pathway] = (definition, set())
return corrected_pathway_defns
pathway_remaining_defns = _apply_correction_on_genes(
remaining_genes, pathway_column_names, pathway_definitions)
for pathway, definitions in pathway_definitions.items():
if pathway not in corrected_pathway_defns:
corrected_pathway_defns[pathway] = set()
if pathway not in pathway_remaining_defns:
pathway_remaining_defns[pathway] = set()
corrected_pathway_defns[pathway] = (
corrected_pathway_defns[pathway],
pathway_remaining_defns[pathway])
return corrected_pathway_defns | def function[crosstalk_correction, parameter[pathway_definitions, random_seed, gene_set, all_genes, max_iters]]:
constant[A wrapper function around the maximum impact estimation algorithm.
Parameters
-----------
pathway_definitions : dict(str -> set(str))
The original pathway definitions.
A pathway (key) is defined by a set of genes (value).
random_seed : int (default=2015)
Sets the numpy random seed
gene_set : set(str) (default=set())
Donato et al. (2013) uses this algorithm to remove crosstalk from
definitions in case-control studies. Here, `gene_set` is equivalent
to the DE (differentially expressed) genes they refer to in their
paper. Because crosstalk removal is a preprocessing step
applicable to pathway analyses in general, we keep the variable
name nonspecific.
all_genes : bool (default=True)
This value is checked if `gene_set` is not empty.
If False, crosstalk correction is only applied to annotations in
the `gene_set`.
max_iters : int (default=1000)
The maximum number of expectation-maximization steps to take in the
maximum impact estimation algorithm.
Returns
----------
dict(str -> tup(set(str), set(str))), where the (str) keys are the
pathway names.
tup[0] : crosstalk-correction applied to genes in the pathway
definition that are also in `gene_set`
tup[1] :
- `all_genes` is True.
Correction is applied to genes outside of `gene_set`.
- `all_genes` is False.
The second element in the tuple is all genes
remaining in the original definition (definition - `gene_set`).
]
call[name[np].random.seed, parameter[]]
variable[genes_in_pathway_definitions] assign[=] call[name[set].union, parameter[<ast.Starred object at 0x7da18f00c4c0>]]
variable[pathway_column_names] assign[=] call[name[index_element_map], parameter[call[name[pathway_definitions].keys, parameter[]]]]
variable[corrected_pathway_defns] assign[=] dictionary[[], []]
if name[gene_set] begin[:]
variable[gene_set] assign[=] binary_operation[name[gene_set] <ast.BitAnd object at 0x7da2590d6b60> name[genes_in_pathway_definitions]]
if <ast.BoolOp object at 0x7da18f00e710> begin[:]
call[name[print], parameter[call[constant[`gene_set` parameter was {0}, returning original pathway definitions].format, parameter[name[gene_set]]]]]
for taget[tuple[[<ast.Name object at 0x7da18f00d4b0>, <ast.Name object at 0x7da18f00f160>]]] in starred[call[name[pathway_definitions].items, parameter[]]] begin[:]
call[name[corrected_pathway_defns]][name[pathway]] assign[=] tuple[[<ast.Call object at 0x7da18f00f3d0>, <ast.Name object at 0x7da18f00d2d0>]]
return[name[corrected_pathway_defns]]
variable[corrected_pathway_defns] assign[=] call[name[_apply_correction_on_genes], parameter[name[gene_set], name[pathway_column_names], name[pathway_definitions]]]
if <ast.UnaryOp object at 0x7da18f00ee90> begin[:]
for taget[tuple[[<ast.Name object at 0x7da18f00ff70>, <ast.Name object at 0x7da18f00d060>]]] in starred[call[name[pathway_definitions].items, parameter[]]] begin[:]
if compare[name[pathway] <ast.NotIn object at 0x7da2590d7190> name[corrected_pathway_defns]] begin[:]
call[name[corrected_pathway_defns]][name[pathway]] assign[=] call[name[set], parameter[]]
variable[gene_set_defn] assign[=] call[name[corrected_pathway_defns]][name[pathway]]
variable[remaining_defn] assign[=] binary_operation[name[definition] - name[gene_set]]
call[name[corrected_pathway_defns]][name[pathway]] assign[=] tuple[[<ast.Name object at 0x7da2054a5690>, <ast.Name object at 0x7da2054a4fd0>]]
return[name[corrected_pathway_defns]]
variable[remaining_genes] assign[=] binary_operation[name[genes_in_pathway_definitions] - name[gene_set]]
if <ast.UnaryOp object at 0x7da2054a62c0> begin[:]
for taget[tuple[[<ast.Name object at 0x7da204565c00>, <ast.Name object at 0x7da2045655d0>]]] in starred[call[name[corrected_pathway_defns].items, parameter[]]] begin[:]
call[name[corrected_pathway_defns]][name[pathway]] assign[=] tuple[[<ast.Name object at 0x7da204564e50>, <ast.Call object at 0x7da2045645e0>]]
return[name[corrected_pathway_defns]]
variable[pathway_remaining_defns] assign[=] call[name[_apply_correction_on_genes], parameter[name[remaining_genes], name[pathway_column_names], name[pathway_definitions]]]
for taget[tuple[[<ast.Name object at 0x7da204567640>, <ast.Name object at 0x7da204565660>]]] in starred[call[name[pathway_definitions].items, parameter[]]] begin[:]
if compare[name[pathway] <ast.NotIn object at 0x7da2590d7190> name[corrected_pathway_defns]] begin[:]
call[name[corrected_pathway_defns]][name[pathway]] assign[=] call[name[set], parameter[]]
if compare[name[pathway] <ast.NotIn object at 0x7da2590d7190> name[pathway_remaining_defns]] begin[:]
call[name[pathway_remaining_defns]][name[pathway]] assign[=] call[name[set], parameter[]]
call[name[corrected_pathway_defns]][name[pathway]] assign[=] tuple[[<ast.Subscript object at 0x7da204566050>, <ast.Subscript object at 0x7da204566350>]]
return[name[corrected_pathway_defns]] | keyword[def] identifier[crosstalk_correction] ( identifier[pathway_definitions] ,
identifier[random_seed] = literal[int] ,
identifier[gene_set] = identifier[set] (),
identifier[all_genes] = keyword[True] ,
identifier[max_iters] = literal[int] ):
literal[string]
identifier[np] . identifier[random] . identifier[seed] ( identifier[seed] = identifier[random_seed] )
identifier[genes_in_pathway_definitions] = identifier[set] . identifier[union] (* identifier[pathway_definitions] . identifier[values] ())
identifier[pathway_column_names] = identifier[index_element_map] ( identifier[pathway_definitions] . identifier[keys] ())
identifier[corrected_pathway_defns] ={}
keyword[if] identifier[gene_set] :
identifier[gene_set] = identifier[gene_set] & identifier[genes_in_pathway_definitions]
keyword[if] keyword[not] identifier[gene_set] keyword[and] keyword[not] identifier[all_genes] :
identifier[print] ( literal[string]
literal[string] . identifier[format] ( identifier[gene_set] ))
keyword[for] identifier[pathway] , identifier[definition] keyword[in] identifier[pathway_definitions] . identifier[items] ():
identifier[corrected_pathway_defns] [ identifier[pathway] ]=( identifier[set] (), identifier[definition] )
keyword[return] identifier[corrected_pathway_defns]
identifier[corrected_pathway_defns] = identifier[_apply_correction_on_genes] (
identifier[gene_set] , identifier[pathway_column_names] , identifier[pathway_definitions] )
keyword[if] keyword[not] identifier[all_genes] :
keyword[for] identifier[pathway] , identifier[definition] keyword[in] identifier[pathway_definitions] . identifier[items] ():
keyword[if] identifier[pathway] keyword[not] keyword[in] identifier[corrected_pathway_defns] :
identifier[corrected_pathway_defns] [ identifier[pathway] ]= identifier[set] ()
identifier[gene_set_defn] = identifier[corrected_pathway_defns] [ identifier[pathway] ]
identifier[remaining_defn] = identifier[definition] - identifier[gene_set]
identifier[corrected_pathway_defns] [ identifier[pathway] ]=(
identifier[gene_set_defn] , identifier[remaining_defn] )
keyword[return] identifier[corrected_pathway_defns]
identifier[remaining_genes] = identifier[genes_in_pathway_definitions] - identifier[gene_set]
keyword[if] keyword[not] identifier[remaining_genes] :
keyword[for] identifier[pathway] , identifier[definition] keyword[in] identifier[corrected_pathway_defns] . identifier[items] ():
identifier[corrected_pathway_defns] [ identifier[pathway] ]=( identifier[definition] , identifier[set] ())
keyword[return] identifier[corrected_pathway_defns]
identifier[pathway_remaining_defns] = identifier[_apply_correction_on_genes] (
identifier[remaining_genes] , identifier[pathway_column_names] , identifier[pathway_definitions] )
keyword[for] identifier[pathway] , identifier[definitions] keyword[in] identifier[pathway_definitions] . identifier[items] ():
keyword[if] identifier[pathway] keyword[not] keyword[in] identifier[corrected_pathway_defns] :
identifier[corrected_pathway_defns] [ identifier[pathway] ]= identifier[set] ()
keyword[if] identifier[pathway] keyword[not] keyword[in] identifier[pathway_remaining_defns] :
identifier[pathway_remaining_defns] [ identifier[pathway] ]= identifier[set] ()
identifier[corrected_pathway_defns] [ identifier[pathway] ]=(
identifier[corrected_pathway_defns] [ identifier[pathway] ],
identifier[pathway_remaining_defns] [ identifier[pathway] ])
keyword[return] identifier[corrected_pathway_defns] | def crosstalk_correction(pathway_definitions, random_seed=2015, gene_set=set(), all_genes=True, max_iters=1000):
"""A wrapper function around the maximum impact estimation algorithm.
Parameters
-----------
pathway_definitions : dict(str -> set(str))
The original pathway definitions.
A pathway (key) is defined by a set of genes (value).
random_seed : int (default=2015)
Sets the numpy random seed
gene_set : set(str) (default=set())
Donato et al. (2013) uses this algorithm to remove crosstalk from
definitions in case-control studies. Here, `gene_set` is equivalent
to the DE (differentially expressed) genes they refer to in their
paper. Because crosstalk removal is a preprocessing step
applicable to pathway analyses in general, we keep the variable
name nonspecific.
all_genes : bool (default=True)
This value is checked if `gene_set` is not empty.
If False, crosstalk correction is only applied to annotations in
the `gene_set`.
max_iters : int (default=1000)
The maximum number of expectation-maximization steps to take in the
maximum impact estimation algorithm.
Returns
----------
dict(str -> tup(set(str), set(str))), where the (str) keys are the
pathway names.
tup[0] : crosstalk-correction applied to genes in the pathway
definition that are also in `gene_set`
tup[1] :
- `all_genes` is True.
Correction is applied to genes outside of `gene_set`.
- `all_genes` is False.
The second element in the tuple is all genes
remaining in the original definition (definition - `gene_set`).
"""
np.random.seed(seed=random_seed)
genes_in_pathway_definitions = set.union(*pathway_definitions.values())
pathway_column_names = index_element_map(pathway_definitions.keys())
corrected_pathway_defns = {}
if gene_set:
gene_set = gene_set & genes_in_pathway_definitions
if not gene_set and (not all_genes):
        print('`gene_set` parameter was {0}, returning original pathway definitions'.format(gene_set))
for (pathway, definition) in pathway_definitions.items():
corrected_pathway_defns[pathway] = (set(), definition) # depends on [control=['for'], data=[]]
        return corrected_pathway_defns # depends on [control=['if'], data=[]]
corrected_pathway_defns = _apply_correction_on_genes(gene_set, pathway_column_names, pathway_definitions)
# crosstalk correction is _only_ applied to `gene_set`
if not all_genes:
for (pathway, definition) in pathway_definitions.items():
if pathway not in corrected_pathway_defns:
corrected_pathway_defns[pathway] = set() # depends on [control=['if'], data=['pathway', 'corrected_pathway_defns']]
gene_set_defn = corrected_pathway_defns[pathway]
remaining_defn = definition - gene_set
corrected_pathway_defns[pathway] = (gene_set_defn, remaining_defn) # depends on [control=['for'], data=[]]
return corrected_pathway_defns # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
remaining_genes = genes_in_pathway_definitions - gene_set
if not remaining_genes:
for (pathway, definition) in corrected_pathway_defns.items():
corrected_pathway_defns[pathway] = (definition, set()) # depends on [control=['for'], data=[]]
return corrected_pathway_defns # depends on [control=['if'], data=[]]
pathway_remaining_defns = _apply_correction_on_genes(remaining_genes, pathway_column_names, pathway_definitions)
for (pathway, definitions) in pathway_definitions.items():
if pathway not in corrected_pathway_defns:
corrected_pathway_defns[pathway] = set() # depends on [control=['if'], data=['pathway', 'corrected_pathway_defns']]
if pathway not in pathway_remaining_defns:
pathway_remaining_defns[pathway] = set() # depends on [control=['if'], data=['pathway', 'pathway_remaining_defns']]
corrected_pathway_defns[pathway] = (corrected_pathway_defns[pathway], pathway_remaining_defns[pathway]) # depends on [control=['for'], data=[]]
return corrected_pathway_defns |
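An illustrative call to crosstalk_correction with made-up pathway names and genes; the exact split depends on _apply_correction_on_genes, so no output is shown:
pathway_defs = {
    'pathway_A': {'g1', 'g2', 'g3'},
    'pathway_B': {'g2', 'g3', 'g4'},
}
corrected = crosstalk_correction(pathway_defs, gene_set={'g2', 'g4'})
for pathway, (in_gene_set, outside_gene_set) in corrected.items():
    print(pathway, sorted(in_gene_set), sorted(outside_gene_set))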
def __get_window(self, hWnd):
"""
        Used internally to get another Window from this one.
It'll try to copy the parent Process and Thread references if possible.
"""
window = Window(hWnd)
if window.get_pid() == self.get_pid():
window.set_process( self.get_process() )
if window.get_tid() == self.get_tid():
window.set_thread( self.get_thread() )
return window | def function[__get_window, parameter[self, hWnd]]:
constant[
Used internally to get another Window from this one.
It'll try to copy the parent Process and Thread references if possible.
]
variable[window] assign[=] call[name[Window], parameter[name[hWnd]]]
if compare[call[name[window].get_pid, parameter[]] equal[==] call[name[self].get_pid, parameter[]]] begin[:]
call[name[window].set_process, parameter[call[name[self].get_process, parameter[]]]]
if compare[call[name[window].get_tid, parameter[]] equal[==] call[name[self].get_tid, parameter[]]] begin[:]
call[name[window].set_thread, parameter[call[name[self].get_thread, parameter[]]]]
return[name[window]] | keyword[def] identifier[__get_window] ( identifier[self] , identifier[hWnd] ):
literal[string]
identifier[window] = identifier[Window] ( identifier[hWnd] )
keyword[if] identifier[window] . identifier[get_pid] ()== identifier[self] . identifier[get_pid] ():
identifier[window] . identifier[set_process] ( identifier[self] . identifier[get_process] ())
keyword[if] identifier[window] . identifier[get_tid] ()== identifier[self] . identifier[get_tid] ():
identifier[window] . identifier[set_thread] ( identifier[self] . identifier[get_thread] ())
keyword[return] identifier[window] | def __get_window(self, hWnd):
"""
        Used internally to get another Window from this one.
It'll try to copy the parent Process and Thread references if possible.
"""
window = Window(hWnd)
if window.get_pid() == self.get_pid():
window.set_process(self.get_process()) # depends on [control=['if'], data=[]]
if window.get_tid() == self.get_tid():
window.set_thread(self.get_thread()) # depends on [control=['if'], data=[]]
return window |
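A hedged sketch of how a sibling method would presumably use __get_window; win32.GetParent and get_handle() are assumed to exist in the surrounding Win32 wrapper:
def get_parent(self):
    parent_hwnd = win32.GetParent(self.get_handle())   # assumed Win32 wrapper call
    return self.__get_window(parent_hwnd)              # reuses Process/Thread refs when IDs match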
def _ParseMRUListExEntryValue(
self, parser_mediator, registry_key, entry_index, entry_number,
values_dict, value_strings, parent_path_segments, codepage='cp1252'):
"""Parses the MRUListEx entry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
entry_index (int): MRUListEx entry index.
entry_number (int): entry number.
values_dict (dict[str, object]): values of the key.
value_strings (dict[str, str]): value names and strings.
parent_path_segments (list[str]): parent shell item path segments.
codepage (Optional[str]): extended ASCII string codepage.
Returns:
str: path segment of the shell item.
"""
value = registry_key.GetValueByName('{0:d}'.format(entry_number))
path_segment = 'N/A'
value_string = ''
if value is None:
parser_mediator.ProduceExtractionWarning(
'Missing MRUListEx entry value: {0:d} in key: {1:s}.'.format(
entry_number, registry_key.path))
elif not value.DataIsBinaryData():
parser_mediator.ProduceExtractionWarning(
'Non-binary MRUListEx entry value: {0:d} in key: {1:s}.'.format(
entry_number, registry_key.path))
elif value.data:
shell_items_parser = shell_items.ShellItemsParser(registry_key.path)
shell_items_parser.ParseByteStream(
parser_mediator, value.data,
parent_path_segments=parent_path_segments, codepage=codepage)
path_segment = shell_items_parser.GetUpperPathSegment()
value_string = shell_items_parser.CopyToPath()
value_strings[entry_number] = value_string
value_string = 'Shell item path: {0:s}'.format(value_string)
value_text = 'Index: {0:d} [MRU Value {1:d}]'.format(
entry_index + 1, entry_number)
values_dict[value_text] = value_string
return path_segment | def function[_ParseMRUListExEntryValue, parameter[self, parser_mediator, registry_key, entry_index, entry_number, values_dict, value_strings, parent_path_segments, codepage]]:
constant[Parses the MRUListEx entry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
entry_index (int): MRUListEx entry index.
entry_number (int): entry number.
values_dict (dict[str, object]): values of the key.
value_strings (dict[str, str]): value names and strings.
parent_path_segments (list[str]): parent shell item path segments.
codepage (Optional[str]): extended ASCII string codepage.
Returns:
str: path segment of the shell item.
]
variable[value] assign[=] call[name[registry_key].GetValueByName, parameter[call[constant[{0:d}].format, parameter[name[entry_number]]]]]
variable[path_segment] assign[=] constant[N/A]
variable[value_string] assign[=] constant[]
if compare[name[value] is constant[None]] begin[:]
call[name[parser_mediator].ProduceExtractionWarning, parameter[call[constant[Missing MRUListEx entry value: {0:d} in key: {1:s}.].format, parameter[name[entry_number], name[registry_key].path]]]]
variable[value_text] assign[=] call[constant[Index: {0:d} [MRU Value {1:d}]].format, parameter[binary_operation[name[entry_index] + constant[1]], name[entry_number]]]
call[name[values_dict]][name[value_text]] assign[=] name[value_string]
return[name[path_segment]] | keyword[def] identifier[_ParseMRUListExEntryValue] (
identifier[self] , identifier[parser_mediator] , identifier[registry_key] , identifier[entry_index] , identifier[entry_number] ,
identifier[values_dict] , identifier[value_strings] , identifier[parent_path_segments] , identifier[codepage] = literal[string] ):
literal[string]
identifier[value] = identifier[registry_key] . identifier[GetValueByName] ( literal[string] . identifier[format] ( identifier[entry_number] ))
identifier[path_segment] = literal[string]
identifier[value_string] = literal[string]
keyword[if] identifier[value] keyword[is] keyword[None] :
identifier[parser_mediator] . identifier[ProduceExtractionWarning] (
literal[string] . identifier[format] (
identifier[entry_number] , identifier[registry_key] . identifier[path] ))
keyword[elif] keyword[not] identifier[value] . identifier[DataIsBinaryData] ():
identifier[parser_mediator] . identifier[ProduceExtractionWarning] (
literal[string] . identifier[format] (
identifier[entry_number] , identifier[registry_key] . identifier[path] ))
keyword[elif] identifier[value] . identifier[data] :
identifier[shell_items_parser] = identifier[shell_items] . identifier[ShellItemsParser] ( identifier[registry_key] . identifier[path] )
identifier[shell_items_parser] . identifier[ParseByteStream] (
identifier[parser_mediator] , identifier[value] . identifier[data] ,
identifier[parent_path_segments] = identifier[parent_path_segments] , identifier[codepage] = identifier[codepage] )
identifier[path_segment] = identifier[shell_items_parser] . identifier[GetUpperPathSegment] ()
identifier[value_string] = identifier[shell_items_parser] . identifier[CopyToPath] ()
identifier[value_strings] [ identifier[entry_number] ]= identifier[value_string]
identifier[value_string] = literal[string] . identifier[format] ( identifier[value_string] )
identifier[value_text] = literal[string] . identifier[format] (
identifier[entry_index] + literal[int] , identifier[entry_number] )
identifier[values_dict] [ identifier[value_text] ]= identifier[value_string]
keyword[return] identifier[path_segment] | def _ParseMRUListExEntryValue(self, parser_mediator, registry_key, entry_index, entry_number, values_dict, value_strings, parent_path_segments, codepage='cp1252'):
"""Parses the MRUListEx entry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains
the MRUListEx value.
entry_index (int): MRUListEx entry index.
entry_number (int): entry number.
values_dict (dict[str, object]): values of the key.
value_strings (dict[str, str]): value names and strings.
parent_path_segments (list[str]): parent shell item path segments.
codepage (Optional[str]): extended ASCII string codepage.
Returns:
str: path segment of the shell item.
"""
value = registry_key.GetValueByName('{0:d}'.format(entry_number))
path_segment = 'N/A'
value_string = ''
if value is None:
parser_mediator.ProduceExtractionWarning('Missing MRUListEx entry value: {0:d} in key: {1:s}.'.format(entry_number, registry_key.path)) # depends on [control=['if'], data=[]]
elif not value.DataIsBinaryData():
parser_mediator.ProduceExtractionWarning('Non-binary MRUListEx entry value: {0:d} in key: {1:s}.'.format(entry_number, registry_key.path)) # depends on [control=['if'], data=[]]
elif value.data:
shell_items_parser = shell_items.ShellItemsParser(registry_key.path)
shell_items_parser.ParseByteStream(parser_mediator, value.data, parent_path_segments=parent_path_segments, codepage=codepage)
path_segment = shell_items_parser.GetUpperPathSegment()
value_string = shell_items_parser.CopyToPath()
value_strings[entry_number] = value_string
value_string = 'Shell item path: {0:s}'.format(value_string) # depends on [control=['if'], data=[]]
value_text = 'Index: {0:d} [MRU Value {1:d}]'.format(entry_index + 1, entry_number)
values_dict[value_text] = value_string
return path_segment |
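A hedged sketch of the loop one would expect to drive _ParseMRUListExEntryValue; `_parse_mrulistex_key` and the decoded `mrulistex_entries` order array are assumptions, not part of the original plugin:
def _parse_mrulistex_key(self, parser_mediator, registry_key, mrulistex_entries):
    values_dict, value_strings, parent_path_segments = {}, {}, []
    for entry_index, entry_number in enumerate(mrulistex_entries):
        self._ParseMRUListExEntryValue(
            parser_mediator, registry_key, entry_index, entry_number,
            values_dict, value_strings, parent_path_segments)
    return values_dict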
def set_heater(self, heater):
"""Set the heater config.
:param heater: Value to set the heater
:type heater: int [0-100]
:returns: None
:raises: InvalidInput
"""
        if type(heater) != int or heater not in range(0, 101):
raise InvalidInput("Heater value must be int between 0-100")
self._config['heater'] = heater
self._q.put(self._config) | def function[set_heater, parameter[self, heater]]:
constant[Set the heater config.
:param heater: Value to set the heater
:type heater: int [0-100]
:returns: None
:raises: InvalidInput
]
if <ast.BoolOp object at 0x7da204566950> begin[:]
<ast.Raise object at 0x7da204567520>
call[name[self]._config][constant[heater]] assign[=] name[heater]
call[name[self]._q.put, parameter[name[self]._config]] | keyword[def] identifier[set_heater] ( identifier[self] , identifier[heater] ):
literal[string]
keyword[if] identifier[type] ( identifier[heater] )!= identifier[int] keyword[or] identifier[heater] keyword[not] keyword[in] identifier[range] ( literal[int] , literal[int] ):
keyword[raise] identifier[InvalidInput] ( literal[string] )
identifier[self] . identifier[_config] [ literal[string] ]= identifier[heater]
identifier[self] . identifier[_q] . identifier[put] ( identifier[self] . identifier[_config] ) | def set_heater(self, heater):
"""Set the heater config.
:param heater: Value to set the heater
:type heater: int [0-100]
:returns: None
:raises: InvalidInput
"""
    if type(heater) != int or heater not in range(0, 101):
raise InvalidInput('Heater value must be int between 0-100') # depends on [control=['if'], data=[]]
self._config['heater'] = heater
self._q.put(self._config) |
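A usage sketch for set_heater; `HeaterController` is a hypothetical owner of the _config dict and the _q queue:
ctrl = HeaterController()
ctrl.set_heater(75)     # queues the updated config for the worker thread
ctrl.set_heater(150)    # raises InvalidInput (outside the 0-100 range)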
def subset_gctoo(gctoo, row_bool=None, col_bool=None, rid=None, cid=None,
ridx=None, cidx=None, exclude_rid=None, exclude_cid=None):
""" Extract a subset of data from a GCToo object in a variety of ways.
The order of rows and columns will be preserved.
Args:
gctoo (GCToo object)
row_bool (list of bools): length must equal gctoo.data_df.shape[0]
col_bool (list of bools): length must equal gctoo.data_df.shape[1]
rid (list of strings): rids to include
cid (list of strings): cids to include
ridx (list of integers): row integer ids to include
cidx (list of integers): col integer ids to include
exclude_rid (list of strings): rids to exclude
exclude_cid (list of strings): cids to exclude
Returns:
out_gctoo (GCToo object): gctoo after subsetting
"""
assert sum([(rid is not None), (row_bool is not None), (ridx is not None)]) <= 1, (
"Only one of rid, row_bool, and ridx can be provided.")
assert sum([(cid is not None), (col_bool is not None), (cidx is not None)]) <= 1, (
"Only one of cid, col_bool, and cidx can be provided.")
# Figure out what rows and columns to keep
rows_to_keep = get_rows_to_keep(gctoo, rid, row_bool, ridx, exclude_rid)
cols_to_keep = get_cols_to_keep(gctoo, cid, col_bool, cidx, exclude_cid)
# Convert labels to boolean array to preserve order
rows_to_keep_bools = gctoo.data_df.index.isin(rows_to_keep)
cols_to_keep_bools = gctoo.data_df.columns.isin(cols_to_keep)
# Make the output gct
out_gctoo = GCToo.GCToo(
src=gctoo.src, version=gctoo.version,
data_df=gctoo.data_df.loc[rows_to_keep_bools, cols_to_keep_bools],
row_metadata_df=gctoo.row_metadata_df.loc[rows_to_keep_bools, :],
col_metadata_df=gctoo.col_metadata_df.loc[cols_to_keep_bools, :])
assert out_gctoo.data_df.size > 0, "Subsetting yielded an empty gct!"
logger.info(("Initial GCToo with {} rows and {} columns subsetted down to " +
"{} rows and {} columns.").format(
gctoo.data_df.shape[0], gctoo.data_df.shape[1],
out_gctoo.data_df.shape[0], out_gctoo.data_df.shape[1]))
return out_gctoo | def function[subset_gctoo, parameter[gctoo, row_bool, col_bool, rid, cid, ridx, cidx, exclude_rid, exclude_cid]]:
constant[ Extract a subset of data from a GCToo object in a variety of ways.
The order of rows and columns will be preserved.
Args:
gctoo (GCToo object)
row_bool (list of bools): length must equal gctoo.data_df.shape[0]
col_bool (list of bools): length must equal gctoo.data_df.shape[1]
rid (list of strings): rids to include
cid (list of strings): cids to include
ridx (list of integers): row integer ids to include
cidx (list of integers): col integer ids to include
exclude_rid (list of strings): rids to exclude
exclude_cid (list of strings): cids to exclude
Returns:
out_gctoo (GCToo object): gctoo after subsetting
]
assert[compare[call[name[sum], parameter[list[[<ast.Compare object at 0x7da18fe90d90>, <ast.Compare object at 0x7da18fe93610>, <ast.Compare object at 0x7da18fe91ea0>]]]] less_or_equal[<=] constant[1]]]
assert[compare[call[name[sum], parameter[list[[<ast.Compare object at 0x7da18fe93f10>, <ast.Compare object at 0x7da18fe90040>, <ast.Compare object at 0x7da18fe92860>]]]] less_or_equal[<=] constant[1]]]
variable[rows_to_keep] assign[=] call[name[get_rows_to_keep], parameter[name[gctoo], name[rid], name[row_bool], name[ridx], name[exclude_rid]]]
variable[cols_to_keep] assign[=] call[name[get_cols_to_keep], parameter[name[gctoo], name[cid], name[col_bool], name[cidx], name[exclude_cid]]]
variable[rows_to_keep_bools] assign[=] call[name[gctoo].data_df.index.isin, parameter[name[rows_to_keep]]]
variable[cols_to_keep_bools] assign[=] call[name[gctoo].data_df.columns.isin, parameter[name[cols_to_keep]]]
variable[out_gctoo] assign[=] call[name[GCToo].GCToo, parameter[]]
assert[compare[name[out_gctoo].data_df.size greater[>] constant[0]]]
call[name[logger].info, parameter[call[binary_operation[constant[Initial GCToo with {} rows and {} columns subsetted down to ] + constant[{} rows and {} columns.]].format, parameter[call[name[gctoo].data_df.shape][constant[0]], call[name[gctoo].data_df.shape][constant[1]], call[name[out_gctoo].data_df.shape][constant[0]], call[name[out_gctoo].data_df.shape][constant[1]]]]]]
return[name[out_gctoo]] | keyword[def] identifier[subset_gctoo] ( identifier[gctoo] , identifier[row_bool] = keyword[None] , identifier[col_bool] = keyword[None] , identifier[rid] = keyword[None] , identifier[cid] = keyword[None] ,
identifier[ridx] = keyword[None] , identifier[cidx] = keyword[None] , identifier[exclude_rid] = keyword[None] , identifier[exclude_cid] = keyword[None] ):
literal[string]
keyword[assert] identifier[sum] ([( identifier[rid] keyword[is] keyword[not] keyword[None] ),( identifier[row_bool] keyword[is] keyword[not] keyword[None] ),( identifier[ridx] keyword[is] keyword[not] keyword[None] )])<= literal[int] ,(
literal[string] )
keyword[assert] identifier[sum] ([( identifier[cid] keyword[is] keyword[not] keyword[None] ),( identifier[col_bool] keyword[is] keyword[not] keyword[None] ),( identifier[cidx] keyword[is] keyword[not] keyword[None] )])<= literal[int] ,(
literal[string] )
identifier[rows_to_keep] = identifier[get_rows_to_keep] ( identifier[gctoo] , identifier[rid] , identifier[row_bool] , identifier[ridx] , identifier[exclude_rid] )
identifier[cols_to_keep] = identifier[get_cols_to_keep] ( identifier[gctoo] , identifier[cid] , identifier[col_bool] , identifier[cidx] , identifier[exclude_cid] )
identifier[rows_to_keep_bools] = identifier[gctoo] . identifier[data_df] . identifier[index] . identifier[isin] ( identifier[rows_to_keep] )
identifier[cols_to_keep_bools] = identifier[gctoo] . identifier[data_df] . identifier[columns] . identifier[isin] ( identifier[cols_to_keep] )
identifier[out_gctoo] = identifier[GCToo] . identifier[GCToo] (
identifier[src] = identifier[gctoo] . identifier[src] , identifier[version] = identifier[gctoo] . identifier[version] ,
identifier[data_df] = identifier[gctoo] . identifier[data_df] . identifier[loc] [ identifier[rows_to_keep_bools] , identifier[cols_to_keep_bools] ],
identifier[row_metadata_df] = identifier[gctoo] . identifier[row_metadata_df] . identifier[loc] [ identifier[rows_to_keep_bools] ,:],
identifier[col_metadata_df] = identifier[gctoo] . identifier[col_metadata_df] . identifier[loc] [ identifier[cols_to_keep_bools] ,:])
keyword[assert] identifier[out_gctoo] . identifier[data_df] . identifier[size] > literal[int] , literal[string]
identifier[logger] . identifier[info] (( literal[string] +
literal[string] ). identifier[format] (
identifier[gctoo] . identifier[data_df] . identifier[shape] [ literal[int] ], identifier[gctoo] . identifier[data_df] . identifier[shape] [ literal[int] ],
identifier[out_gctoo] . identifier[data_df] . identifier[shape] [ literal[int] ], identifier[out_gctoo] . identifier[data_df] . identifier[shape] [ literal[int] ]))
keyword[return] identifier[out_gctoo] | def subset_gctoo(gctoo, row_bool=None, col_bool=None, rid=None, cid=None, ridx=None, cidx=None, exclude_rid=None, exclude_cid=None):
""" Extract a subset of data from a GCToo object in a variety of ways.
The order of rows and columns will be preserved.
Args:
gctoo (GCToo object)
row_bool (list of bools): length must equal gctoo.data_df.shape[0]
col_bool (list of bools): length must equal gctoo.data_df.shape[1]
rid (list of strings): rids to include
cid (list of strings): cids to include
ridx (list of integers): row integer ids to include
cidx (list of integers): col integer ids to include
exclude_rid (list of strings): rids to exclude
exclude_cid (list of strings): cids to exclude
Returns:
out_gctoo (GCToo object): gctoo after subsetting
"""
assert sum([rid is not None, row_bool is not None, ridx is not None]) <= 1, 'Only one of rid, row_bool, and ridx can be provided.'
assert sum([cid is not None, col_bool is not None, cidx is not None]) <= 1, 'Only one of cid, col_bool, and cidx can be provided.'
# Figure out what rows and columns to keep
rows_to_keep = get_rows_to_keep(gctoo, rid, row_bool, ridx, exclude_rid)
cols_to_keep = get_cols_to_keep(gctoo, cid, col_bool, cidx, exclude_cid)
# Convert labels to boolean array to preserve order
rows_to_keep_bools = gctoo.data_df.index.isin(rows_to_keep)
cols_to_keep_bools = gctoo.data_df.columns.isin(cols_to_keep)
# Make the output gct
out_gctoo = GCToo.GCToo(src=gctoo.src, version=gctoo.version, data_df=gctoo.data_df.loc[rows_to_keep_bools, cols_to_keep_bools], row_metadata_df=gctoo.row_metadata_df.loc[rows_to_keep_bools, :], col_metadata_df=gctoo.col_metadata_df.loc[cols_to_keep_bools, :])
assert out_gctoo.data_df.size > 0, 'Subsetting yielded an empty gct!'
logger.info(('Initial GCToo with {} rows and {} columns subsetted down to ' + '{} rows and {} columns.').format(gctoo.data_df.shape[0], gctoo.data_df.shape[1], out_gctoo.data_df.shape[0], out_gctoo.data_df.shape[1]))
return out_gctoo |
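An illustrative subsetting call; `my_gctoo` is assumed to be an already-parsed GCToo object and the row ids are placeholders:
subset = subset_gctoo(my_gctoo, rid=['AKT1', 'TP53'], cidx=[0, 1, 2])
print(subset.data_df.shape)   # rows and columns keep their original order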
def pdf(cls, uuid):
"""Return a PDF of the invoice identified by the UUID
This is a raw string, which can be written to a file with:
`
        with open('invoice.pdf', 'wb') as invoice_file:
invoice_file.write(recurly.Invoice.pdf(uuid))
`
"""
url = urljoin(base_uri(), cls.member_path % (uuid,))
pdf_response = cls.http_request(url, headers={'Accept': 'application/pdf'})
return pdf_response.read() | def function[pdf, parameter[cls, uuid]]:
constant[Return a PDF of the invoice identified by the UUID
This is a raw string, which can be written to a file with:
`
with open('invoice.pdf', 'wb') as invoice_file:
invoice_file.write(recurly.Invoice.pdf(uuid))
`
]
variable[url] assign[=] call[name[urljoin], parameter[call[name[base_uri], parameter[]], binary_operation[name[cls].member_path <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0301990>]]]]]
variable[pdf_response] assign[=] call[name[cls].http_request, parameter[name[url]]]
return[call[name[pdf_response].read, parameter[]]] | keyword[def] identifier[pdf] ( identifier[cls] , identifier[uuid] ):
literal[string]
identifier[url] = identifier[urljoin] ( identifier[base_uri] (), identifier[cls] . identifier[member_path] %( identifier[uuid] ,))
identifier[pdf_response] = identifier[cls] . identifier[http_request] ( identifier[url] , identifier[headers] ={ literal[string] : literal[string] })
keyword[return] identifier[pdf_response] . identifier[read] () | def pdf(cls, uuid):
"""Return a PDF of the invoice identified by the UUID
This is a raw string, which can be written to a file with:
`
        with open('invoice.pdf', 'wb') as invoice_file:
invoice_file.write(recurly.Invoice.pdf(uuid))
`
"""
url = urljoin(base_uri(), cls.member_path % (uuid,))
pdf_response = cls.http_request(url, headers={'Accept': 'application/pdf'})
return pdf_response.read() |
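Expanding the docstring's own example; the UUID is a placeholder, and binary mode is used because the response body is raw PDF bytes:
uuid = '3c42a3ecc46a7aa602602e4033b9c2bc'   # placeholder invoice UUID
with open('invoice.pdf', 'wb') as invoice_file:
    invoice_file.write(recurly.Invoice.pdf(uuid))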
def dicom_to_nifti(dicom_input, output_file=None):
"""
    This is the main dicom to nifti conversion function for Philips images.
    Philips images are required as input. It will then determine the type of images and do the correct conversion.
Examples: See unit test
:param output_file: file path to the output nifti
:param dicom_input: directory with dicom files for 1 scan
"""
assert common.is_philips(dicom_input)
if common.is_multiframe_dicom(dicom_input):
_assert_explicit_vr(dicom_input)
logger.info('Found multiframe dicom')
if _is_multiframe_4d(dicom_input):
logger.info('Found sequence type: MULTIFRAME 4D')
return _multiframe_to_nifti(dicom_input, output_file)
if _is_multiframe_anatomical(dicom_input):
logger.info('Found sequence type: MULTIFRAME ANATOMICAL')
return _multiframe_to_nifti(dicom_input, output_file)
else:
logger.info('Found singleframe dicom')
grouped_dicoms = _get_grouped_dicoms(dicom_input)
if _is_singleframe_4d(dicom_input):
logger.info('Found sequence type: SINGLEFRAME 4D')
return _singleframe_to_nifti(grouped_dicoms, output_file)
logger.info('Assuming anatomical data')
return convert_generic.dicom_to_nifti(dicom_input, output_file) | def function[dicom_to_nifti, parameter[dicom_input, output_file]]:
constant[
This is the main dicom to nifti conversion function for Philips images.
Philips images are required as input. It will then determine the type of images and do the correct conversion.
Examples: See unit test
:param output_file: file path to the output nifti
:param dicom_input: directory with dicom files for 1 scan
]
assert[call[name[common].is_philips, parameter[name[dicom_input]]]]
if call[name[common].is_multiframe_dicom, parameter[name[dicom_input]]] begin[:]
call[name[_assert_explicit_vr], parameter[name[dicom_input]]]
call[name[logger].info, parameter[constant[Found multiframe dicom]]]
if call[name[_is_multiframe_4d], parameter[name[dicom_input]]] begin[:]
call[name[logger].info, parameter[constant[Found sequence type: MULTIFRAME 4D]]]
return[call[name[_multiframe_to_nifti], parameter[name[dicom_input], name[output_file]]]]
if call[name[_is_multiframe_anatomical], parameter[name[dicom_input]]] begin[:]
call[name[logger].info, parameter[constant[Found sequence type: MULTIFRAME ANATOMICAL]]]
return[call[name[_multiframe_to_nifti], parameter[name[dicom_input], name[output_file]]]]
call[name[logger].info, parameter[constant[Assuming anatomical data]]]
return[call[name[convert_generic].dicom_to_nifti, parameter[name[dicom_input], name[output_file]]]] | keyword[def] identifier[dicom_to_nifti] ( identifier[dicom_input] , identifier[output_file] = keyword[None] ):
literal[string]
keyword[assert] identifier[common] . identifier[is_philips] ( identifier[dicom_input] )
keyword[if] identifier[common] . identifier[is_multiframe_dicom] ( identifier[dicom_input] ):
identifier[_assert_explicit_vr] ( identifier[dicom_input] )
identifier[logger] . identifier[info] ( literal[string] )
keyword[if] identifier[_is_multiframe_4d] ( identifier[dicom_input] ):
identifier[logger] . identifier[info] ( literal[string] )
keyword[return] identifier[_multiframe_to_nifti] ( identifier[dicom_input] , identifier[output_file] )
keyword[if] identifier[_is_multiframe_anatomical] ( identifier[dicom_input] ):
identifier[logger] . identifier[info] ( literal[string] )
keyword[return] identifier[_multiframe_to_nifti] ( identifier[dicom_input] , identifier[output_file] )
keyword[else] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[grouped_dicoms] = identifier[_get_grouped_dicoms] ( identifier[dicom_input] )
keyword[if] identifier[_is_singleframe_4d] ( identifier[dicom_input] ):
identifier[logger] . identifier[info] ( literal[string] )
keyword[return] identifier[_singleframe_to_nifti] ( identifier[grouped_dicoms] , identifier[output_file] )
identifier[logger] . identifier[info] ( literal[string] )
keyword[return] identifier[convert_generic] . identifier[dicom_to_nifti] ( identifier[dicom_input] , identifier[output_file] ) | def dicom_to_nifti(dicom_input, output_file=None):
"""
    This is the main dicom to nifti conversion function for Philips images.
    Philips images are required as input. It will then determine the type of images and do the correct conversion.
Examples: See unit test
:param output_file: file path to the output nifti
:param dicom_input: directory with dicom files for 1 scan
"""
assert common.is_philips(dicom_input)
if common.is_multiframe_dicom(dicom_input):
_assert_explicit_vr(dicom_input)
logger.info('Found multiframe dicom')
if _is_multiframe_4d(dicom_input):
logger.info('Found sequence type: MULTIFRAME 4D')
return _multiframe_to_nifti(dicom_input, output_file) # depends on [control=['if'], data=[]]
if _is_multiframe_anatomical(dicom_input):
logger.info('Found sequence type: MULTIFRAME ANATOMICAL')
return _multiframe_to_nifti(dicom_input, output_file) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
logger.info('Found singleframe dicom')
grouped_dicoms = _get_grouped_dicoms(dicom_input)
if _is_singleframe_4d(dicom_input):
logger.info('Found sequence type: SINGLEFRAME 4D')
return _singleframe_to_nifti(grouped_dicoms, output_file) # depends on [control=['if'], data=[]]
logger.info('Assuming anatomical data')
return convert_generic.dicom_to_nifti(dicom_input, output_file) |
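A hedged usage sketch for dicom_to_nifti; both paths are placeholders:
nifti_result = dicom_to_nifti('/data/philips_scan_dicom',
                              output_file='/data/philips_scan.nii.gz')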
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
successful = False
# Call for each master
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
successful = True
except SaltClientError:
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
if not successful:
log.critical('Unable to call %s on any masters!', func) | def function[_call_syndic, parameter[self, func, args, kwargs, master_id]]:
constant[
Wrapper to call a given func on a syndic, best effort to get the one you asked for
]
if compare[name[kwargs] is constant[None]] begin[:]
variable[kwargs] assign[=] dictionary[[], []]
variable[successful] assign[=] constant[False]
for taget[tuple[[<ast.Name object at 0x7da1b215d510>, <ast.Name object at 0x7da1b215dba0>]]] in starred[call[name[self].iter_master_options, parameter[name[master_id]]]] begin[:]
if <ast.BoolOp object at 0x7da1b215e8f0> begin[:]
call[name[log].error, parameter[constant[Unable to call %s on %s, that syndic is not connected], name[func], name[master]]]
continue
<ast.Try object at 0x7da1b215ed40>
if <ast.UnaryOp object at 0x7da1b215dd50> begin[:]
call[name[log].critical, parameter[constant[Unable to call %s on any masters!], name[func]]] | keyword[def] identifier[_call_syndic] ( identifier[self] , identifier[func] , identifier[args] =(), identifier[kwargs] = keyword[None] , identifier[master_id] = keyword[None] ):
literal[string]
keyword[if] identifier[kwargs] keyword[is] keyword[None] :
identifier[kwargs] ={}
identifier[successful] = keyword[False]
keyword[for] identifier[master] , identifier[syndic_future] keyword[in] identifier[self] . identifier[iter_master_options] ( identifier[master_id] ):
keyword[if] keyword[not] identifier[syndic_future] . identifier[done] () keyword[or] identifier[syndic_future] . identifier[exception] ():
identifier[log] . identifier[error] (
literal[string] ,
identifier[func] , identifier[master]
)
keyword[continue]
keyword[try] :
identifier[getattr] ( identifier[syndic_future] . identifier[result] (), identifier[func] )(* identifier[args] ,** identifier[kwargs] )
identifier[successful] = keyword[True]
keyword[except] identifier[SaltClientError] :
identifier[log] . identifier[error] (
literal[string] ,
identifier[func] , identifier[master]
)
identifier[self] . identifier[_mark_master_dead] ( identifier[master] )
keyword[if] keyword[not] identifier[successful] :
identifier[log] . identifier[critical] ( literal[string] , identifier[func] ) | def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
"""
Wrapper to call a given func on a syndic, best effort to get the one you asked for
"""
if kwargs is None:
kwargs = {} # depends on [control=['if'], data=['kwargs']]
successful = False
# Call for each master
for (master, syndic_future) in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error('Unable to call %s on %s, that syndic is not connected', func, master)
continue # depends on [control=['if'], data=[]]
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
successful = True # depends on [control=['try'], data=[]]
except SaltClientError:
log.error('Unable to call %s on %s, trying another...', func, master)
self._mark_master_dead(master) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
if not successful:
log.critical('Unable to call %s on any masters!', func) # depends on [control=['if'], data=[]] |
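
The same best-effort fan-out pattern, sketched stand-alone with stdlib futures; `masters` and `make_client` are hypothetical stand-ins for the Salt syndic internals:

from concurrent.futures import ThreadPoolExecutor

def call_first_available(masters, make_client, func, *args, **kwargs):
    # Connect to every master up front, then try them in order.
    with ThreadPoolExecutor() as pool:
        futures = {m: pool.submit(make_client, m) for m in masters}
        for master, fut in futures.items():
            if fut.exception() is not None:
                continue  # this master never connected; try the next one
            try:
                getattr(fut.result(), func)(*args, **kwargs)
                return True
            except Exception:
                continue  # the call itself failed; fall through to the next master
    return False
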
def plus_one_v1(digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
digits[-1] = digits[-1] + 1
res = []
ten = 0
i = len(digits)-1
while i >= 0 or ten == 1:
summ = 0
if i >= 0:
summ += digits[i]
if ten:
summ += 1
res.append(summ % 10)
ten = summ // 10
i -= 1
return res[::-1] | def function[plus_one_v1, parameter[digits]]:
constant[
:type digits: List[int]
:rtype: List[int]
]
call[name[digits]][<ast.UnaryOp object at 0x7da1b20784c0>] assign[=] binary_operation[call[name[digits]][<ast.UnaryOp object at 0x7da1b2078a60>] + constant[1]]
variable[res] assign[=] list[[]]
variable[ten] assign[=] constant[0]
variable[i] assign[=] binary_operation[call[name[len], parameter[name[digits]]] - constant[1]]
while <ast.BoolOp object at 0x7da1b2078190> begin[:]
variable[summ] assign[=] constant[0]
if compare[name[i] greater_or_equal[>=] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b1e11b40>
if name[ten] begin[:]
<ast.AugAssign object at 0x7da1b1eacb80>
call[name[res].append, parameter[binary_operation[name[summ] <ast.Mod object at 0x7da2590d6920> constant[10]]]]
variable[ten] assign[=] binary_operation[name[summ] <ast.FloorDiv object at 0x7da2590d6bc0> constant[10]]
<ast.AugAssign object at 0x7da1b20f87c0>
return[call[name[res]][<ast.Slice object at 0x7da1b20f92a0>]] | keyword[def] identifier[plus_one_v1] ( identifier[digits] ):
literal[string]
identifier[digits] [- literal[int] ]= identifier[digits] [- literal[int] ]+ literal[int]
identifier[res] =[]
identifier[ten] = literal[int]
identifier[i] = identifier[len] ( identifier[digits] )- literal[int]
keyword[while] identifier[i] >= literal[int] keyword[or] identifier[ten] == literal[int] :
identifier[summ] = literal[int]
keyword[if] identifier[i] >= literal[int] :
identifier[summ] += identifier[digits] [ identifier[i] ]
keyword[if] identifier[ten] :
identifier[summ] += literal[int]
identifier[res] . identifier[append] ( identifier[summ] % literal[int] )
identifier[ten] = identifier[summ] // literal[int]
identifier[i] -= literal[int]
keyword[return] identifier[res] [::- literal[int] ] | def plus_one_v1(digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
digits[-1] = digits[-1] + 1
res = []
ten = 0
i = len(digits) - 1
while i >= 0 or ten == 1:
summ = 0
if i >= 0:
summ += digits[i] # depends on [control=['if'], data=['i']]
if ten:
summ += 1 # depends on [control=['if'], data=[]]
res.append(summ % 10)
ten = summ // 10
i -= 1 # depends on [control=['while'], data=[]]
return res[::-1] |
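
Quick sanity checks for the digit-carry logic above:

assert plus_one_v1([1, 2, 3]) == [1, 2, 4]
assert plus_one_v1([9, 9]) == [1, 0, 0]   # the carry propagates past the last digit
assert plus_one_v1([0]) == [1]
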
def unlock(self):
"""Unlock this Workspace."""
r = fapi.unlock_workspace(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 204)
self.data['workspace']['isLocked'] = False
return self | def function[unlock, parameter[self]]:
constant[Unlock this Workspace.]
variable[r] assign[=] call[name[fapi].unlock_workspace, parameter[name[self].namespace, name[self].name, name[self].api_url]]
call[name[fapi]._check_response_code, parameter[name[r], constant[204]]]
call[call[name[self].data][constant[workspace]]][constant[isLocked]] assign[=] constant[False]
return[name[self]] | keyword[def] identifier[unlock] ( identifier[self] ):
literal[string]
identifier[r] = identifier[fapi] . identifier[unlock_workspace] ( identifier[self] . identifier[namespace] , identifier[self] . identifier[name] , identifier[self] . identifier[api_url] )
identifier[fapi] . identifier[_check_response_code] ( identifier[r] , literal[int] )
identifier[self] . identifier[data] [ literal[string] ][ literal[string] ]= keyword[False]
keyword[return] identifier[self] | def unlock(self):
"""Unlock this Workspace."""
r = fapi.unlock_workspace(self.namespace, self.name, self.api_url)
fapi._check_response_code(r, 204)
self.data['workspace']['isLocked'] = False
return self |
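
A hypothetical usage sketch; the Workspace constructor arguments are assumptions, only the unlock call and the HTTP 204 expectation come from the method above:

ws = Workspace('my-namespace', 'my-workspace')   # hypothetical constructor
ws.unlock()
assert ws.data['workspace']['isLocked'] is False
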
def sort_merge(in_file, data, out_dir=None):
"""Sort and merge a BED file, collapsing gene names.
    Output is a 3- or 4-column file (4th-column values are comma-separated).
"""
out_file = "%s-sortmerge.bed" % os.path.splitext(in_file)[0]
bedtools = config_utils.get_program("bedtools", data, default="bedtools")
if out_dir:
out_file = os.path.join(out_dir, os.path.basename(out_file))
if not utils.file_uptodate(out_file, in_file):
column_opt = ""
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if not line.startswith(("#", "track", "browser", "@")):
parts = line.split()
if len(parts) >= 4:
column_opt = "-c 4 -o distinct"
with file_transaction(data, out_file) as tx_out_file:
cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
sort_cmd = get_sort_cmd(os.path.dirname(tx_out_file))
cmd = ("{cat_cmd} {in_file} | {sort_cmd} -k1,1 -k2,2n | "
"{bedtools} merge -i - {column_opt} > {tx_out_file}")
do.run(cmd.format(**locals()), "Sort and merge BED file", data)
return out_file | def function[sort_merge, parameter[in_file, data, out_dir]]:
constant[Sort and merge a BED file, collapsing gene names.
    Output is a 3- or 4-column file (4th-column values are comma-separated).
]
variable[out_file] assign[=] binary_operation[constant[%s-sortmerge.bed] <ast.Mod object at 0x7da2590d6920> call[call[name[os].path.splitext, parameter[name[in_file]]]][constant[0]]]
variable[bedtools] assign[=] call[name[config_utils].get_program, parameter[constant[bedtools], name[data]]]
if name[out_dir] begin[:]
variable[out_file] assign[=] call[name[os].path.join, parameter[name[out_dir], call[name[os].path.basename, parameter[name[out_file]]]]]
if <ast.UnaryOp object at 0x7da1b17a50c0> begin[:]
variable[column_opt] assign[=] constant[]
with call[name[utils].open_gzipsafe, parameter[name[in_file]]] begin[:]
for taget[name[line]] in starred[name[in_handle]] begin[:]
if <ast.UnaryOp object at 0x7da1b18d2710> begin[:]
variable[parts] assign[=] call[name[line].split, parameter[]]
if compare[call[name[len], parameter[name[parts]]] greater_or_equal[>=] constant[4]] begin[:]
variable[column_opt] assign[=] constant[-c 4 -o distinct]
with call[name[file_transaction], parameter[name[data], name[out_file]]] begin[:]
variable[cat_cmd] assign[=] <ast.IfExp object at 0x7da1b18955a0>
variable[sort_cmd] assign[=] call[name[get_sort_cmd], parameter[call[name[os].path.dirname, parameter[name[tx_out_file]]]]]
variable[cmd] assign[=] constant[{cat_cmd} {in_file} | {sort_cmd} -k1,1 -k2,2n | {bedtools} merge -i - {column_opt} > {tx_out_file}]
call[name[do].run, parameter[call[name[cmd].format, parameter[]], constant[Sort and merge BED file], name[data]]]
return[name[out_file]] | keyword[def] identifier[sort_merge] ( identifier[in_file] , identifier[data] , identifier[out_dir] = keyword[None] ):
literal[string]
identifier[out_file] = literal[string] % identifier[os] . identifier[path] . identifier[splitext] ( identifier[in_file] )[ literal[int] ]
identifier[bedtools] = identifier[config_utils] . identifier[get_program] ( literal[string] , identifier[data] , identifier[default] = literal[string] )
keyword[if] identifier[out_dir] :
identifier[out_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[out_dir] , identifier[os] . identifier[path] . identifier[basename] ( identifier[out_file] ))
keyword[if] keyword[not] identifier[utils] . identifier[file_uptodate] ( identifier[out_file] , identifier[in_file] ):
identifier[column_opt] = literal[string]
keyword[with] identifier[utils] . identifier[open_gzipsafe] ( identifier[in_file] ) keyword[as] identifier[in_handle] :
keyword[for] identifier[line] keyword[in] identifier[in_handle] :
keyword[if] keyword[not] identifier[line] . identifier[startswith] (( literal[string] , literal[string] , literal[string] , literal[string] )):
identifier[parts] = identifier[line] . identifier[split] ()
keyword[if] identifier[len] ( identifier[parts] )>= literal[int] :
identifier[column_opt] = literal[string]
keyword[with] identifier[file_transaction] ( identifier[data] , identifier[out_file] ) keyword[as] identifier[tx_out_file] :
identifier[cat_cmd] = literal[string] keyword[if] identifier[in_file] . identifier[endswith] ( literal[string] ) keyword[else] literal[string]
identifier[sort_cmd] = identifier[get_sort_cmd] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[tx_out_file] ))
identifier[cmd] =( literal[string]
literal[string] )
identifier[do] . identifier[run] ( identifier[cmd] . identifier[format] (** identifier[locals] ()), literal[string] , identifier[data] )
keyword[return] identifier[out_file] | def sort_merge(in_file, data, out_dir=None):
"""Sort and merge a BED file, collapsing gene names.
    Output is a 3- or 4-column file (4th-column values are comma-separated).
"""
out_file = '%s-sortmerge.bed' % os.path.splitext(in_file)[0]
bedtools = config_utils.get_program('bedtools', data, default='bedtools')
if out_dir:
out_file = os.path.join(out_dir, os.path.basename(out_file)) # depends on [control=['if'], data=[]]
if not utils.file_uptodate(out_file, in_file):
column_opt = ''
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if not line.startswith(('#', 'track', 'browser', '@')):
parts = line.split()
if len(parts) >= 4:
column_opt = '-c 4 -o distinct' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['in_handle']]
with file_transaction(data, out_file) as tx_out_file:
cat_cmd = 'zcat' if in_file.endswith('.gz') else 'cat'
sort_cmd = get_sort_cmd(os.path.dirname(tx_out_file))
cmd = '{cat_cmd} {in_file} | {sort_cmd} -k1,1 -k2,2n | {bedtools} merge -i - {column_opt} > {tx_out_file}'
do.run(cmd.format(**locals()), 'Sort and merge BED file', data) # depends on [control=['with'], data=['tx_out_file']] # depends on [control=['if'], data=[]]
return out_file |
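
A pure-Python sketch of what the bedtools pipeline above computes on a toy input: sort by chromosome and start, merge overlapping intervals, and collapse the 4th-column names (`bedtools merge -o distinct` would additionally deduplicate them):

rows = [("chr1", 200, 300, "geneB"), ("chr1", 100, 250, "geneA"), ("chr2", 50, 80, "geneC")]
rows.sort(key=lambda r: (r[0], r[1]))            # sort -k1,1 -k2,2n
merged = []
for chrom, start, end, name in rows:
    if merged and merged[-1][0] == chrom and start <= merged[-1][2]:
        merged[-1][2] = max(merged[-1][2], end)  # extend the open interval
        merged[-1][3].append(name)               # collapse gene names
    else:
        merged.append([chrom, start, end, [name]])
# merged == [['chr1', 100, 300, ['geneA', 'geneB']], ['chr2', 50, 80, ['geneC']]]
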
def from_regular_array(self, A):
"""
Converts from an array of type `int` where the last index
is assumed to have length `self.n_elements` to an array
        of type `self.dtype` with one fewer index.
:param np.ndarray A: An `np.array` of type `int`.
:rtype: `np.ndarray`
"""
dims = A.shape[:-1]
return A.reshape((np.prod(dims),-1)).view(dtype=self.dtype).squeeze(-1).reshape(dims) | def function[from_regular_array, parameter[self, A]]:
constant[
Converts from an array of type `int` where the last index
is assumed to have length `self.n_elements` to an array
        of type `self.dtype` with one fewer index.
:param np.ndarray A: An `np.array` of type `int`.
:rtype: `np.ndarray`
]
variable[dims] assign[=] call[name[A].shape][<ast.Slice object at 0x7da18f812560>]
return[call[call[call[call[name[A].reshape, parameter[tuple[[<ast.Call object at 0x7da18f812080>, <ast.UnaryOp object at 0x7da18f810640>]]]].view, parameter[]].squeeze, parameter[<ast.UnaryOp object at 0x7da18f810dc0>]].reshape, parameter[name[dims]]]] | keyword[def] identifier[from_regular_array] ( identifier[self] , identifier[A] ):
literal[string]
identifier[dims] = identifier[A] . identifier[shape] [:- literal[int] ]
keyword[return] identifier[A] . identifier[reshape] (( identifier[np] . identifier[prod] ( identifier[dims] ),- literal[int] )). identifier[view] ( identifier[dtype] = identifier[self] . identifier[dtype] ). identifier[squeeze] (- literal[int] ). identifier[reshape] ( identifier[dims] ) | def from_regular_array(self, A):
"""
Converts from an array of type `int` where the last index
is assumed to have length `self.n_elements` to an array
        of type `self.dtype` with one fewer index.
:param np.ndarray A: An `np.array` of type `int`.
:rtype: `np.ndarray`
"""
dims = A.shape[:-1]
return A.reshape((np.prod(dims), -1)).view(dtype=self.dtype).squeeze(-1).reshape(dims) |
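
A worked numpy example with a plain structured dtype standing in for `self.dtype` (two int64 fields, so `n_elements == 2` is implied):

import numpy as np

dtype = np.dtype([('x', np.int64), ('y', np.int64)])
A = np.arange(12, dtype=np.int64).reshape(2, 3, 2)   # last axis length == n_elements
dims = A.shape[:-1]
out = A.reshape((np.prod(dims), -1)).view(dtype=dtype).squeeze(-1).reshape(dims)
print(out.shape)     # (2, 3) -- one axis fewer than A
print(out[0, 0])     # (0, 1) -- each element is now an (x, y) record
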
def header_class_for_version(cls, version):
"""
>>> HeaderFactory.header_class_for_version(2.0)
Traceback (most recent call last):
...
pylas.errors.FileVersionNotSupported: 2.0
>>> HeaderFactory.header_class_for_version(1.2)
<class 'pylas.headers.rawheader.RawHeader1_2'>
>>> header_class = HeaderFactory.header_class_for_version(1.4)
>>> header_class()
<LasHeader(1.4)>
"""
try:
return cls._version_to_header[str(version)]
except KeyError:
raise errors.FileVersionNotSupported(version) | def function[header_class_for_version, parameter[cls, version]]:
constant[
>>> HeaderFactory.header_class_for_version(2.0)
Traceback (most recent call last):
...
pylas.errors.FileVersionNotSupported: 2.0
>>> HeaderFactory.header_class_for_version(1.2)
<class 'pylas.headers.rawheader.RawHeader1_2'>
>>> header_class = HeaderFactory.header_class_for_version(1.4)
>>> header_class()
<LasHeader(1.4)>
]
<ast.Try object at 0x7da1b025fe80> | keyword[def] identifier[header_class_for_version] ( identifier[cls] , identifier[version] ):
literal[string]
keyword[try] :
keyword[return] identifier[cls] . identifier[_version_to_header] [ identifier[str] ( identifier[version] )]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[errors] . identifier[FileVersionNotSupported] ( identifier[version] ) | def header_class_for_version(cls, version):
"""
>>> HeaderFactory.header_class_for_version(2.0)
Traceback (most recent call last):
...
pylas.errors.FileVersionNotSupported: 2.0
>>> HeaderFactory.header_class_for_version(1.2)
<class 'pylas.headers.rawheader.RawHeader1_2'>
>>> header_class = HeaderFactory.header_class_for_version(1.4)
>>> header_class()
<LasHeader(1.4)>
"""
try:
return cls._version_to_header[str(version)] # depends on [control=['try'], data=[]]
except KeyError:
raise errors.FileVersionNotSupported(version) # depends on [control=['except'], data=[]] |
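
A minimal sketch of the version registry this factory presumably reads from; the real `_version_to_header` mapping and header classes live elsewhere in the library:

class RawHeader1_2: ...
class RawHeader1_4: ...

_version_to_header = {'1.2': RawHeader1_2, '1.4': RawHeader1_4}
header_class = _version_to_header[str(1.4)]   # keys are stringified version numbers
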
def icnr(x, scale=2, init=nn.init.kaiming_normal_):
"ICNR init of `x`, with `scale` and `init` function."
ni,nf,h,w = x.shape
ni2 = int(ni/(scale**2))
k = init(torch.zeros([ni2,nf,h,w])).transpose(0, 1)
k = k.contiguous().view(ni2, nf, -1)
k = k.repeat(1, 1, scale**2)
k = k.contiguous().view([nf,ni,h,w]).transpose(0, 1)
x.data.copy_(k) | def function[icnr, parameter[x, scale, init]]:
constant[ICNR init of `x`, with `scale` and `init` function.]
<ast.Tuple object at 0x7da1b202bd60> assign[=] name[x].shape
variable[ni2] assign[=] call[name[int], parameter[binary_operation[name[ni] / binary_operation[name[scale] ** constant[2]]]]]
variable[k] assign[=] call[call[name[init], parameter[call[name[torch].zeros, parameter[list[[<ast.Name object at 0x7da1b202ae00>, <ast.Name object at 0x7da1b20280d0>, <ast.Name object at 0x7da1b2028f40>, <ast.Name object at 0x7da1b202b7f0>]]]]]].transpose, parameter[constant[0], constant[1]]]
variable[k] assign[=] call[call[name[k].contiguous, parameter[]].view, parameter[name[ni2], name[nf], <ast.UnaryOp object at 0x7da1b2029b10>]]
variable[k] assign[=] call[name[k].repeat, parameter[constant[1], constant[1], binary_operation[name[scale] ** constant[2]]]]
variable[k] assign[=] call[call[call[name[k].contiguous, parameter[]].view, parameter[list[[<ast.Name object at 0x7da1b202afe0>, <ast.Name object at 0x7da1b2029ff0>, <ast.Name object at 0x7da1b20286d0>, <ast.Name object at 0x7da1b2029990>]]]].transpose, parameter[constant[0], constant[1]]]
call[name[x].data.copy_, parameter[name[k]]] | keyword[def] identifier[icnr] ( identifier[x] , identifier[scale] = literal[int] , identifier[init] = identifier[nn] . identifier[init] . identifier[kaiming_normal_] ):
literal[string]
identifier[ni] , identifier[nf] , identifier[h] , identifier[w] = identifier[x] . identifier[shape]
identifier[ni2] = identifier[int] ( identifier[ni] /( identifier[scale] ** literal[int] ))
identifier[k] = identifier[init] ( identifier[torch] . identifier[zeros] ([ identifier[ni2] , identifier[nf] , identifier[h] , identifier[w] ])). identifier[transpose] ( literal[int] , literal[int] )
identifier[k] = identifier[k] . identifier[contiguous] (). identifier[view] ( identifier[ni2] , identifier[nf] ,- literal[int] )
identifier[k] = identifier[k] . identifier[repeat] ( literal[int] , literal[int] , identifier[scale] ** literal[int] )
identifier[k] = identifier[k] . identifier[contiguous] (). identifier[view] ([ identifier[nf] , identifier[ni] , identifier[h] , identifier[w] ]). identifier[transpose] ( literal[int] , literal[int] )
identifier[x] . identifier[data] . identifier[copy_] ( identifier[k] ) | def icnr(x, scale=2, init=nn.init.kaiming_normal_):
"""ICNR init of `x`, with `scale` and `init` function."""
(ni, nf, h, w) = x.shape
ni2 = int(ni / scale ** 2)
k = init(torch.zeros([ni2, nf, h, w])).transpose(0, 1)
k = k.contiguous().view(ni2, nf, -1)
k = k.repeat(1, 1, scale ** 2)
k = k.contiguous().view([nf, ni, h, w]).transpose(0, 1)
x.data.copy_(k) |
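
A hedged usage sketch: ICNR-initialize a conv that feeds a PixelShuffle(2) upsampler. The shapes follow from the code above (the first weight dimension must be divisible by scale**2):

import torch.nn as nn

nf_in, nf_out, scale = 32, 64, 2
conv = nn.Conv2d(nf_in, nf_out * scale ** 2, kernel_size=3, padding=1)
icnr(conv.weight, scale=scale)                # weight shape (256, 32, 3, 3)
upsample = nn.Sequential(conv, nn.PixelShuffle(scale))
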
def get_mutually_unstable_correlation_triples(graph: BELGraph) -> Iterable[NodeTriple]:
"""Yield triples of nodes (A, B, C) such that ``A neg B``, ``B neg C``, and ``C neg A``."""
cg = get_correlation_graph(graph)
for a, b, c in get_correlation_triangles(cg):
if all(NEGATIVE_CORRELATION in x for x in (cg[a][b], cg[b][c], cg[a][c])):
yield a, b, c | def function[get_mutually_unstable_correlation_triples, parameter[graph]]:
constant[Yield triples of nodes (A, B, C) such that ``A neg B``, ``B neg C``, and ``C neg A``.]
variable[cg] assign[=] call[name[get_correlation_graph], parameter[name[graph]]]
for taget[tuple[[<ast.Name object at 0x7da1b00d9090>, <ast.Name object at 0x7da1b00d8520>, <ast.Name object at 0x7da1b00da830>]]] in starred[call[name[get_correlation_triangles], parameter[name[cg]]]] begin[:]
if call[name[all], parameter[<ast.GeneratorExp object at 0x7da1b00db9d0>]] begin[:]
<ast.Yield object at 0x7da1b00d8a90> | keyword[def] identifier[get_mutually_unstable_correlation_triples] ( identifier[graph] : identifier[BELGraph] )-> identifier[Iterable] [ identifier[NodeTriple] ]:
literal[string]
identifier[cg] = identifier[get_correlation_graph] ( identifier[graph] )
keyword[for] identifier[a] , identifier[b] , identifier[c] keyword[in] identifier[get_correlation_triangles] ( identifier[cg] ):
keyword[if] identifier[all] ( identifier[NEGATIVE_CORRELATION] keyword[in] identifier[x] keyword[for] identifier[x] keyword[in] ( identifier[cg] [ identifier[a] ][ identifier[b] ], identifier[cg] [ identifier[b] ][ identifier[c] ], identifier[cg] [ identifier[a] ][ identifier[c] ])):
keyword[yield] identifier[a] , identifier[b] , identifier[c] | def get_mutually_unstable_correlation_triples(graph: BELGraph) -> Iterable[NodeTriple]:
"""Yield triples of nodes (A, B, C) such that ``A neg B``, ``B neg C``, and ``C neg A``."""
cg = get_correlation_graph(graph)
for (a, b, c) in get_correlation_triangles(cg):
if all((NEGATIVE_CORRELATION in x for x in (cg[a][b], cg[b][c], cg[a][c]))):
yield (a, b, c) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
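
A stand-alone illustration of the triangle test with networkx, modeling NEGATIVE_CORRELATION as a plain edge attribute (the real helpers `get_correlation_graph` and `get_correlation_triangles` are not shown here):

import networkx as nx
from itertools import combinations

g = nx.Graph()
for u, v in (('A', 'B'), ('B', 'C'), ('A', 'C')):
    g.add_edge(u, v, relation='neg')

for a, b, c in combinations(g, 3):
    if all(g.has_edge(u, v) and g[u][v]['relation'] == 'neg'
           for u, v in ((a, b), (b, c), (a, c))):
        print(a, b, c)   # a mutually unstable triple
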
def list(self, pagination=True, page_size=None, page=None, **queryparams):
"""
Retrieves a list of objects.
By default uses local cache and remote pagination
If pagination is used and no page is requested (the default), all the
remote objects are retrieved and appended in a single list.
If pagination is disabled, all the objects are fetched from the
        endpoint and returned. This may trigger parsing errors if the
        result set is very large.
:param pagination: Use pagination (default: `True`)
:param page_size: Size of the pagination page (default: `100`).
                          Any non-numeric value will be cast to the
                          default value
:param page: Page number to retrieve (default: `None`). Ignored if
`pagination` is `False`
:param queryparams: Additional filter parameters as accepted by the
remote API
:return: <SearchableList>
"""
if page_size and pagination:
try:
page_size = int(page_size)
except (ValueError, TypeError):
page_size = 100
queryparams['page_size'] = page_size
result = self.requester.get(
self.instance.endpoint, query=queryparams, paginate=pagination
)
objects = SearchableList()
objects.extend(self.parse_list(result.json()))
if result.headers.get('X-Pagination-Next', False) and not page:
next_page = 2
else:
next_page = None
while next_page:
pageparams = queryparams.copy()
pageparams['page'] = next_page
result = self.requester.get(
self.instance.endpoint, query=pageparams,
)
objects.extend(self.parse_list(result.json()))
if result.headers.get('X-Pagination-Next', False):
next_page += 1
else:
next_page = None
return objects | def function[list, parameter[self, pagination, page_size, page]]:
constant[
Retrieves a list of objects.
By default uses local cache and remote pagination
If pagination is used and no page is requested (the default), all the
remote objects are retrieved and appended in a single list.
If pagination is disabled, all the objects are fetched from the
        endpoint and returned. This may trigger parsing errors if the
        result set is very large.
:param pagination: Use pagination (default: `True`)
:param page_size: Size of the pagination page (default: `100`).
                          Any non-numeric value will be cast to the
                          default value
:param page: Page number to retrieve (default: `None`). Ignored if
`pagination` is `False`
:param queryparams: Additional filter parameters as accepted by the
remote API
:return: <SearchableList>
]
if <ast.BoolOp object at 0x7da207f9b040> begin[:]
<ast.Try object at 0x7da207f9ae90>
call[name[queryparams]][constant[page_size]] assign[=] name[page_size]
variable[result] assign[=] call[name[self].requester.get, parameter[name[self].instance.endpoint]]
variable[objects] assign[=] call[name[SearchableList], parameter[]]
call[name[objects].extend, parameter[call[name[self].parse_list, parameter[call[name[result].json, parameter[]]]]]]
if <ast.BoolOp object at 0x7da18bccb7c0> begin[:]
variable[next_page] assign[=] constant[2]
while name[next_page] begin[:]
variable[pageparams] assign[=] call[name[queryparams].copy, parameter[]]
call[name[pageparams]][constant[page]] assign[=] name[next_page]
variable[result] assign[=] call[name[self].requester.get, parameter[name[self].instance.endpoint]]
call[name[objects].extend, parameter[call[name[self].parse_list, parameter[call[name[result].json, parameter[]]]]]]
if call[name[result].headers.get, parameter[constant[X-Pagination-Next], constant[False]]] begin[:]
<ast.AugAssign object at 0x7da18f00d7b0>
return[name[objects]] | keyword[def] identifier[list] ( identifier[self] , identifier[pagination] = keyword[True] , identifier[page_size] = keyword[None] , identifier[page] = keyword[None] ,** identifier[queryparams] ):
literal[string]
keyword[if] identifier[page_size] keyword[and] identifier[pagination] :
keyword[try] :
identifier[page_size] = identifier[int] ( identifier[page_size] )
keyword[except] ( identifier[ValueError] , identifier[TypeError] ):
identifier[page_size] = literal[int]
identifier[queryparams] [ literal[string] ]= identifier[page_size]
identifier[result] = identifier[self] . identifier[requester] . identifier[get] (
identifier[self] . identifier[instance] . identifier[endpoint] , identifier[query] = identifier[queryparams] , identifier[paginate] = identifier[pagination]
)
identifier[objects] = identifier[SearchableList] ()
identifier[objects] . identifier[extend] ( identifier[self] . identifier[parse_list] ( identifier[result] . identifier[json] ()))
keyword[if] identifier[result] . identifier[headers] . identifier[get] ( literal[string] , keyword[False] ) keyword[and] keyword[not] identifier[page] :
identifier[next_page] = literal[int]
keyword[else] :
identifier[next_page] = keyword[None]
keyword[while] identifier[next_page] :
identifier[pageparams] = identifier[queryparams] . identifier[copy] ()
identifier[pageparams] [ literal[string] ]= identifier[next_page]
identifier[result] = identifier[self] . identifier[requester] . identifier[get] (
identifier[self] . identifier[instance] . identifier[endpoint] , identifier[query] = identifier[pageparams] ,
)
identifier[objects] . identifier[extend] ( identifier[self] . identifier[parse_list] ( identifier[result] . identifier[json] ()))
keyword[if] identifier[result] . identifier[headers] . identifier[get] ( literal[string] , keyword[False] ):
identifier[next_page] += literal[int]
keyword[else] :
identifier[next_page] = keyword[None]
keyword[return] identifier[objects] | def list(self, pagination=True, page_size=None, page=None, **queryparams):
"""
Retrieves a list of objects.
By default uses local cache and remote pagination
If pagination is used and no page is requested (the default), all the
remote objects are retrieved and appended in a single list.
If pagination is disabled, all the objects are fetched from the
        endpoint and returned. This may trigger parsing errors if the
        result set is very large.
:param pagination: Use pagination (default: `True`)
:param page_size: Size of the pagination page (default: `100`).
                          Any non-numeric value will be cast to the
                          default value
:param page: Page number to retrieve (default: `None`). Ignored if
`pagination` is `False`
:param queryparams: Additional filter parameters as accepted by the
remote API
:return: <SearchableList>
"""
if page_size and pagination:
try:
page_size = int(page_size) # depends on [control=['try'], data=[]]
except (ValueError, TypeError):
page_size = 100 # depends on [control=['except'], data=[]]
queryparams['page_size'] = page_size # depends on [control=['if'], data=[]]
result = self.requester.get(self.instance.endpoint, query=queryparams, paginate=pagination)
objects = SearchableList()
objects.extend(self.parse_list(result.json()))
if result.headers.get('X-Pagination-Next', False) and (not page):
next_page = 2 # depends on [control=['if'], data=[]]
else:
next_page = None
while next_page:
pageparams = queryparams.copy()
pageparams['page'] = next_page
result = self.requester.get(self.instance.endpoint, query=pageparams)
objects.extend(self.parse_list(result.json()))
if result.headers.get('X-Pagination-Next', False):
next_page += 1 # depends on [control=['if'], data=[]]
else:
next_page = None # depends on [control=['while'], data=[]]
return objects |
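
The same fetch-all-pages loop, sketched generically with requests; the 'X-Pagination-Next' header name is taken from the method above, everything else is an assumption:

import requests

def fetch_all(url, **params):
    items, page = [], None
    while True:
        if page is not None:
            params['page'] = page
        resp = requests.get(url, params=params)
        items.extend(resp.json())
        if not resp.headers.get('X-Pagination-Next'):
            return items
        page = 2 if page is None else page + 1
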
def init_output_formatters(output_verbosity='normal', stderr=sys.stderr, logfile=None, debug_logfile=None):
"""
Initialize the CLI logging scheme.
    :param output_verbosity: 'quiet', 'normal', 'verbose', or 'debug'; controls the output to stderr and its format
:param stderr: stream for stderr output, default=stderr, pass a file path/string to have stderr in a file
:return:
"""
if output_verbosity not in console_verbosity_options:
raise ValueError('output_verbosity must be one of: %s' % console_verbosity_options.keys())
    # Initialize debug log file, 'anchore-debug.log'. This log has stack traces and is expected to be
    # human-readable, intended for developers and debugging, not an operational log.
# Configure stderr behavior. All errors go to screen
stderr_handler = logging.StreamHandler(stderr)
if output_verbosity == 'quiet':
stderr_handler.setLevel(level='ERROR')
stderr_handler.setFormatter(NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT))
logging.root.setLevel('ERROR') # Allow all at top level, filter specifics for each handler
elif output_verbosity == 'normal':
# The specific console logger
stderr_handler.setLevel('INFO')
stderr_formatter = NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT)
stderr_handler.setFormatter(stderr_formatter)
stderr_handler.addFilter(LoggerNamePrefixFilter(prefix='anchore', non_match_loglevel='ERROR'))
logging.root.setLevel('INFO')
elif output_verbosity == 'verbose':
stderr_handler.setLevel('INFO')
stderr_handler.setFormatter(NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT))
logging.root.setLevel('INFO')
elif output_verbosity == 'debug':
stderr_handler.setLevel(level='DEBUG')
stderr_handler.setFormatter(logging.Formatter(fmt=DEBUG_FORMAT))
logging.root.setLevel('DEBUG')
logging.root.addHandler(stderr_handler)
if debug_logfile:
debug_filehandler = logging.FileHandler(debug_logfile)
debug_filehandler.setLevel('DEBUG')
formatter = logging.Formatter(fmt=DEBUG_LOGFILE_FORMAT)
debug_filehandler.setFormatter(formatter)
logging.root.addHandler(debug_filehandler)
logging.root.setLevel('DEBUG')
if logfile:
filehandler = logging.FileHandler(logfile)
filehandler.setLevel('INFO')
filehandler.setFormatter(NoTracebackFormatter(fmt=LOGFILE_FORMAT, err_fmt=LOGFILE_FORMAT))
logging.root.addHandler(filehandler) | def function[init_output_formatters, parameter[output_verbosity, stderr, logfile, debug_logfile]]:
constant[
Initialize the CLI logging scheme.
    :param output_verbosity: 'quiet', 'normal', 'verbose', or 'debug'; controls the output to stderr and its format
:param stderr: stream for stderr output, default=stderr, pass a file path/string to have stderr in a file
:return:
]
if compare[name[output_verbosity] <ast.NotIn object at 0x7da2590d7190> name[console_verbosity_options]] begin[:]
<ast.Raise object at 0x7da1b0b47250>
variable[stderr_handler] assign[=] call[name[logging].StreamHandler, parameter[name[stderr]]]
if compare[name[output_verbosity] equal[==] constant[quiet]] begin[:]
call[name[stderr_handler].setLevel, parameter[]]
call[name[stderr_handler].setFormatter, parameter[call[name[NoTracebackFormatter], parameter[]]]]
call[name[logging].root.setLevel, parameter[constant[ERROR]]]
call[name[logging].root.addHandler, parameter[name[stderr_handler]]]
if name[debug_logfile] begin[:]
variable[debug_filehandler] assign[=] call[name[logging].FileHandler, parameter[name[debug_logfile]]]
call[name[debug_filehandler].setLevel, parameter[constant[DEBUG]]]
variable[formatter] assign[=] call[name[logging].Formatter, parameter[]]
call[name[debug_filehandler].setFormatter, parameter[name[formatter]]]
call[name[logging].root.addHandler, parameter[name[debug_filehandler]]]
call[name[logging].root.setLevel, parameter[constant[DEBUG]]]
if name[logfile] begin[:]
variable[filehandler] assign[=] call[name[logging].FileHandler, parameter[name[logfile]]]
call[name[filehandler].setLevel, parameter[constant[INFO]]]
call[name[filehandler].setFormatter, parameter[call[name[NoTracebackFormatter], parameter[]]]]
call[name[logging].root.addHandler, parameter[name[filehandler]]] | keyword[def] identifier[init_output_formatters] ( identifier[output_verbosity] = literal[string] , identifier[stderr] = identifier[sys] . identifier[stderr] , identifier[logfile] = keyword[None] , identifier[debug_logfile] = keyword[None] ):
literal[string]
keyword[if] identifier[output_verbosity] keyword[not] keyword[in] identifier[console_verbosity_options] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[console_verbosity_options] . identifier[keys] ())
identifier[stderr_handler] = identifier[logging] . identifier[StreamHandler] ( identifier[stderr] )
keyword[if] identifier[output_verbosity] == literal[string] :
identifier[stderr_handler] . identifier[setLevel] ( identifier[level] = literal[string] )
identifier[stderr_handler] . identifier[setFormatter] ( identifier[NoTracebackFormatter] ( identifier[fmt] = identifier[NORMAL_FORMAT] , identifier[err_fmt] = identifier[ERR_FORMAT] ))
identifier[logging] . identifier[root] . identifier[setLevel] ( literal[string] )
keyword[elif] identifier[output_verbosity] == literal[string] :
identifier[stderr_handler] . identifier[setLevel] ( literal[string] )
identifier[stderr_formatter] = identifier[NoTracebackFormatter] ( identifier[fmt] = identifier[NORMAL_FORMAT] , identifier[err_fmt] = identifier[ERR_FORMAT] )
identifier[stderr_handler] . identifier[setFormatter] ( identifier[stderr_formatter] )
identifier[stderr_handler] . identifier[addFilter] ( identifier[LoggerNamePrefixFilter] ( identifier[prefix] = literal[string] , identifier[non_match_loglevel] = literal[string] ))
identifier[logging] . identifier[root] . identifier[setLevel] ( literal[string] )
keyword[elif] identifier[output_verbosity] == literal[string] :
identifier[stderr_handler] . identifier[setLevel] ( literal[string] )
identifier[stderr_handler] . identifier[setFormatter] ( identifier[NoTracebackFormatter] ( identifier[fmt] = identifier[NORMAL_FORMAT] , identifier[err_fmt] = identifier[ERR_FORMAT] ))
identifier[logging] . identifier[root] . identifier[setLevel] ( literal[string] )
keyword[elif] identifier[output_verbosity] == literal[string] :
identifier[stderr_handler] . identifier[setLevel] ( identifier[level] = literal[string] )
identifier[stderr_handler] . identifier[setFormatter] ( identifier[logging] . identifier[Formatter] ( identifier[fmt] = identifier[DEBUG_FORMAT] ))
identifier[logging] . identifier[root] . identifier[setLevel] ( literal[string] )
identifier[logging] . identifier[root] . identifier[addHandler] ( identifier[stderr_handler] )
keyword[if] identifier[debug_logfile] :
identifier[debug_filehandler] = identifier[logging] . identifier[FileHandler] ( identifier[debug_logfile] )
identifier[debug_filehandler] . identifier[setLevel] ( literal[string] )
identifier[formatter] = identifier[logging] . identifier[Formatter] ( identifier[fmt] = identifier[DEBUG_LOGFILE_FORMAT] )
identifier[debug_filehandler] . identifier[setFormatter] ( identifier[formatter] )
identifier[logging] . identifier[root] . identifier[addHandler] ( identifier[debug_filehandler] )
identifier[logging] . identifier[root] . identifier[setLevel] ( literal[string] )
keyword[if] identifier[logfile] :
identifier[filehandler] = identifier[logging] . identifier[FileHandler] ( identifier[logfile] )
identifier[filehandler] . identifier[setLevel] ( literal[string] )
identifier[filehandler] . identifier[setFormatter] ( identifier[NoTracebackFormatter] ( identifier[fmt] = identifier[LOGFILE_FORMAT] , identifier[err_fmt] = identifier[LOGFILE_FORMAT] ))
identifier[logging] . identifier[root] . identifier[addHandler] ( identifier[filehandler] ) | def init_output_formatters(output_verbosity='normal', stderr=sys.stderr, logfile=None, debug_logfile=None):
"""
Initialize the CLI logging scheme.
    :param output_verbosity: 'quiet', 'normal', 'verbose', or 'debug'; controls the output to stderr and its format
:param stderr: stream for stderr output, default=stderr, pass a file path/string to have stderr in a file
:return:
"""
if output_verbosity not in console_verbosity_options:
raise ValueError('output_verbosity must be one of: %s' % console_verbosity_options.keys()) # depends on [control=['if'], data=['console_verbosity_options']]
    # Initialize debug log file, 'anchore-debug.log'. This log has stack traces and is expected to be
    # human-readable, intended for developers and debugging, not an operational log.
# Configure stderr behavior. All errors go to screen
stderr_handler = logging.StreamHandler(stderr)
if output_verbosity == 'quiet':
stderr_handler.setLevel(level='ERROR')
stderr_handler.setFormatter(NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT))
logging.root.setLevel('ERROR') # Allow all at top level, filter specifics for each handler # depends on [control=['if'], data=[]]
elif output_verbosity == 'normal':
# The specific console logger
stderr_handler.setLevel('INFO')
stderr_formatter = NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT)
stderr_handler.setFormatter(stderr_formatter)
stderr_handler.addFilter(LoggerNamePrefixFilter(prefix='anchore', non_match_loglevel='ERROR'))
logging.root.setLevel('INFO') # depends on [control=['if'], data=[]]
elif output_verbosity == 'verbose':
stderr_handler.setLevel('INFO')
stderr_handler.setFormatter(NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT))
logging.root.setLevel('INFO') # depends on [control=['if'], data=[]]
elif output_verbosity == 'debug':
stderr_handler.setLevel(level='DEBUG')
stderr_handler.setFormatter(logging.Formatter(fmt=DEBUG_FORMAT))
logging.root.setLevel('DEBUG') # depends on [control=['if'], data=[]]
logging.root.addHandler(stderr_handler)
if debug_logfile:
debug_filehandler = logging.FileHandler(debug_logfile)
debug_filehandler.setLevel('DEBUG')
formatter = logging.Formatter(fmt=DEBUG_LOGFILE_FORMAT)
debug_filehandler.setFormatter(formatter)
logging.root.addHandler(debug_filehandler)
logging.root.setLevel('DEBUG') # depends on [control=['if'], data=[]]
if logfile:
filehandler = logging.FileHandler(logfile)
filehandler.setLevel('INFO')
filehandler.setFormatter(NoTracebackFormatter(fmt=LOGFILE_FORMAT, err_fmt=LOGFILE_FORMAT))
logging.root.addHandler(filehandler) # depends on [control=['if'], data=[]] |
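
A typical call, with the log paths as illustrative assumptions; afterwards any logger under the 'anchore' prefix follows the chosen scheme:

init_output_formatters(output_verbosity='verbose',
                       logfile='/var/log/anchore/anchore.log',
                       debug_logfile='/var/log/anchore/anchore-debug.log')
logging.getLogger('anchore.cli').info('handlers are now configured')
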
def run_analysis(self, argv):
"""Run this analysis"""
args = self._parser.parse_args(argv)
obs = BinnedAnalysis.BinnedObs(irfs=args.irfs,
expCube=args.expcube,
srcMaps=args.cmap,
binnedExpMap=args.bexpmap)
if args.no_psf:
performConvolution = False
else:
performConvolution = True
config = BinnedAnalysis.BinnedConfig(performConvolution=performConvolution)
like = BinnedAnalysis.BinnedAnalysis(obs,
optimizer='MINUIT',
srcModel=GtSrcmapsDiffuse.NULL_MODEL,
wmap=None,
config=config)
source_factory = pyLike.SourceFactory(obs.observation)
source_factory.readXml(args.srcmdl, BinnedAnalysis._funcFactory,
False, True, True)
source = source_factory.releaseSource(args.source)
try:
diffuse_source = pyLike.DiffuseSource.cast(source)
except TypeError:
diffuse_source = None
if diffuse_source is not None:
try:
diffuse_source.mapBaseObject().projmap().setExtrapolation(False)
except RuntimeError:
pass
like.logLike.saveSourceMap_partial(args.outfile, source, args.kmin, args.kmax)
if args.gzip:
os.system("gzip -9 %s" % args.outfile) | def function[run_analysis, parameter[self, argv]]:
constant[Run this analysis]
variable[args] assign[=] call[name[self]._parser.parse_args, parameter[name[argv]]]
variable[obs] assign[=] call[name[BinnedAnalysis].BinnedObs, parameter[]]
if name[args].no_psf begin[:]
variable[performConvolution] assign[=] constant[False]
variable[config] assign[=] call[name[BinnedAnalysis].BinnedConfig, parameter[]]
variable[like] assign[=] call[name[BinnedAnalysis].BinnedAnalysis, parameter[name[obs]]]
variable[source_factory] assign[=] call[name[pyLike].SourceFactory, parameter[name[obs].observation]]
call[name[source_factory].readXml, parameter[name[args].srcmdl, name[BinnedAnalysis]._funcFactory, constant[False], constant[True], constant[True]]]
variable[source] assign[=] call[name[source_factory].releaseSource, parameter[name[args].source]]
<ast.Try object at 0x7da2044c0f40>
if compare[name[diffuse_source] is_not constant[None]] begin[:]
<ast.Try object at 0x7da2044c1ed0>
call[name[like].logLike.saveSourceMap_partial, parameter[name[args].outfile, name[source], name[args].kmin, name[args].kmax]]
if name[args].gzip begin[:]
call[name[os].system, parameter[binary_operation[constant[gzip -9 %s] <ast.Mod object at 0x7da2590d6920> name[args].outfile]]] | keyword[def] identifier[run_analysis] ( identifier[self] , identifier[argv] ):
literal[string]
identifier[args] = identifier[self] . identifier[_parser] . identifier[parse_args] ( identifier[argv] )
identifier[obs] = identifier[BinnedAnalysis] . identifier[BinnedObs] ( identifier[irfs] = identifier[args] . identifier[irfs] ,
identifier[expCube] = identifier[args] . identifier[expcube] ,
identifier[srcMaps] = identifier[args] . identifier[cmap] ,
identifier[binnedExpMap] = identifier[args] . identifier[bexpmap] )
keyword[if] identifier[args] . identifier[no_psf] :
identifier[performConvolution] = keyword[False]
keyword[else] :
identifier[performConvolution] = keyword[True]
identifier[config] = identifier[BinnedAnalysis] . identifier[BinnedConfig] ( identifier[performConvolution] = identifier[performConvolution] )
identifier[like] = identifier[BinnedAnalysis] . identifier[BinnedAnalysis] ( identifier[obs] ,
identifier[optimizer] = literal[string] ,
identifier[srcModel] = identifier[GtSrcmapsDiffuse] . identifier[NULL_MODEL] ,
identifier[wmap] = keyword[None] ,
identifier[config] = identifier[config] )
identifier[source_factory] = identifier[pyLike] . identifier[SourceFactory] ( identifier[obs] . identifier[observation] )
identifier[source_factory] . identifier[readXml] ( identifier[args] . identifier[srcmdl] , identifier[BinnedAnalysis] . identifier[_funcFactory] ,
keyword[False] , keyword[True] , keyword[True] )
identifier[source] = identifier[source_factory] . identifier[releaseSource] ( identifier[args] . identifier[source] )
keyword[try] :
identifier[diffuse_source] = identifier[pyLike] . identifier[DiffuseSource] . identifier[cast] ( identifier[source] )
keyword[except] identifier[TypeError] :
identifier[diffuse_source] = keyword[None]
keyword[if] identifier[diffuse_source] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[diffuse_source] . identifier[mapBaseObject] (). identifier[projmap] (). identifier[setExtrapolation] ( keyword[False] )
keyword[except] identifier[RuntimeError] :
keyword[pass]
identifier[like] . identifier[logLike] . identifier[saveSourceMap_partial] ( identifier[args] . identifier[outfile] , identifier[source] , identifier[args] . identifier[kmin] , identifier[args] . identifier[kmax] )
keyword[if] identifier[args] . identifier[gzip] :
identifier[os] . identifier[system] ( literal[string] % identifier[args] . identifier[outfile] ) | def run_analysis(self, argv):
"""Run this analysis"""
args = self._parser.parse_args(argv)
obs = BinnedAnalysis.BinnedObs(irfs=args.irfs, expCube=args.expcube, srcMaps=args.cmap, binnedExpMap=args.bexpmap)
if args.no_psf:
performConvolution = False # depends on [control=['if'], data=[]]
else:
performConvolution = True
config = BinnedAnalysis.BinnedConfig(performConvolution=performConvolution)
like = BinnedAnalysis.BinnedAnalysis(obs, optimizer='MINUIT', srcModel=GtSrcmapsDiffuse.NULL_MODEL, wmap=None, config=config)
source_factory = pyLike.SourceFactory(obs.observation)
source_factory.readXml(args.srcmdl, BinnedAnalysis._funcFactory, False, True, True)
source = source_factory.releaseSource(args.source)
try:
diffuse_source = pyLike.DiffuseSource.cast(source) # depends on [control=['try'], data=[]]
except TypeError:
diffuse_source = None # depends on [control=['except'], data=[]]
if diffuse_source is not None:
try:
diffuse_source.mapBaseObject().projmap().setExtrapolation(False) # depends on [control=['try'], data=[]]
except RuntimeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['diffuse_source']]
like.logLike.saveSourceMap_partial(args.outfile, source, args.kmin, args.kmax)
if args.gzip:
os.system('gzip -9 %s' % args.outfile) # depends on [control=['if'], data=[]] |
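
A hypothetical invocation; the option names are guesses derived from the `args.*` attributes used above (the parser definition is not shown), and instantiating GtSrcmapsDiffuse without arguments is likewise an assumption:

argv = ['--irfs', 'P8R3_SOURCE_V3',
        '--expcube', 'ltcube.fits', '--cmap', 'srcmaps.fits',
        '--bexpmap', 'bexpmap.fits', '--srcmdl', 'model.xml',
        '--source', 'galdiff', '--outfile', 'srcmap_partial.fits',
        '--kmin', '0', '--kmax', '10']
GtSrcmapsDiffuse().run_analysis(argv)
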
def save(name, filter=False):
'''
Save the register to <salt cachedir>/thorium/saves/<name>, or to an
absolute path.
If an absolute path is specified, then the directory will be created
non-recursively if it doesn't exist.
USAGE:
.. code-block:: yaml
foo:
file.save
/tmp/foo:
file.save
'''
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
if name.startswith('/'):
tgt_dir = os.path.dirname(name)
fn_ = name
else:
tgt_dir = os.path.join(__opts__['cachedir'], 'thorium', 'saves')
fn_ = os.path.join(tgt_dir, name)
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir)
with salt.utils.files.fopen(fn_, 'w+') as fp_:
if filter is True:
salt.utils.json.dump(salt.utils.data.simple_types_filter(__reg__), fp_)
else:
salt.utils.json.dump(__reg__, fp_)
return ret | def function[save, parameter[name, filter]]:
constant[
Save the register to <salt cachedir>/thorium/saves/<name>, or to an
absolute path.
If an absolute path is specified, then the directory will be created
non-recursively if it doesn't exist.
USAGE:
.. code-block:: yaml
foo:
file.save
/tmp/foo:
file.save
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b26ad5d0>, <ast.Constant object at 0x7da1b26ac100>, <ast.Constant object at 0x7da1b26ac1f0>, <ast.Constant object at 0x7da1b26ad270>], [<ast.Name object at 0x7da1b26af430>, <ast.Dict object at 0x7da1b26afa00>, <ast.Constant object at 0x7da1b26aefe0>, <ast.Constant object at 0x7da1b26ae4d0>]]
if call[name[name].startswith, parameter[constant[/]]] begin[:]
variable[tgt_dir] assign[=] call[name[os].path.dirname, parameter[name[name]]]
variable[fn_] assign[=] name[name]
if <ast.UnaryOp object at 0x7da1b26afac0> begin[:]
call[name[os].makedirs, parameter[name[tgt_dir]]]
with call[name[salt].utils.files.fopen, parameter[name[fn_], constant[w+]]] begin[:]
if compare[name[filter] is constant[True]] begin[:]
call[name[salt].utils.json.dump, parameter[call[name[salt].utils.data.simple_types_filter, parameter[name[__reg__]]], name[fp_]]]
return[name[ret]] | keyword[def] identifier[save] ( identifier[name] , identifier[filter] = keyword[False] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : literal[string] ,
literal[string] : keyword[True] }
keyword[if] identifier[name] . identifier[startswith] ( literal[string] ):
identifier[tgt_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[name] )
identifier[fn_] = identifier[name]
keyword[else] :
identifier[tgt_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[__opts__] [ literal[string] ], literal[string] , literal[string] )
identifier[fn_] = identifier[os] . identifier[path] . identifier[join] ( identifier[tgt_dir] , identifier[name] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[tgt_dir] ):
identifier[os] . identifier[makedirs] ( identifier[tgt_dir] )
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[fn_] , literal[string] ) keyword[as] identifier[fp_] :
keyword[if] identifier[filter] keyword[is] keyword[True] :
identifier[salt] . identifier[utils] . identifier[json] . identifier[dump] ( identifier[salt] . identifier[utils] . identifier[data] . identifier[simple_types_filter] ( identifier[__reg__] ), identifier[fp_] )
keyword[else] :
identifier[salt] . identifier[utils] . identifier[json] . identifier[dump] ( identifier[__reg__] , identifier[fp_] )
keyword[return] identifier[ret] | def save(name, filter=False):
"""
Save the register to <salt cachedir>/thorium/saves/<name>, or to an
absolute path.
If an absolute path is specified, then the directory will be created
non-recursively if it doesn't exist.
USAGE:
.. code-block:: yaml
foo:
file.save
/tmp/foo:
file.save
"""
ret = {'name': name, 'changes': {}, 'comment': '', 'result': True}
if name.startswith('/'):
tgt_dir = os.path.dirname(name)
fn_ = name # depends on [control=['if'], data=[]]
else:
tgt_dir = os.path.join(__opts__['cachedir'], 'thorium', 'saves')
fn_ = os.path.join(tgt_dir, name)
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir) # depends on [control=['if'], data=[]]
with salt.utils.files.fopen(fn_, 'w+') as fp_:
if filter is True:
salt.utils.json.dump(salt.utils.data.simple_types_filter(__reg__), fp_) # depends on [control=['if'], data=[]]
else:
salt.utils.json.dump(__reg__, fp_) # depends on [control=['with'], data=['fp_']]
return ret |
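
The two path modes from the docstring, traced by hand (the cachedir value is illustrative):

import os.path

cachedir = '/var/cache/salt/minion'          # stand-in for __opts__['cachedir']
for name in ('foo', '/tmp/foo'):
    if name.startswith('/'):
        fn_ = name                           # absolute: written in place, parent dir created
    else:
        fn_ = os.path.join(cachedir, 'thorium', 'saves', name)
    print(name, '->', fn_)
# foo -> /var/cache/salt/minion/thorium/saves/foo
# /tmp/foo -> /tmp/foo
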
def perf_stats(returns, factor_returns=None, positions=None,
transactions=None, turnover_denom='AGB'):
"""
Calculates various performance metrics of a strategy, for use in
plotting.show_perf_stats.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
- If None, do not compute alpha, beta, and information ratio.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
turnover_denom : str
Either AGB or portfolio_value, default AGB.
- See full explanation in txn.get_turnover.
Returns
-------
pd.Series
Performance metrics.
"""
stats = pd.Series()
for stat_func in SIMPLE_STAT_FUNCS:
stats[STAT_FUNC_NAMES[stat_func.__name__]] = stat_func(returns)
if positions is not None:
stats['Gross leverage'] = gross_lev(positions).mean()
if transactions is not None:
stats['Daily turnover'] = get_turnover(positions,
transactions,
turnover_denom).mean()
if factor_returns is not None:
for stat_func in FACTOR_STAT_FUNCS:
res = stat_func(returns, factor_returns)
stats[STAT_FUNC_NAMES[stat_func.__name__]] = res
return stats | def function[perf_stats, parameter[returns, factor_returns, positions, transactions, turnover_denom]]:
constant[
Calculates various performance metrics of a strategy, for use in
plotting.show_perf_stats.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
- If None, do not compute alpha, beta, and information ratio.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
turnover_denom : str
Either AGB or portfolio_value, default AGB.
- See full explanation in txn.get_turnover.
Returns
-------
pd.Series
Performance metrics.
]
variable[stats] assign[=] call[name[pd].Series, parameter[]]
for taget[name[stat_func]] in starred[name[SIMPLE_STAT_FUNCS]] begin[:]
call[name[stats]][call[name[STAT_FUNC_NAMES]][name[stat_func].__name__]] assign[=] call[name[stat_func], parameter[name[returns]]]
if compare[name[positions] is_not constant[None]] begin[:]
call[name[stats]][constant[Gross leverage]] assign[=] call[call[name[gross_lev], parameter[name[positions]]].mean, parameter[]]
if compare[name[transactions] is_not constant[None]] begin[:]
call[name[stats]][constant[Daily turnover]] assign[=] call[call[name[get_turnover], parameter[name[positions], name[transactions], name[turnover_denom]]].mean, parameter[]]
if compare[name[factor_returns] is_not constant[None]] begin[:]
for taget[name[stat_func]] in starred[name[FACTOR_STAT_FUNCS]] begin[:]
variable[res] assign[=] call[name[stat_func], parameter[name[returns], name[factor_returns]]]
call[name[stats]][call[name[STAT_FUNC_NAMES]][name[stat_func].__name__]] assign[=] name[res]
return[name[stats]] | keyword[def] identifier[perf_stats] ( identifier[returns] , identifier[factor_returns] = keyword[None] , identifier[positions] = keyword[None] ,
identifier[transactions] = keyword[None] , identifier[turnover_denom] = literal[string] ):
literal[string]
identifier[stats] = identifier[pd] . identifier[Series] ()
keyword[for] identifier[stat_func] keyword[in] identifier[SIMPLE_STAT_FUNCS] :
identifier[stats] [ identifier[STAT_FUNC_NAMES] [ identifier[stat_func] . identifier[__name__] ]]= identifier[stat_func] ( identifier[returns] )
keyword[if] identifier[positions] keyword[is] keyword[not] keyword[None] :
identifier[stats] [ literal[string] ]= identifier[gross_lev] ( identifier[positions] ). identifier[mean] ()
keyword[if] identifier[transactions] keyword[is] keyword[not] keyword[None] :
identifier[stats] [ literal[string] ]= identifier[get_turnover] ( identifier[positions] ,
identifier[transactions] ,
identifier[turnover_denom] ). identifier[mean] ()
keyword[if] identifier[factor_returns] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[stat_func] keyword[in] identifier[FACTOR_STAT_FUNCS] :
identifier[res] = identifier[stat_func] ( identifier[returns] , identifier[factor_returns] )
identifier[stats] [ identifier[STAT_FUNC_NAMES] [ identifier[stat_func] . identifier[__name__] ]]= identifier[res]
keyword[return] identifier[stats] | def perf_stats(returns, factor_returns=None, positions=None, transactions=None, turnover_denom='AGB'):
"""
Calculates various performance metrics of a strategy, for use in
plotting.show_perf_stats.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- This is in the same style as returns.
- If None, do not compute alpha, beta, and information ratio.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
turnover_denom : str
Either AGB or portfolio_value, default AGB.
- See full explanation in txn.get_turnover.
Returns
-------
pd.Series
Performance metrics.
"""
stats = pd.Series()
for stat_func in SIMPLE_STAT_FUNCS:
stats[STAT_FUNC_NAMES[stat_func.__name__]] = stat_func(returns) # depends on [control=['for'], data=['stat_func']]
if positions is not None:
stats['Gross leverage'] = gross_lev(positions).mean()
if transactions is not None:
stats['Daily turnover'] = get_turnover(positions, transactions, turnover_denom).mean() # depends on [control=['if'], data=['transactions']] # depends on [control=['if'], data=['positions']]
if factor_returns is not None:
for stat_func in FACTOR_STAT_FUNCS:
res = stat_func(returns, factor_returns)
stats[STAT_FUNC_NAMES[stat_func.__name__]] = res # depends on [control=['for'], data=['stat_func']] # depends on [control=['if'], data=['factor_returns']]
return stats |
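
A hedged usage sketch with synthetic daily returns; the concrete rows of the result depend on SIMPLE_STAT_FUNCS and FACTOR_STAT_FUNCS, which are defined elsewhere in the module:

import numpy as np
import pandas as pd

idx = pd.date_range('2020-01-01', periods=252, freq='B')
returns = pd.Series(np.random.normal(0.0005, 0.01, len(idx)), index=idx)
benchmark = pd.Series(np.random.normal(0.0004, 0.008, len(idx)), index=idx)
print(perf_stats(returns, factor_returns=benchmark))
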
def symmetric_difference(self, other):
"""
Returns a new :class:`FrameSet` that contains all the elements in either
`self` or `other`, but not both.
Args:
other (:class:`FrameSet`):
Returns:
:class:`FrameSet`:
"""
other = self._cast_to_frameset(other)
if other is NotImplemented:
return NotImplemented
from_frozenset = self.items.symmetric_difference(other.items)
return self.from_iterable(from_frozenset, sort=True) | def function[symmetric_difference, parameter[self, other]]:
constant[
Returns a new :class:`FrameSet` that contains all the elements in either
`self` or `other`, but not both.
Args:
other (:class:`FrameSet`):
Returns:
:class:`FrameSet`:
]
variable[other] assign[=] call[name[self]._cast_to_frameset, parameter[name[other]]]
if compare[name[other] is name[NotImplemented]] begin[:]
return[name[NotImplemented]]
variable[from_frozenset] assign[=] call[name[self].items.symmetric_difference, parameter[name[other].items]]
return[call[name[self].from_iterable, parameter[name[from_frozenset]]]] | keyword[def] identifier[symmetric_difference] ( identifier[self] , identifier[other] ):
literal[string]
identifier[other] = identifier[self] . identifier[_cast_to_frameset] ( identifier[other] )
keyword[if] identifier[other] keyword[is] identifier[NotImplemented] :
keyword[return] identifier[NotImplemented]
identifier[from_frozenset] = identifier[self] . identifier[items] . identifier[symmetric_difference] ( identifier[other] . identifier[items] )
keyword[return] identifier[self] . identifier[from_iterable] ( identifier[from_frozenset] , identifier[sort] = keyword[True] ) | def symmetric_difference(self, other):
"""
Returns a new :class:`FrameSet` that contains all the elements in either
`self` or `other`, but not both.
Args:
other (:class:`FrameSet`):
Returns:
:class:`FrameSet`:
"""
other = self._cast_to_frameset(other)
if other is NotImplemented:
return NotImplemented # depends on [control=['if'], data=['NotImplemented']]
from_frozenset = self.items.symmetric_difference(other.items)
return self.from_iterable(from_frozenset, sort=True) |
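A short semantics sketch, assuming a fileseq-style FrameSet that accepts a frame-range string (the constructor form is an assumption):

a = FrameSet('1-5')                      # frames 1..5
b = FrameSet('4-8')                      # frames 4..8
diff = a.symmetric_difference(b)
# Frames in exactly one of the two sets, returned sorted:
assert list(diff) == [1, 2, 3, 6, 7, 8]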
def entify_main(args):
'''
    Main function. Defined separately from the launcher so that other applications can reuse the application's full configuration capabilities.
'''
# Recovering the logger
# Calling the logger when being imported
i3visiotools.logger.setupLogger(loggerName="entify", verbosity=args.verbose, logFolder=args.logfolder)
# From now on, the logger can be recovered like this:
logger = logging.getLogger("entify")
logger.info("""entify-launcher.py Copyright (C) F. Brezo and Y. Rubio (i3visio) 2014
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it under certain conditions.
For details, run:
\tpython entify-launcher.py --license""")
logger.info("Selecting the regular expressions to be analysed...")
listRegexp = []
if args.regexp:
listRegexp = config.getRegexpsByName(args.regexp)
elif args.new_regexp:
for i, r in enumerate(args.new_regexp):
            listRegexp.append(RegexpObject(name = "NewRegexp"+str(i), reg_exp = r))
if not args.web:
results = scanFolderForRegexp(folder = args.input_folder, listRegexp= listRegexp, recursive = args.recursive, verbosity=args.verbose, logFolder= args.logfolder)
else:
results = scanResource(uri = args.web, listRegexp= listRegexp, verbosity=args.verbose, logFolder= args.logfolder)
logger.info("Printing the results:\n" + general.dictToJson(results))
if args.output_folder:
logger.info("Preparing the output folder...")
if not os.path.exists(args.output_folder):
logger.warning("The output folder \'" + args.output_folder + "\' does not exist. The system will try to create it.")
os.makedirs(args.output_folder)
logger.info("Storing the results...")
"""if "csv" in args.extension:
with open(os.path.join(args.output_folder, "results.csv"), "w") as oF:
oF.write(resultsToCSV(results))"""
if "json" in args.extension:
with open(os.path.join(args.output_folder, "results.json"), "w") as oF:
oF.write(general.dictToJson(results)) | def function[entify_main, parameter[args]]:
constant[
Main function. This function is created in this way so as to let other applications make use of the full configuration capabilities of the application.
]
call[name[i3visiotools].logger.setupLogger, parameter[]]
variable[logger] assign[=] call[name[logging].getLogger, parameter[constant[entify]]]
call[name[logger].info, parameter[constant[entify-launcher.py Copyright (C) F. Brezo and Y. Rubio (i3visio) 2014
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it under certain conditions.
For details, run:
python entify-launcher.py --license]]]
call[name[logger].info, parameter[constant[Selecting the regular expressions to be analysed...]]]
variable[listRegexp] assign[=] list[[]]
if name[args].regexp begin[:]
variable[listRegexp] assign[=] call[name[config].getRegexpsByName, parameter[name[args].regexp]]
if <ast.UnaryOp object at 0x7da20c6c78b0> begin[:]
variable[results] assign[=] call[name[scanFolderForRegexp], parameter[]]
call[name[logger].info, parameter[binary_operation[constant[Printing the results:
] + call[name[general].dictToJson, parameter[name[results]]]]]]
if name[args].output_folder begin[:]
call[name[logger].info, parameter[constant[Preparing the output folder...]]]
if <ast.UnaryOp object at 0x7da2054a6860> begin[:]
call[name[logger].warning, parameter[binary_operation[binary_operation[constant[The output folder '] + name[args].output_folder] + constant[' does not exist. The system will try to create it.]]]]
call[name[os].makedirs, parameter[name[args].output_folder]]
call[name[logger].info, parameter[constant[Storing the results...]]]
constant[if "csv" in args.extension:
with open(os.path.join(args.output_folder, "results.csv"), "w") as oF:
oF.write(resultsToCSV(results))]
if compare[constant[json] in name[args].extension] begin[:]
with call[name[open], parameter[call[name[os].path.join, parameter[name[args].output_folder, constant[results.json]]], constant[w]]] begin[:]
call[name[oF].write, parameter[call[name[general].dictToJson, parameter[name[results]]]]] | keyword[def] identifier[entify_main] ( identifier[args] ):
literal[string]
identifier[i3visiotools] . identifier[logger] . identifier[setupLogger] ( identifier[loggerName] = literal[string] , identifier[verbosity] = identifier[args] . identifier[verbose] , identifier[logFolder] = identifier[args] . identifier[logfolder] )
identifier[logger] = identifier[logging] . identifier[getLogger] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[listRegexp] =[]
keyword[if] identifier[args] . identifier[regexp] :
identifier[listRegexp] = identifier[config] . identifier[getRegexpsByName] ( identifier[args] . identifier[regexp] )
keyword[elif] identifier[args] . identifier[new_regexp] :
keyword[for] identifier[i] , identifier[r] keyword[in] identifier[enumerate] ( identifier[args] . identifier[new_regexp] ):
identifier[list] . identifier[Regexp] . identifier[append] ( identifier[RegexpObject] ( identifier[name] = literal[string] + identifier[str] ( identifier[i] ), identifier[reg_exp] = identifier[args] . identifier[new_regexp] ))
keyword[if] keyword[not] identifier[args] . identifier[web] :
identifier[results] = identifier[scanFolderForRegexp] ( identifier[folder] = identifier[args] . identifier[input_folder] , identifier[listRegexp] = identifier[listRegexp] , identifier[recursive] = identifier[args] . identifier[recursive] , identifier[verbosity] = identifier[args] . identifier[verbose] , identifier[logFolder] = identifier[args] . identifier[logfolder] )
keyword[else] :
identifier[results] = identifier[scanResource] ( identifier[uri] = identifier[args] . identifier[web] , identifier[listRegexp] = identifier[listRegexp] , identifier[verbosity] = identifier[args] . identifier[verbose] , identifier[logFolder] = identifier[args] . identifier[logfolder] )
identifier[logger] . identifier[info] ( literal[string] + identifier[general] . identifier[dictToJson] ( identifier[results] ))
keyword[if] identifier[args] . identifier[output_folder] :
identifier[logger] . identifier[info] ( literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[args] . identifier[output_folder] ):
identifier[logger] . identifier[warning] ( literal[string] + identifier[args] . identifier[output_folder] + literal[string] )
identifier[os] . identifier[makedirs] ( identifier[args] . identifier[output_folder] )
identifier[logger] . identifier[info] ( literal[string] )
literal[string]
keyword[if] literal[string] keyword[in] identifier[args] . identifier[extension] :
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[args] . identifier[output_folder] , literal[string] ), literal[string] ) keyword[as] identifier[oF] :
identifier[oF] . identifier[write] ( identifier[general] . identifier[dictToJson] ( identifier[results] )) | def entify_main(args):
"""
    Main function. Defined separately from the launcher so that other applications can reuse the application's full configuration capabilities.
""" # Recovering the logger
# Calling the logger when being imported
i3visiotools.logger.setupLogger(loggerName='entify', verbosity=args.verbose, logFolder=args.logfolder) # From now on, the logger can be recovered like this:
logger = logging.getLogger('entify')
logger.info('entify-launcher.py Copyright (C) F. Brezo and Y. Rubio (i3visio) 2014\nThis program comes with ABSOLUTELY NO WARRANTY.\nThis is free software, and you are welcome to redistribute it under certain conditions.\nFor details, run:\n\tpython entify-launcher.py --license')
logger.info('Selecting the regular expressions to be analysed...')
listRegexp = []
if args.regexp:
listRegexp = config.getRegexpsByName(args.regexp) # depends on [control=['if'], data=[]]
elif args.new_regexp:
for (i, r) in enumerate(args.new_regexp):
            listRegexp.append(RegexpObject(name='NewRegexp' + str(i), reg_exp=r)) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
if not args.web:
results = scanFolderForRegexp(folder=args.input_folder, listRegexp=listRegexp, recursive=args.recursive, verbosity=args.verbose, logFolder=args.logfolder) # depends on [control=['if'], data=[]]
else:
results = scanResource(uri=args.web, listRegexp=listRegexp, verbosity=args.verbose, logFolder=args.logfolder)
logger.info('Printing the results:\n' + general.dictToJson(results))
if args.output_folder:
logger.info('Preparing the output folder...')
if not os.path.exists(args.output_folder):
logger.warning("The output folder '" + args.output_folder + "' does not exist. The system will try to create it.")
os.makedirs(args.output_folder) # depends on [control=['if'], data=[]]
logger.info('Storing the results...')
'if "csv" in args.extension:\n\t\t\twith open(os.path.join(args.output_folder, "results.csv"), "w") as oF:\n\t\t\t\toF.write(resultsToCSV(results))'
if 'json' in args.extension:
with open(os.path.join(args.output_folder, 'results.json'), 'w') as oF:
oF.write(general.dictToJson(results)) # depends on [control=['with'], data=['oF']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
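A hypothetical invocation sketch: the attribute names mirror those read inside entify_main, but the real values normally come from the launcher script's argparse setup.

from argparse import Namespace

args = Namespace(
    verbose=1,
    logfolder='./logs',
    regexp=['email'],        # named regexps to load from config
    new_regexp=None,
    web=None,                # None -> scan input_folder instead of a URI
    input_folder='./samples',
    recursive=True,
    output_folder='./results',
    extension=['json'],
)
entify_main(args)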
def list_formats(format_type, backend=None):
"""
Returns list of supported formats for a particular
backend.
"""
if backend is None:
backend = Store.current_backend
mode = Store.renderers[backend].mode if backend in Store.renderers else None
else:
split = backend.split(':')
backend, mode = split if len(split)==2 else (split[0], 'default')
if backend in Store.renderers:
return Store.renderers[backend].mode_formats[format_type][mode]
else:
return [] | def function[list_formats, parameter[format_type, backend]]:
constant[
Returns list of supported formats for a particular
backend.
]
if compare[name[backend] is constant[None]] begin[:]
variable[backend] assign[=] name[Store].current_backend
variable[mode] assign[=] <ast.IfExp object at 0x7da18f00fc70>
if compare[name[backend] in name[Store].renderers] begin[:]
return[call[call[call[name[Store].renderers][name[backend]].mode_formats][name[format_type]]][name[mode]]] | keyword[def] identifier[list_formats] ( identifier[format_type] , identifier[backend] = keyword[None] ):
literal[string]
keyword[if] identifier[backend] keyword[is] keyword[None] :
identifier[backend] = identifier[Store] . identifier[current_backend]
identifier[mode] = identifier[Store] . identifier[renderers] [ identifier[backend] ]. identifier[mode] keyword[if] identifier[backend] keyword[in] identifier[Store] . identifier[renderers] keyword[else] keyword[None]
keyword[else] :
identifier[split] = identifier[backend] . identifier[split] ( literal[string] )
identifier[backend] , identifier[mode] = identifier[split] keyword[if] identifier[len] ( identifier[split] )== literal[int] keyword[else] ( identifier[split] [ literal[int] ], literal[string] )
keyword[if] identifier[backend] keyword[in] identifier[Store] . identifier[renderers] :
keyword[return] identifier[Store] . identifier[renderers] [ identifier[backend] ]. identifier[mode_formats] [ identifier[format_type] ][ identifier[mode] ]
keyword[else] :
keyword[return] [] | def list_formats(format_type, backend=None):
"""
Returns list of supported formats for a particular
backend.
"""
if backend is None:
backend = Store.current_backend
mode = Store.renderers[backend].mode if backend in Store.renderers else None # depends on [control=['if'], data=['backend']]
else:
split = backend.split(':')
(backend, mode) = split if len(split) == 2 else (split[0], 'default')
if backend in Store.renderers:
return Store.renderers[backend].mode_formats[format_type][mode] # depends on [control=['if'], data=['backend']]
else:
return [] |
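Usage sketch, assuming a HoloViews-style Store with a matplotlib renderer registered; in that convention format_type is 'fig' or 'holomap'.

# Figure formats for the current backend:
print(list_formats('fig'))

# Explicit backend and mode via the 'backend:mode' form:
print(list_formats('holomap', 'matplotlib:default'))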
def get_evidence_by_hash(self, evidence_hash: str) -> Optional[Evidence]:
"""Look up an evidence by its hash."""
return self.session.query(Evidence).filter(Evidence.sha512 == evidence_hash).one_or_none() | def function[get_evidence_by_hash, parameter[self, evidence_hash]]:
constant[Look up an evidence by its hash.]
return[call[call[call[name[self].session.query, parameter[name[Evidence]]].filter, parameter[compare[name[Evidence].sha512 equal[==] name[evidence_hash]]]].one_or_none, parameter[]]] | keyword[def] identifier[get_evidence_by_hash] ( identifier[self] , identifier[evidence_hash] : identifier[str] )-> identifier[Optional] [ identifier[Evidence] ]:
literal[string]
keyword[return] identifier[self] . identifier[session] . identifier[query] ( identifier[Evidence] ). identifier[filter] ( identifier[Evidence] . identifier[sha512] == identifier[evidence_hash] ). identifier[one_or_none] () | def get_evidence_by_hash(self, evidence_hash: str) -> Optional[Evidence]:
"""Look up an evidence by its hash."""
return self.session.query(Evidence).filter(Evidence.sha512 == evidence_hash).one_or_none() |
def schedule_in(secs, target=None, args=(), kwargs=None):
"""insert a greenlet into the scheduler to run after a set time
If provided a function, it is wrapped in a new greenlet
:param secs: the number of seconds to wait before running the target
    :type secs: int or float
:param target: what to schedule
:type target: function or greenlet
:param args:
arguments for the function (only used if ``target`` is a function)
:type args: tuple
:param kwargs:
keyword arguments for the function (only used if ``target`` is a
function)
:type kwargs: dict or None
:returns: the ``target`` argument
This function can also be used as a decorator:
>>> @schedule_in(30)
>>> def f():
... print 'hello from f'
and args/kwargs can also be preloaded:
>>> @schedule_in(30, args=('world',))
>>> def f(name):
... print 'hello %s' % name
"""
return schedule_at(time.time() + secs, target, args, kwargs) | def function[schedule_in, parameter[secs, target, args, kwargs]]:
constant[insert a greenlet into the scheduler to run after a set time
If provided a function, it is wrapped in a new greenlet
:param secs: the number of seconds to wait before running the target
:type unixtime: int or float
:param target: what to schedule
:type target: function or greenlet
:param args:
arguments for the function (only used if ``target`` is a function)
:type args: tuple
:param kwargs:
keyword arguments for the function (only used if ``target`` is a
function)
:type kwargs: dict or None
:returns: the ``target`` argument
This function can also be used as a decorator:
>>> @schedule_in(30)
>>> def f():
... print 'hello from f'
and args/kwargs can also be preloaded:
>>> @schedule_in(30, args=('world',))
>>> def f(name):
... print 'hello %s' % name
]
return[call[name[schedule_at], parameter[binary_operation[call[name[time].time, parameter[]] + name[secs]], name[target], name[args], name[kwargs]]]] | keyword[def] identifier[schedule_in] ( identifier[secs] , identifier[target] = keyword[None] , identifier[args] =(), identifier[kwargs] = keyword[None] ):
literal[string]
keyword[return] identifier[schedule_at] ( identifier[time] . identifier[time] ()+ identifier[secs] , identifier[target] , identifier[args] , identifier[kwargs] ) | def schedule_in(secs, target=None, args=(), kwargs=None):
"""insert a greenlet into the scheduler to run after a set time
If provided a function, it is wrapped in a new greenlet
:param secs: the number of seconds to wait before running the target
    :type secs: int or float
:param target: what to schedule
:type target: function or greenlet
:param args:
arguments for the function (only used if ``target`` is a function)
:type args: tuple
:param kwargs:
keyword arguments for the function (only used if ``target`` is a
function)
:type kwargs: dict or None
:returns: the ``target`` argument
This function can also be used as a decorator:
>>> @schedule_in(30)
>>> def f():
... print 'hello from f'
and args/kwargs can also be preloaded:
>>> @schedule_in(30, args=('world',))
>>> def f(name):
... print 'hello %s' % name
"""
return schedule_at(time.time() + secs, target, args, kwargs) |
def task(func, **config):
"""Declare a function or method to be a Yaz task
@yaz.task
def talk(message: str = "Hello World!"):
return message
Or... group multiple tasks together
class Tools(yaz.Plugin):
@yaz.task
def say(self, message: str = "Hello World!"):
return message
@yaz.task(option__choices=["A", "B", "C"])
def choose(self, option: str = "A"):
return option
"""
if func.__name__ == func.__qualname__:
        assert func.__qualname__ not in _task_list, "Can not define the same task \"{}\" twice".format(func.__qualname__)
logger.debug("Found task %s", func)
_task_list[func.__qualname__] = Task(plugin_class=None, func=func, config=config)
else:
func.yaz_task_config = config
return func | def function[task, parameter[func]]:
constant[Declare a function or method to be a Yaz task
@yaz.task
def talk(message: str = "Hello World!"):
return message
Or... group multiple tasks together
class Tools(yaz.Plugin):
@yaz.task
def say(self, message: str = "Hello World!"):
return message
@yaz.task(option__choices=["A", "B", "C"])
def choose(self, option: str = "A"):
return option
]
if compare[name[func].__name__ equal[==] name[func].__qualname__] begin[:]
assert[<ast.UnaryOp object at 0x7da1aff6fb80>]
call[name[logger].debug, parameter[constant[Found task %s], name[func]]]
call[name[_task_list]][name[func].__qualname__] assign[=] call[name[Task], parameter[]]
return[name[func]] | keyword[def] identifier[task] ( identifier[func] ,** identifier[config] ):
literal[string]
keyword[if] identifier[func] . identifier[__name__] == identifier[func] . identifier[__qualname__] :
keyword[assert] keyword[not] identifier[func] . identifier[__qualname__] keyword[in] identifier[_task_list] , literal[string] . identifier[format] ( identifier[func] . identifier[__qualname__] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[func] )
identifier[_task_list] [ identifier[func] . identifier[__qualname__] ]= identifier[Task] ( identifier[plugin_class] = keyword[None] , identifier[func] = identifier[func] , identifier[config] = identifier[config] )
keyword[else] :
identifier[func] . identifier[yaz_task_config] = identifier[config]
keyword[return] identifier[func] | def task(func, **config):
"""Declare a function or method to be a Yaz task
@yaz.task
def talk(message: str = "Hello World!"):
return message
Or... group multiple tasks together
class Tools(yaz.Plugin):
@yaz.task
def say(self, message: str = "Hello World!"):
return message
@yaz.task(option__choices=["A", "B", "C"])
def choose(self, option: str = "A"):
return option
"""
if func.__name__ == func.__qualname__:
assert not func.__qualname__ in _task_list, 'Can not define the same task "{}" twice'.format(func.__qualname__)
logger.debug('Found task %s', func)
_task_list[func.__qualname__] = Task(plugin_class=None, func=func, config=config) # depends on [control=['if'], data=[]]
else:
func.yaz_task_config = config
return func |
def assert_not_in(obj, seq, message=None, extra=None):
"""Raises an AssertionError if obj is in iter."""
# for very long strings, provide a truncated error
if isinstance(seq, six.string_types) and obj in seq and len(seq) > 200:
index = seq.find(obj)
start_index = index - 50
if start_index > 0:
truncated = "(truncated) ..."
else:
truncated = ""
start_index = 0
end_index = index + len(obj) + 50
truncated += seq[start_index:end_index]
if end_index < len(seq):
truncated += "... (truncated)"
assert False, _assert_fail_message(message, obj, truncated, "is in", extra)
assert obj not in seq, _assert_fail_message(message, obj, seq, "is in", extra) | def function[assert_not_in, parameter[obj, seq, message, extra]]:
constant[Raises an AssertionError if obj is in iter.]
if <ast.BoolOp object at 0x7da1b0f50c70> begin[:]
variable[index] assign[=] call[name[seq].find, parameter[name[obj]]]
variable[start_index] assign[=] binary_operation[name[index] - constant[50]]
if compare[name[start_index] greater[>] constant[0]] begin[:]
variable[truncated] assign[=] constant[(truncated) ...]
variable[end_index] assign[=] binary_operation[binary_operation[name[index] + call[name[len], parameter[name[obj]]]] + constant[50]]
<ast.AugAssign object at 0x7da1b0f50400>
if compare[name[end_index] less[<] call[name[len], parameter[name[seq]]]] begin[:]
<ast.AugAssign object at 0x7da1b0e63760>
assert[constant[False]]
assert[compare[name[obj] <ast.NotIn object at 0x7da2590d7190> name[seq]]] | keyword[def] identifier[assert_not_in] ( identifier[obj] , identifier[seq] , identifier[message] = keyword[None] , identifier[extra] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[seq] , identifier[six] . identifier[string_types] ) keyword[and] identifier[obj] keyword[in] identifier[seq] keyword[and] identifier[len] ( identifier[seq] )> literal[int] :
identifier[index] = identifier[seq] . identifier[find] ( identifier[obj] )
identifier[start_index] = identifier[index] - literal[int]
keyword[if] identifier[start_index] > literal[int] :
identifier[truncated] = literal[string]
keyword[else] :
identifier[truncated] = literal[string]
identifier[start_index] = literal[int]
identifier[end_index] = identifier[index] + identifier[len] ( identifier[obj] )+ literal[int]
identifier[truncated] += identifier[seq] [ identifier[start_index] : identifier[end_index] ]
keyword[if] identifier[end_index] < identifier[len] ( identifier[seq] ):
identifier[truncated] += literal[string]
keyword[assert] keyword[False] , identifier[_assert_fail_message] ( identifier[message] , identifier[obj] , identifier[truncated] , literal[string] , identifier[extra] )
keyword[assert] identifier[obj] keyword[not] keyword[in] identifier[seq] , identifier[_assert_fail_message] ( identifier[message] , identifier[obj] , identifier[seq] , literal[string] , identifier[extra] ) | def assert_not_in(obj, seq, message=None, extra=None):
"""Raises an AssertionError if obj is in iter."""
# for very long strings, provide a truncated error
if isinstance(seq, six.string_types) and obj in seq and (len(seq) > 200):
index = seq.find(obj)
start_index = index - 50
if start_index > 0:
truncated = '(truncated) ...' # depends on [control=['if'], data=[]]
else:
truncated = ''
start_index = 0
end_index = index + len(obj) + 50
truncated += seq[start_index:end_index]
if end_index < len(seq):
truncated += '... (truncated)' # depends on [control=['if'], data=[]]
assert False, _assert_fail_message(message, obj, truncated, 'is in', extra) # depends on [control=['if'], data=[]]
assert obj not in seq, _assert_fail_message(message, obj, seq, 'is in', extra) |
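A small failure sketch showing the truncation window, assuming the module's _assert_fail_message helper is available: for strings longer than 200 characters the message keeps roughly 50 characters of context on each side of the first match.

long_text = 'x' * 300 + 'NEEDLE' + 'y' * 300
try:
    assert_not_in('NEEDLE', long_text)
except AssertionError as exc:
    # message contains '(truncated) ...xxx...NEEDLE...yyy... (truncated)'
    print(exc)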
def computeParams(self, params):
"""Computes the parameters depending on :math:`\lambda`.
Notes
-----
It needs to be called again if :math:`\lambda` changes during
evolution.
Parameters
----------
params:
A dictionary of the manually set parameters.
"""
self.mu = params.get("mu", int(self.lambda_ / 2))
rweights = params.get("weights", "superlinear")
if rweights == "superlinear":
self.weights = numpy.log(self.mu + 0.5) - \
numpy.log(numpy.arange(1, self.mu + 1))
elif rweights == "linear":
self.weights = self.mu + 0.5 - numpy.arange(1, self.mu + 1)
elif rweights == "equal":
self.weights = numpy.ones(self.mu)
else:
raise RuntimeError("Unknown weights : %s" % rweights)
self.weights /= sum(self.weights)
self.mueff = 1. / sum(self.weights ** 2)
self.cc = params.get("ccum", 4. / (self.dim + 4.))
self.cs = params.get("cs", (self.mueff + 2.) /
(self.dim + self.mueff + 3.))
self.ccov1 = params.get(
"ccov1", 2. / ((self.dim + 1.3) ** 2 + self.mueff))
self.ccovmu = params.get("ccovmu", 2. * (
self.mueff - 2. + 1. / self.mueff) / (
(self.dim + 2.) ** 2 + self.mueff))
self.ccovmu = min(1 - self.ccov1, self.ccovmu)
self.damps = 1. + 2. * \
max(0, numpy.sqrt((self.mueff - 1.) / (self.dim + 1.)) - 1.) + \
self.cs
self.damps = params.get("damps", self.damps)
return | def function[computeParams, parameter[self, params]]:
constant[Computes the parameters depending on :math:`\lambda`.
Notes
-----
It needs to be called again if :math:`\lambda` changes during
evolution.
Parameters
----------
params:
A dictionary of the manually set parameters.
]
name[self].mu assign[=] call[name[params].get, parameter[constant[mu], call[name[int], parameter[binary_operation[name[self].lambda_ / constant[2]]]]]]
variable[rweights] assign[=] call[name[params].get, parameter[constant[weights], constant[superlinear]]]
if compare[name[rweights] equal[==] constant[superlinear]] begin[:]
name[self].weights assign[=] binary_operation[call[name[numpy].log, parameter[binary_operation[name[self].mu + constant[0.5]]]] - call[name[numpy].log, parameter[call[name[numpy].arange, parameter[constant[1], binary_operation[name[self].mu + constant[1]]]]]]]
<ast.AugAssign object at 0x7da1b2819180>
name[self].mueff assign[=] binary_operation[constant[1.0] / call[name[sum], parameter[binary_operation[name[self].weights ** constant[2]]]]]
name[self].cc assign[=] call[name[params].get, parameter[constant[ccum], binary_operation[constant[4.0] / binary_operation[name[self].dim + constant[4.0]]]]]
name[self].cs assign[=] call[name[params].get, parameter[constant[cs], binary_operation[binary_operation[name[self].mueff + constant[2.0]] / binary_operation[binary_operation[name[self].dim + name[self].mueff] + constant[3.0]]]]]
name[self].ccov1 assign[=] call[name[params].get, parameter[constant[ccov1], binary_operation[constant[2.0] / binary_operation[binary_operation[binary_operation[name[self].dim + constant[1.3]] ** constant[2]] + name[self].mueff]]]]
name[self].ccovmu assign[=] call[name[params].get, parameter[constant[ccovmu], binary_operation[binary_operation[constant[2.0] * binary_operation[binary_operation[name[self].mueff - constant[2.0]] + binary_operation[constant[1.0] / name[self].mueff]]] / binary_operation[binary_operation[binary_operation[name[self].dim + constant[2.0]] ** constant[2]] + name[self].mueff]]]]
name[self].ccovmu assign[=] call[name[min], parameter[binary_operation[constant[1] - name[self].ccov1], name[self].ccovmu]]
name[self].damps assign[=] binary_operation[binary_operation[constant[1.0] + binary_operation[constant[2.0] * call[name[max], parameter[constant[0], binary_operation[call[name[numpy].sqrt, parameter[binary_operation[binary_operation[name[self].mueff - constant[1.0]] / binary_operation[name[self].dim + constant[1.0]]]]] - constant[1.0]]]]]] + name[self].cs]
name[self].damps assign[=] call[name[params].get, parameter[constant[damps], name[self].damps]]
return[None] | keyword[def] identifier[computeParams] ( identifier[self] , identifier[params] ):
literal[string]
identifier[self] . identifier[mu] = identifier[params] . identifier[get] ( literal[string] , identifier[int] ( identifier[self] . identifier[lambda_] / literal[int] ))
identifier[rweights] = identifier[params] . identifier[get] ( literal[string] , literal[string] )
keyword[if] identifier[rweights] == literal[string] :
identifier[self] . identifier[weights] = identifier[numpy] . identifier[log] ( identifier[self] . identifier[mu] + literal[int] )- identifier[numpy] . identifier[log] ( identifier[numpy] . identifier[arange] ( literal[int] , identifier[self] . identifier[mu] + literal[int] ))
keyword[elif] identifier[rweights] == literal[string] :
identifier[self] . identifier[weights] = identifier[self] . identifier[mu] + literal[int] - identifier[numpy] . identifier[arange] ( literal[int] , identifier[self] . identifier[mu] + literal[int] )
keyword[elif] identifier[rweights] == literal[string] :
identifier[self] . identifier[weights] = identifier[numpy] . identifier[ones] ( identifier[self] . identifier[mu] )
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[rweights] )
identifier[self] . identifier[weights] /= identifier[sum] ( identifier[self] . identifier[weights] )
identifier[self] . identifier[mueff] = literal[int] / identifier[sum] ( identifier[self] . identifier[weights] ** literal[int] )
identifier[self] . identifier[cc] = identifier[params] . identifier[get] ( literal[string] , literal[int] /( identifier[self] . identifier[dim] + literal[int] ))
identifier[self] . identifier[cs] = identifier[params] . identifier[get] ( literal[string] ,( identifier[self] . identifier[mueff] + literal[int] )/
( identifier[self] . identifier[dim] + identifier[self] . identifier[mueff] + literal[int] ))
identifier[self] . identifier[ccov1] = identifier[params] . identifier[get] (
literal[string] , literal[int] /(( identifier[self] . identifier[dim] + literal[int] )** literal[int] + identifier[self] . identifier[mueff] ))
identifier[self] . identifier[ccovmu] = identifier[params] . identifier[get] ( literal[string] , literal[int] *(
identifier[self] . identifier[mueff] - literal[int] + literal[int] / identifier[self] . identifier[mueff] )/(
( identifier[self] . identifier[dim] + literal[int] )** literal[int] + identifier[self] . identifier[mueff] ))
identifier[self] . identifier[ccovmu] = identifier[min] ( literal[int] - identifier[self] . identifier[ccov1] , identifier[self] . identifier[ccovmu] )
identifier[self] . identifier[damps] = literal[int] + literal[int] * identifier[max] ( literal[int] , identifier[numpy] . identifier[sqrt] (( identifier[self] . identifier[mueff] - literal[int] )/( identifier[self] . identifier[dim] + literal[int] ))- literal[int] )+ identifier[self] . identifier[cs]
identifier[self] . identifier[damps] = identifier[params] . identifier[get] ( literal[string] , identifier[self] . identifier[damps] )
keyword[return] | def computeParams(self, params):
"""Computes the parameters depending on :math:`\\lambda`.
Notes
-----
It needs to be called again if :math:`\\lambda` changes during
evolution.
Parameters
----------
params:
A dictionary of the manually set parameters.
"""
self.mu = params.get('mu', int(self.lambda_ / 2))
rweights = params.get('weights', 'superlinear')
if rweights == 'superlinear':
self.weights = numpy.log(self.mu + 0.5) - numpy.log(numpy.arange(1, self.mu + 1)) # depends on [control=['if'], data=[]]
elif rweights == 'linear':
self.weights = self.mu + 0.5 - numpy.arange(1, self.mu + 1) # depends on [control=['if'], data=[]]
elif rweights == 'equal':
self.weights = numpy.ones(self.mu) # depends on [control=['if'], data=[]]
else:
raise RuntimeError('Unknown weights : %s' % rweights)
self.weights /= sum(self.weights)
self.mueff = 1.0 / sum(self.weights ** 2)
self.cc = params.get('ccum', 4.0 / (self.dim + 4.0))
self.cs = params.get('cs', (self.mueff + 2.0) / (self.dim + self.mueff + 3.0))
self.ccov1 = params.get('ccov1', 2.0 / ((self.dim + 1.3) ** 2 + self.mueff))
self.ccovmu = params.get('ccovmu', 2.0 * (self.mueff - 2.0 + 1.0 / self.mueff) / ((self.dim + 2.0) ** 2 + self.mueff))
self.ccovmu = min(1 - self.ccov1, self.ccovmu)
self.damps = 1.0 + 2.0 * max(0, numpy.sqrt((self.mueff - 1.0) / (self.dim + 1.0)) - 1.0) + self.cs
self.damps = params.get('damps', self.damps)
return |
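A worked example of the default ('superlinear') recombination weights, assuming lambda = 8 so mu = 4; the variance-effective selection mass mueff follows from the normalized weights.

import numpy

lambda_ = 8
mu = lambda_ // 2                                        # 4
w = numpy.log(mu + 0.5) - numpy.log(numpy.arange(1, mu + 1))
w /= w.sum()                   # approx [0.530, 0.286, 0.143, 0.042]
mueff = 1.0 / numpy.sum(w ** 2)                          # approx 2.60
print(w, mueff)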
def propagateRst(obj):
"""
Propagate reset "rst" signal
to all subcomponents
"""
rst = obj.rst
for u in obj._units:
_tryConnect(~rst, u, 'rst_n')
_tryConnect(rst, u, 'rst') | def function[propagateRst, parameter[obj]]:
constant[
Propagate reset "rst" signal
to all subcomponents
]
variable[rst] assign[=] name[obj].rst
for taget[name[u]] in starred[name[obj]._units] begin[:]
call[name[_tryConnect], parameter[<ast.UnaryOp object at 0x7da1b03f9fc0>, name[u], constant[rst_n]]]
call[name[_tryConnect], parameter[name[rst], name[u], constant[rst]]] | keyword[def] identifier[propagateRst] ( identifier[obj] ):
literal[string]
identifier[rst] = identifier[obj] . identifier[rst]
keyword[for] identifier[u] keyword[in] identifier[obj] . identifier[_units] :
identifier[_tryConnect] (~ identifier[rst] , identifier[u] , literal[string] )
identifier[_tryConnect] ( identifier[rst] , identifier[u] , literal[string] ) | def propagateRst(obj):
"""
Propagate reset "rst" signal
to all subcomponents
"""
rst = obj.rst
for u in obj._units:
_tryConnect(~rst, u, 'rst_n')
_tryConnect(rst, u, 'rst') # depends on [control=['for'], data=['u']] |
def update_additional_charge(self, *, recurring_billing_id, description, plan_value, plan_tax, plan_tax_return_base,
currency):
"""
Updates the information from an additional charge in an invoice.
Args:
recurring_billing_id: Identifier of the additional charge.
            description: Description of the charge.
            plan_value: Charge value (sent as ITEM_VALUE).
            plan_tax: Tax amount (sent as ITEM_TAX).
            plan_tax_return_base: Tax return base (sent as ITEM_TAX_RETURN_BASE).
            currency: Currency code applied to the three values above.
        Returns:
            The API response to the PUT request.
        """
payload = {
"description": description,
"additionalValues": [
{
"name": "ITEM_VALUE",
"value": plan_value,
"currency": currency
},
{
"name": "ITEM_TAX",
"value": plan_tax,
"currency": currency
},
{
"name": "ITEM_TAX_RETURN_BASE",
"value": plan_tax_return_base,
"currency": currency
}
]
}
fmt = 'recurringBillItems/{}'.format(recurring_billing_id)
return self.client._put(self.url + fmt, payload=payload, headers=self.get_headers()) | def function[update_additional_charge, parameter[self]]:
constant[
Updates the information from an additional charge in an invoice.
Args:
recurring_billing_id: Identifier of the additional charge.
description:
plan_value:
plan_tax:
plan_tax_return_base:
currency:
Returns:
]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da18bccb970>, <ast.Constant object at 0x7da18bccb0a0>], [<ast.Name object at 0x7da18bcc8670>, <ast.List object at 0x7da18bcc9e40>]]
variable[fmt] assign[=] call[constant[recurringBillItems/{}].format, parameter[name[recurring_billing_id]]]
return[call[name[self].client._put, parameter[binary_operation[name[self].url + name[fmt]]]]] | keyword[def] identifier[update_additional_charge] ( identifier[self] ,*, identifier[recurring_billing_id] , identifier[description] , identifier[plan_value] , identifier[plan_tax] , identifier[plan_tax_return_base] ,
identifier[currency] ):
literal[string]
identifier[payload] ={
literal[string] : identifier[description] ,
literal[string] :[
{
literal[string] : literal[string] ,
literal[string] : identifier[plan_value] ,
literal[string] : identifier[currency]
},
{
literal[string] : literal[string] ,
literal[string] : identifier[plan_tax] ,
literal[string] : identifier[currency]
},
{
literal[string] : literal[string] ,
literal[string] : identifier[plan_tax_return_base] ,
literal[string] : identifier[currency]
}
]
}
identifier[fmt] = literal[string] . identifier[format] ( identifier[recurring_billing_id] )
keyword[return] identifier[self] . identifier[client] . identifier[_put] ( identifier[self] . identifier[url] + identifier[fmt] , identifier[payload] = identifier[payload] , identifier[headers] = identifier[self] . identifier[get_headers] ()) | def update_additional_charge(self, *, recurring_billing_id, description, plan_value, plan_tax, plan_tax_return_base, currency):
"""
Updates the information from an additional charge in an invoice.
Args:
recurring_billing_id: Identifier of the additional charge.
description:
plan_value:
plan_tax:
plan_tax_return_base:
currency:
Returns:
"""
payload = {'description': description, 'additionalValues': [{'name': 'ITEM_VALUE', 'value': plan_value, 'currency': currency}, {'name': 'ITEM_TAX', 'value': plan_tax, 'currency': currency}, {'name': 'ITEM_TAX_RETURN_BASE', 'value': plan_tax_return_base, 'currency': currency}]}
fmt = 'recurringBillItems/{}'.format(recurring_billing_id)
return self.client._put(self.url + fmt, payload=payload, headers=self.get_headers()) |
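A self-contained sketch of the payload this method assembles, using illustrative values; the real call then PUTs it to recurringBillItems/<id> with the wrapper's auth headers.

description, currency = 'Setup fee adjustment', 'COP'
plan_value, plan_tax, plan_tax_return_base = '25000', '4000', '21000'
payload = {
    'description': description,
    'additionalValues': [
        {'name': 'ITEM_VALUE', 'value': plan_value, 'currency': currency},
        {'name': 'ITEM_TAX', 'value': plan_tax, 'currency': currency},
        {'name': 'ITEM_TAX_RETURN_BASE', 'value': plan_tax_return_base, 'currency': currency},
    ],
}
print('recurringBillItems/{}'.format('abc123'))   # hypothetical charge id -> PUT target path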
def _finalize_profiles(self):
"""
Deal with the first walks by joining profiles to other stops within walking distance.
"""
for stop, stop_profile in self._stop_profiles.items():
assert (isinstance(stop_profile, NodeProfileMultiObjective))
neighbor_label_bags = []
walk_durations_to_neighbors = []
departure_arrival_stop_pairs = []
if stop_profile.get_walk_to_target_duration() != 0 and stop in self._walk_network.node:
neighbors = networkx.all_neighbors(self._walk_network, stop)
for neighbor in neighbors:
neighbor_profile = self._stop_profiles[neighbor]
assert (isinstance(neighbor_profile, NodeProfileMultiObjective))
neighbor_real_connection_labels = neighbor_profile.get_labels_for_real_connections()
neighbor_label_bags.append(neighbor_real_connection_labels)
walk_durations_to_neighbors.append(int(self._walk_network.get_edge_data(stop, neighbor)["d_walk"] /
self._walk_speed))
departure_arrival_stop_pairs.append((stop, neighbor))
stop_profile.finalize(neighbor_label_bags, walk_durations_to_neighbors, departure_arrival_stop_pairs) | def function[_finalize_profiles, parameter[self]]:
constant[
Deal with the first walks by joining profiles to other stops within walking distance.
]
for taget[tuple[[<ast.Name object at 0x7da1b000e020>, <ast.Name object at 0x7da1b000e080>]]] in starred[call[name[self]._stop_profiles.items, parameter[]]] begin[:]
assert[call[name[isinstance], parameter[name[stop_profile], name[NodeProfileMultiObjective]]]]
variable[neighbor_label_bags] assign[=] list[[]]
variable[walk_durations_to_neighbors] assign[=] list[[]]
variable[departure_arrival_stop_pairs] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b000f370> begin[:]
variable[neighbors] assign[=] call[name[networkx].all_neighbors, parameter[name[self]._walk_network, name[stop]]]
for taget[name[neighbor]] in starred[name[neighbors]] begin[:]
variable[neighbor_profile] assign[=] call[name[self]._stop_profiles][name[neighbor]]
assert[call[name[isinstance], parameter[name[neighbor_profile], name[NodeProfileMultiObjective]]]]
variable[neighbor_real_connection_labels] assign[=] call[name[neighbor_profile].get_labels_for_real_connections, parameter[]]
call[name[neighbor_label_bags].append, parameter[name[neighbor_real_connection_labels]]]
call[name[walk_durations_to_neighbors].append, parameter[call[name[int], parameter[binary_operation[call[call[name[self]._walk_network.get_edge_data, parameter[name[stop], name[neighbor]]]][constant[d_walk]] / name[self]._walk_speed]]]]]
call[name[departure_arrival_stop_pairs].append, parameter[tuple[[<ast.Name object at 0x7da1b000e230>, <ast.Name object at 0x7da1b000e2c0>]]]]
call[name[stop_profile].finalize, parameter[name[neighbor_label_bags], name[walk_durations_to_neighbors], name[departure_arrival_stop_pairs]]] | keyword[def] identifier[_finalize_profiles] ( identifier[self] ):
literal[string]
keyword[for] identifier[stop] , identifier[stop_profile] keyword[in] identifier[self] . identifier[_stop_profiles] . identifier[items] ():
keyword[assert] ( identifier[isinstance] ( identifier[stop_profile] , identifier[NodeProfileMultiObjective] ))
identifier[neighbor_label_bags] =[]
identifier[walk_durations_to_neighbors] =[]
identifier[departure_arrival_stop_pairs] =[]
keyword[if] identifier[stop_profile] . identifier[get_walk_to_target_duration] ()!= literal[int] keyword[and] identifier[stop] keyword[in] identifier[self] . identifier[_walk_network] . identifier[node] :
identifier[neighbors] = identifier[networkx] . identifier[all_neighbors] ( identifier[self] . identifier[_walk_network] , identifier[stop] )
keyword[for] identifier[neighbor] keyword[in] identifier[neighbors] :
identifier[neighbor_profile] = identifier[self] . identifier[_stop_profiles] [ identifier[neighbor] ]
keyword[assert] ( identifier[isinstance] ( identifier[neighbor_profile] , identifier[NodeProfileMultiObjective] ))
identifier[neighbor_real_connection_labels] = identifier[neighbor_profile] . identifier[get_labels_for_real_connections] ()
identifier[neighbor_label_bags] . identifier[append] ( identifier[neighbor_real_connection_labels] )
identifier[walk_durations_to_neighbors] . identifier[append] ( identifier[int] ( identifier[self] . identifier[_walk_network] . identifier[get_edge_data] ( identifier[stop] , identifier[neighbor] )[ literal[string] ]/
identifier[self] . identifier[_walk_speed] ))
identifier[departure_arrival_stop_pairs] . identifier[append] (( identifier[stop] , identifier[neighbor] ))
identifier[stop_profile] . identifier[finalize] ( identifier[neighbor_label_bags] , identifier[walk_durations_to_neighbors] , identifier[departure_arrival_stop_pairs] ) | def _finalize_profiles(self):
"""
Deal with the first walks by joining profiles to other stops within walking distance.
"""
for (stop, stop_profile) in self._stop_profiles.items():
assert isinstance(stop_profile, NodeProfileMultiObjective)
neighbor_label_bags = []
walk_durations_to_neighbors = []
departure_arrival_stop_pairs = []
if stop_profile.get_walk_to_target_duration() != 0 and stop in self._walk_network.node:
neighbors = networkx.all_neighbors(self._walk_network, stop)
for neighbor in neighbors:
neighbor_profile = self._stop_profiles[neighbor]
assert isinstance(neighbor_profile, NodeProfileMultiObjective)
neighbor_real_connection_labels = neighbor_profile.get_labels_for_real_connections()
neighbor_label_bags.append(neighbor_real_connection_labels)
walk_durations_to_neighbors.append(int(self._walk_network.get_edge_data(stop, neighbor)['d_walk'] / self._walk_speed))
departure_arrival_stop_pairs.append((stop, neighbor)) # depends on [control=['for'], data=['neighbor']] # depends on [control=['if'], data=[]]
stop_profile.finalize(neighbor_label_bags, walk_durations_to_neighbors, departure_arrival_stop_pairs) # depends on [control=['for'], data=[]] |
def _microtime():
'''
    Return the current Unix time as a string: the fractional part (six decimal places) concatenated with the integer seconds
:return:
'''
val1, val2 = math.modf(time.time())
val2 = int(val2)
return '{0:f}{1}'.format(val1, val2) | def function[_microtime, parameter[]]:
constant[
Return a Unix timestamp as a string of digits
:return:
]
<ast.Tuple object at 0x7da2047e9000> assign[=] call[name[math].modf, parameter[call[name[time].time, parameter[]]]]
variable[val2] assign[=] call[name[int], parameter[name[val2]]]
return[call[constant[{0:f}{1}].format, parameter[name[val1], name[val2]]]] | keyword[def] identifier[_microtime] ():
literal[string]
identifier[val1] , identifier[val2] = identifier[math] . identifier[modf] ( identifier[time] . identifier[time] ())
identifier[val2] = identifier[int] ( identifier[val2] )
keyword[return] literal[string] . identifier[format] ( identifier[val1] , identifier[val2] ) | def _microtime():
"""
Return a Unix timestamp as a string of digits
:return:
"""
(val1, val2) = math.modf(time.time())
val2 = int(val2)
return '{0:f}{1}'.format(val1, val2) |
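A behavior sketch: the result is the fractional part formatted to six decimal places concatenated with the integer seconds, so it is not purely digits; it keeps a leading '0.'.

import math

val1, val2 = math.modf(12345.25)
print('{0:f}{1}'.format(val1, int(val2)))   # -> 0.25000012345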
def Update(self, data):
"""Updates a Beta distribution.
data: pair of int (heads, tails)
"""
heads, tails = data
self.alpha += heads
self.beta += tails | def function[Update, parameter[self, data]]:
constant[Updates a Beta distribution.
data: pair of int (heads, tails)
]
<ast.Tuple object at 0x7da20c991030> assign[=] name[data]
<ast.AugAssign object at 0x7da20c9930a0>
<ast.AugAssign object at 0x7da20c990250> | keyword[def] identifier[Update] ( identifier[self] , identifier[data] ):
literal[string]
identifier[heads] , identifier[tails] = identifier[data]
identifier[self] . identifier[alpha] += identifier[heads]
identifier[self] . identifier[beta] += identifier[tails] | def Update(self, data):
"""Updates a Beta distribution.
data: pair of int (heads, tails)
"""
(heads, tails) = data
self.alpha += heads
self.beta += tails |
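A worked update, assuming the class defaults to a uniform Beta(1, 1) prior (as in the Think Bayes version): observing 3 heads and 2 tails gives Beta(4, 3), whose mean is 4/7, about 0.571.

prior = Beta()            # assumed default: alpha = beta = 1
prior.Update((3, 2))      # 3 heads, 2 tails
assert (prior.alpha, prior.beta) == (4, 3)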
def lookup_ids(self, keys):
"""Lookup the integer ID associated with each (namespace, key) in the
keys list"""
keys_len = len(keys)
ids = {namespace_key: None for namespace_key in keys}
start = 0
bulk_insert = self.bulk_insert
query = 'SELECT namespace, key, id FROM gauged_keys WHERE '
check = '(namespace = %s AND key = %s) '
cursor = self.cursor
execute = cursor.execute
while start < keys_len:
rows = keys[start:start+bulk_insert]
params = [param for params in rows for param in params]
id_query = query + (check + ' OR ') * (len(rows) - 1) + check
execute(id_query, params)
for namespace, key, id_ in cursor:
ids[(namespace, key)] = id_
start += bulk_insert
return ids | def function[lookup_ids, parameter[self, keys]]:
constant[Lookup the integer ID associated with each (namespace, key) in the
keys list]
variable[keys_len] assign[=] call[name[len], parameter[name[keys]]]
variable[ids] assign[=] <ast.DictComp object at 0x7da1b24b7b50>
variable[start] assign[=] constant[0]
variable[bulk_insert] assign[=] name[self].bulk_insert
variable[query] assign[=] constant[SELECT namespace, key, id FROM gauged_keys WHERE ]
variable[check] assign[=] constant[(namespace = %s AND key = %s) ]
variable[cursor] assign[=] name[self].cursor
variable[execute] assign[=] name[cursor].execute
while compare[name[start] less[<] name[keys_len]] begin[:]
variable[rows] assign[=] call[name[keys]][<ast.Slice object at 0x7da1b24b4ac0>]
variable[params] assign[=] <ast.ListComp object at 0x7da1b24b7100>
variable[id_query] assign[=] binary_operation[binary_operation[name[query] + binary_operation[binary_operation[name[check] + constant[ OR ]] * binary_operation[call[name[len], parameter[name[rows]]] - constant[1]]]] + name[check]]
call[name[execute], parameter[name[id_query], name[params]]]
for taget[tuple[[<ast.Name object at 0x7da1b24b5300>, <ast.Name object at 0x7da1b24b5150>, <ast.Name object at 0x7da1b24b6110>]]] in starred[name[cursor]] begin[:]
call[name[ids]][tuple[[<ast.Name object at 0x7da1b24b7b20>, <ast.Name object at 0x7da1b24b6ad0>]]] assign[=] name[id_]
<ast.AugAssign object at 0x7da1b24b4a00>
return[name[ids]] | keyword[def] identifier[lookup_ids] ( identifier[self] , identifier[keys] ):
literal[string]
identifier[keys_len] = identifier[len] ( identifier[keys] )
identifier[ids] ={ identifier[namespace_key] : keyword[None] keyword[for] identifier[namespace_key] keyword[in] identifier[keys] }
identifier[start] = literal[int]
identifier[bulk_insert] = identifier[self] . identifier[bulk_insert]
identifier[query] = literal[string]
identifier[check] = literal[string]
identifier[cursor] = identifier[self] . identifier[cursor]
identifier[execute] = identifier[cursor] . identifier[execute]
keyword[while] identifier[start] < identifier[keys_len] :
identifier[rows] = identifier[keys] [ identifier[start] : identifier[start] + identifier[bulk_insert] ]
identifier[params] =[ identifier[param] keyword[for] identifier[params] keyword[in] identifier[rows] keyword[for] identifier[param] keyword[in] identifier[params] ]
identifier[id_query] = identifier[query] +( identifier[check] + literal[string] )*( identifier[len] ( identifier[rows] )- literal[int] )+ identifier[check]
identifier[execute] ( identifier[id_query] , identifier[params] )
keyword[for] identifier[namespace] , identifier[key] , identifier[id_] keyword[in] identifier[cursor] :
identifier[ids] [( identifier[namespace] , identifier[key] )]= identifier[id_]
identifier[start] += identifier[bulk_insert]
keyword[return] identifier[ids] | def lookup_ids(self, keys):
"""Lookup the integer ID associated with each (namespace, key) in the
keys list"""
keys_len = len(keys)
ids = {namespace_key: None for namespace_key in keys}
start = 0
bulk_insert = self.bulk_insert
query = 'SELECT namespace, key, id FROM gauged_keys WHERE '
check = '(namespace = %s AND key = %s) '
cursor = self.cursor
execute = cursor.execute
while start < keys_len:
rows = keys[start:start + bulk_insert]
params = [param for params in rows for param in params]
id_query = query + (check + ' OR ') * (len(rows) - 1) + check
execute(id_query, params)
for (namespace, key, id_) in cursor:
ids[namespace, key] = id_ # depends on [control=['for'], data=[]]
start += bulk_insert # depends on [control=['while'], data=['start']]
return ids |
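A standalone sketch of the WHERE clause assembled for a batch of two keys; the %s placeholders are later filled positionally by the DB driver.

keys = [('user', 'logins'), ('user', 'errors')]
query = 'SELECT namespace, key, id FROM gauged_keys WHERE '
check = '(namespace = %s AND key = %s) '
sql = query + (check + ' OR ') * (len(keys) - 1) + check
params = [p for pair in keys for p in pair]   # ['user', 'logins', 'user', 'errors']
print(sql)
print(params)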
def reply_bytes(self, request):
"""Take a `Request` and return an OP_MSG message as bytes."""
flags = struct.pack("<I", self._flags)
payload_type = struct.pack("<b", 0)
payload_data = bson.BSON.encode(self.doc)
data = b''.join([flags, payload_type, payload_data])
reply_id = random.randint(0, 1000000)
response_to = request.request_id
header = struct.pack(
"<iiii", 16 + len(data), reply_id, response_to, OP_MSG)
return header + data | def function[reply_bytes, parameter[self, request]]:
constant[Take a `Request` and return an OP_MSG message as bytes.]
variable[flags] assign[=] call[name[struct].pack, parameter[constant[<I], name[self]._flags]]
variable[payload_type] assign[=] call[name[struct].pack, parameter[constant[<b], constant[0]]]
variable[payload_data] assign[=] call[name[bson].BSON.encode, parameter[name[self].doc]]
variable[data] assign[=] call[constant[b''].join, parameter[list[[<ast.Name object at 0x7da1b27b91b0>, <ast.Name object at 0x7da1b27b9930>, <ast.Name object at 0x7da1b27b99c0>]]]]
variable[reply_id] assign[=] call[name[random].randint, parameter[constant[0], constant[1000000]]]
variable[response_to] assign[=] name[request].request_id
variable[header] assign[=] call[name[struct].pack, parameter[constant[<iiii], binary_operation[constant[16] + call[name[len], parameter[name[data]]]], name[reply_id], name[response_to], name[OP_MSG]]]
return[binary_operation[name[header] + name[data]]] | keyword[def] identifier[reply_bytes] ( identifier[self] , identifier[request] ):
literal[string]
identifier[flags] = identifier[struct] . identifier[pack] ( literal[string] , identifier[self] . identifier[_flags] )
identifier[payload_type] = identifier[struct] . identifier[pack] ( literal[string] , literal[int] )
identifier[payload_data] = identifier[bson] . identifier[BSON] . identifier[encode] ( identifier[self] . identifier[doc] )
identifier[data] = literal[string] . identifier[join] ([ identifier[flags] , identifier[payload_type] , identifier[payload_data] ])
identifier[reply_id] = identifier[random] . identifier[randint] ( literal[int] , literal[int] )
identifier[response_to] = identifier[request] . identifier[request_id]
identifier[header] = identifier[struct] . identifier[pack] (
literal[string] , literal[int] + identifier[len] ( identifier[data] ), identifier[reply_id] , identifier[response_to] , identifier[OP_MSG] )
keyword[return] identifier[header] + identifier[data] | def reply_bytes(self, request):
"""Take a `Request` and return an OP_MSG message as bytes."""
flags = struct.pack('<I', self._flags)
payload_type = struct.pack('<b', 0)
payload_data = bson.BSON.encode(self.doc)
data = b''.join([flags, payload_type, payload_data])
reply_id = random.randint(0, 1000000)
response_to = request.request_id
header = struct.pack('<iiii', 16 + len(data), reply_id, response_to, OP_MSG)
return header + data |
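A self-contained sketch of the OP_MSG layout produced above: a 16-byte MongoDB MsgHeader (messageLength, requestID, responseTo, opCode 2013) followed by a 4-byte flag word, a 1-byte payload type, and one BSON document.

import struct

OP_MSG = 2013
body = b'\x05\x00\x00\x00\x00'            # minimal BSON: the empty document {}
data = struct.pack('<I', 0) + struct.pack('<b', 0) + body
header = struct.pack('<iiii', 16 + len(data), 42, 7, OP_MSG)
message = header + data
assert len(message) == 16 + 4 + 1 + 5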
def _default_transform_fn(self, model, content, content_type, accept):
"""Make predictions against the model and return a serialized response.
This serves as the default implementation of transform_fn, used when the user has not
implemented one themselves.
Args:
model (obj): model loaded by model_fn.
content: request content.
content_type (str): the request Content-Type.
accept (str): accept content-type expected by the client.
Returns:
sagemaker_containers.beta.framework.worker.Response or tuple:
the serialized response data and its content type, either as a Response object or
a tuple of the form (response_data, content_type)
"""
try:
data = self._input_fn(content, content_type)
except _errors.UnsupportedFormatError as e:
return self._error_response(e, http_client.UNSUPPORTED_MEDIA_TYPE)
prediction = self._predict_fn(data, model)
try:
result = self._output_fn(prediction, accept)
except _errors.UnsupportedFormatError as e:
return self._error_response(e, http_client.NOT_ACCEPTABLE)
return result | def function[_default_transform_fn, parameter[self, model, content, content_type, accept]]:
constant[Make predictions against the model and return a serialized response.
This serves as the default implementation of transform_fn, used when the user has not
implemented one themselves.
Args:
model (obj): model loaded by model_fn.
content: request content.
content_type (str): the request Content-Type.
accept (str): accept content-type expected by the client.
Returns:
sagemaker_containers.beta.framework.worker.Response or tuple:
the serialized response data and its content type, either as a Response object or
a tuple of the form (response_data, content_type)
]
<ast.Try object at 0x7da1b1646e90>
variable[prediction] assign[=] call[name[self]._predict_fn, parameter[name[data], name[model]]]
<ast.Try object at 0x7da1b1645120>
return[name[result]] | keyword[def] identifier[_default_transform_fn] ( identifier[self] , identifier[model] , identifier[content] , identifier[content_type] , identifier[accept] ):
literal[string]
keyword[try] :
identifier[data] = identifier[self] . identifier[_input_fn] ( identifier[content] , identifier[content_type] )
keyword[except] identifier[_errors] . identifier[UnsupportedFormatError] keyword[as] identifier[e] :
keyword[return] identifier[self] . identifier[_error_response] ( identifier[e] , identifier[http_client] . identifier[UNSUPPORTED_MEDIA_TYPE] )
identifier[prediction] = identifier[self] . identifier[_predict_fn] ( identifier[data] , identifier[model] )
keyword[try] :
identifier[result] = identifier[self] . identifier[_output_fn] ( identifier[prediction] , identifier[accept] )
keyword[except] identifier[_errors] . identifier[UnsupportedFormatError] keyword[as] identifier[e] :
keyword[return] identifier[self] . identifier[_error_response] ( identifier[e] , identifier[http_client] . identifier[NOT_ACCEPTABLE] )
keyword[return] identifier[result] | def _default_transform_fn(self, model, content, content_type, accept):
"""Make predictions against the model and return a serialized response.
This serves as the default implementation of transform_fn, used when the user has not
implemented one themselves.
Args:
model (obj): model loaded by model_fn.
content: request content.
content_type (str): the request Content-Type.
accept (str): accept content-type expected by the client.
Returns:
sagemaker_containers.beta.framework.worker.Response or tuple:
the serialized response data and its content type, either as a Response object or
a tuple of the form (response_data, content_type)
"""
try:
data = self._input_fn(content, content_type) # depends on [control=['try'], data=[]]
except _errors.UnsupportedFormatError as e:
return self._error_response(e, http_client.UNSUPPORTED_MEDIA_TYPE) # depends on [control=['except'], data=['e']]
prediction = self._predict_fn(data, model)
try:
result = self._output_fn(prediction, accept) # depends on [control=['try'], data=[]]
except _errors.UnsupportedFormatError as e:
return self._error_response(e, http_client.NOT_ACCEPTABLE) # depends on [control=['except'], data=['e']]
return result |
def to_url(request):
"""Serialize as a URL for a GET request."""
scheme, netloc, path, query, fragment = urlsplit(to_utf8(request.url))
query = parse_qs(query)
for key, value in request.data_and_params.iteritems():
query.setdefault(key, []).append(value)
query = urllib.urlencode(query, True)
return urlunsplit((scheme, netloc, path, query, fragment)) | def function[to_url, parameter[request]]:
constant[Serialize as a URL for a GET request.]
<ast.Tuple object at 0x7da20c6e54e0> assign[=] call[name[urlsplit], parameter[call[name[to_utf8], parameter[name[request].url]]]]
variable[query] assign[=] call[name[parse_qs], parameter[name[query]]]
for taget[tuple[[<ast.Name object at 0x7da20c6e6080>, <ast.Name object at 0x7da20c6e5420>]]] in starred[call[name[request].data_and_params.iteritems, parameter[]]] begin[:]
call[call[name[query].setdefault, parameter[name[key], list[[]]]].append, parameter[name[value]]]
variable[query] assign[=] call[name[urllib].urlencode, parameter[name[query], constant[True]]]
return[call[name[urlunsplit], parameter[tuple[[<ast.Name object at 0x7da20c6e75b0>, <ast.Name object at 0x7da20c6e63b0>, <ast.Name object at 0x7da20c6e7bb0>, <ast.Name object at 0x7da20c6e5c60>, <ast.Name object at 0x7da20c6e7e50>]]]]] | keyword[def] identifier[to_url] ( identifier[request] ):
literal[string]
identifier[scheme] , identifier[netloc] , identifier[path] , identifier[query] , identifier[fragment] = identifier[urlsplit] ( identifier[to_utf8] ( identifier[request] . identifier[url] ))
identifier[query] = identifier[parse_qs] ( identifier[query] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[request] . identifier[data_and_params] . identifier[iteritems] ():
identifier[query] . identifier[setdefault] ( identifier[key] ,[]). identifier[append] ( identifier[value] )
identifier[query] = identifier[urllib] . identifier[urlencode] ( identifier[query] , keyword[True] )
keyword[return] identifier[urlunsplit] (( identifier[scheme] , identifier[netloc] , identifier[path] , identifier[query] , identifier[fragment] )) | def to_url(request):
"""Serialize as a URL for a GET request."""
(scheme, netloc, path, query, fragment) = urlsplit(to_utf8(request.url))
query = parse_qs(query)
for (key, value) in request.data_and_params.iteritems():
query.setdefault(key, []).append(value) # depends on [control=['for'], data=[]]
query = urllib.urlencode(query, True)
return urlunsplit((scheme, netloc, path, query, fragment)) |
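The to_url record is Python 2 code (urlsplit/parse_qs lived across the urlparse and urllib modules, and dicts still had iteritems). A hedged Python 3 sketch of the same merge-then-reencode pattern, with the request object replaced by plain arguments:

from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit

def to_url_py3(url, data_and_params):
    # Split the URL, fold the extra parameters into the existing query,
    # then re-encode with doseq=True so list values expand into repeats.
    scheme, netloc, path, query, fragment = urlsplit(url)
    merged = parse_qs(query)
    for key, value in data_and_params.items():
        merged.setdefault(key, []).append(value)
    return urlunsplit((scheme, netloc, path, urlencode(merged, doseq=True), fragment))

print(to_url_py3('https://example.com/api?a=1', {'a': '2', 'b': '3'}))
# https://example.com/api?a=1&a=2&b=3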
def disassemble_bytes(orig_msg, orig_msg_nocr, code, lasti=-1, cur_line=0,
start_line=-1, end_line=None, relative_pos=False,
varnames=(), names=(), constants=(), cells=(),
freevars=(), linestarts={}, highlight='light',
start_offset=0, end_offset=None):
"""Disassemble byte string of code. If end_line is negative
it counts the number of statement linestarts to use."""
statement_count = 10000
if end_line is None:
end_line = 10000
elif relative_pos:
end_line += start_line -1
pass
labels = findlabels(code)
null_print = lambda x: None
if start_line > cur_line:
msg_nocr = null_print
msg = null_print
else:
msg_nocr = orig_msg_nocr
msg = orig_msg
for instr in get_instructions_bytes(code, opc, varnames, names,
constants, cells, linestarts):
offset = instr.offset
if end_offset and offset > end_offset:
break
if instr.starts_line:
if offset:
msg("")
cur_line = instr.starts_line
if (start_line and ((start_line > cur_line) or
start_offset and start_offset > offset)) :
msg_nocr = null_print
msg = null_print
else:
statement_count -= 1
msg_nocr = orig_msg_nocr
msg = orig_msg
pass
if ((cur_line > end_line) or
(end_offset and offset > end_offset)):
break
msg_nocr(format_token(Mformat.LineNumber,
"%4d" % cur_line,
highlight=highlight))
else:
if start_offset and offset and start_offset <= offset:
msg_nocr = orig_msg_nocr
msg = orig_msg
pass
msg_nocr(' ')
if offset == lasti: msg_nocr(format_token(Mformat.Arrow, '-->',
highlight=highlight))
else: msg_nocr(' ')
if offset in labels: msg_nocr(format_token(Mformat.Arrow, '>>',
highlight=highlight))
else: msg_nocr(' ')
msg_nocr(repr(offset).rjust(4))
msg_nocr(' ')
msg_nocr(format_token(Mformat.Opcode,
instr.opname.ljust(20),
highlight=highlight))
msg_nocr(repr(instr.arg).ljust(10))
msg_nocr(' ')
# Show argval?
msg(format_token(Mformat.Name,
instr.argrepr.ljust(20),
highlight=highlight))
pass
return code, offset | def function[disassemble_bytes, parameter[orig_msg, orig_msg_nocr, code, lasti, cur_line, start_line, end_line, relative_pos, varnames, names, constants, cells, freevars, linestarts, highlight, start_offset, end_offset]]:
constant[Disassemble byte string of code. If end_line is negative
it counts the number of statement linestarts to use.]
variable[statement_count] assign[=] constant[10000]
if compare[name[end_line] is constant[None]] begin[:]
variable[end_line] assign[=] constant[10000]
variable[labels] assign[=] call[name[findlabels], parameter[name[code]]]
variable[null_print] assign[=] <ast.Lambda object at 0x7da1b0530c70>
if compare[name[start_line] greater[>] name[cur_line]] begin[:]
variable[msg_nocr] assign[=] name[null_print]
variable[msg] assign[=] name[null_print]
for taget[name[instr]] in starred[call[name[get_instructions_bytes], parameter[name[code], name[opc], name[varnames], name[names], name[constants], name[cells], name[linestarts]]]] begin[:]
variable[offset] assign[=] name[instr].offset
if <ast.BoolOp object at 0x7da1b05304c0> begin[:]
break
if name[instr].starts_line begin[:]
if name[offset] begin[:]
call[name[msg], parameter[constant[]]]
variable[cur_line] assign[=] name[instr].starts_line
if <ast.BoolOp object at 0x7da1b05327d0> begin[:]
variable[msg_nocr] assign[=] name[null_print]
variable[msg] assign[=] name[null_print]
if <ast.BoolOp object at 0x7da18f721d80> begin[:]
break
call[name[msg_nocr], parameter[call[name[format_token], parameter[name[Mformat].LineNumber, binary_operation[constant[%4d] <ast.Mod object at 0x7da2590d6920> name[cur_line]]]]]]
if compare[name[offset] equal[==] name[lasti]] begin[:]
call[name[msg_nocr], parameter[call[name[format_token], parameter[name[Mformat].Arrow, constant[-->]]]]]
if compare[name[offset] in name[labels]] begin[:]
call[name[msg_nocr], parameter[call[name[format_token], parameter[name[Mformat].Arrow, constant[>>]]]]]
call[name[msg_nocr], parameter[call[call[name[repr], parameter[name[offset]]].rjust, parameter[constant[4]]]]]
call[name[msg_nocr], parameter[constant[ ]]]
call[name[msg_nocr], parameter[call[name[format_token], parameter[name[Mformat].Opcode, call[name[instr].opname.ljust, parameter[constant[20]]]]]]]
call[name[msg_nocr], parameter[call[call[name[repr], parameter[name[instr].arg]].ljust, parameter[constant[10]]]]]
call[name[msg_nocr], parameter[constant[ ]]]
call[name[msg], parameter[call[name[format_token], parameter[name[Mformat].Name, call[name[instr].argrepr.ljust, parameter[constant[20]]]]]]]
pass
return[tuple[[<ast.Name object at 0x7da1b0381930>, <ast.Name object at 0x7da1b0383520>]]] | keyword[def] identifier[disassemble_bytes] ( identifier[orig_msg] , identifier[orig_msg_nocr] , identifier[code] , identifier[lasti] =- literal[int] , identifier[cur_line] = literal[int] ,
identifier[start_line] =- literal[int] , identifier[end_line] = keyword[None] , identifier[relative_pos] = keyword[False] ,
identifier[varnames] =(), identifier[names] =(), identifier[constants] =(), identifier[cells] =(),
identifier[freevars] =(), identifier[linestarts] ={}, identifier[highlight] = literal[string] ,
identifier[start_offset] = literal[int] , identifier[end_offset] = keyword[None] ):
literal[string]
identifier[statement_count] = literal[int]
keyword[if] identifier[end_line] keyword[is] keyword[None] :
identifier[end_line] = literal[int]
keyword[elif] identifier[relative_pos] :
identifier[end_line] += identifier[start_line] - literal[int]
keyword[pass]
identifier[labels] = identifier[findlabels] ( identifier[code] )
identifier[null_print] = keyword[lambda] identifier[x] : keyword[None]
keyword[if] identifier[start_line] > identifier[cur_line] :
identifier[msg_nocr] = identifier[null_print]
identifier[msg] = identifier[null_print]
keyword[else] :
identifier[msg_nocr] = identifier[orig_msg_nocr]
identifier[msg] = identifier[orig_msg]
keyword[for] identifier[instr] keyword[in] identifier[get_instructions_bytes] ( identifier[code] , identifier[opc] , identifier[varnames] , identifier[names] ,
identifier[constants] , identifier[cells] , identifier[linestarts] ):
identifier[offset] = identifier[instr] . identifier[offset]
keyword[if] identifier[end_offset] keyword[and] identifier[offset] > identifier[end_offset] :
keyword[break]
keyword[if] identifier[instr] . identifier[starts_line] :
keyword[if] identifier[offset] :
identifier[msg] ( literal[string] )
identifier[cur_line] = identifier[instr] . identifier[starts_line]
keyword[if] ( identifier[start_line] keyword[and] (( identifier[start_line] > identifier[cur_line] ) keyword[or]
identifier[start_offset] keyword[and] identifier[start_offset] > identifier[offset] )):
identifier[msg_nocr] = identifier[null_print]
identifier[msg] = identifier[null_print]
keyword[else] :
identifier[statement_count] -= literal[int]
identifier[msg_nocr] = identifier[orig_msg_nocr]
identifier[msg] = identifier[orig_msg]
keyword[pass]
keyword[if] (( identifier[cur_line] > identifier[end_line] ) keyword[or]
( identifier[end_offset] keyword[and] identifier[offset] > identifier[end_offset] )):
keyword[break]
identifier[msg_nocr] ( identifier[format_token] ( identifier[Mformat] . identifier[LineNumber] ,
literal[string] % identifier[cur_line] ,
identifier[highlight] = identifier[highlight] ))
keyword[else] :
keyword[if] identifier[start_offset] keyword[and] identifier[offset] keyword[and] identifier[start_offset] <= identifier[offset] :
identifier[msg_nocr] = identifier[orig_msg_nocr]
identifier[msg] = identifier[orig_msg]
keyword[pass]
identifier[msg_nocr] ( literal[string] )
keyword[if] identifier[offset] == identifier[lasti] : identifier[msg_nocr] ( identifier[format_token] ( identifier[Mformat] . identifier[Arrow] , literal[string] ,
identifier[highlight] = identifier[highlight] ))
keyword[else] : identifier[msg_nocr] ( literal[string] )
keyword[if] identifier[offset] keyword[in] identifier[labels] : identifier[msg_nocr] ( identifier[format_token] ( identifier[Mformat] . identifier[Arrow] , literal[string] ,
identifier[highlight] = identifier[highlight] ))
keyword[else] : identifier[msg_nocr] ( literal[string] )
identifier[msg_nocr] ( identifier[repr] ( identifier[offset] ). identifier[rjust] ( literal[int] ))
identifier[msg_nocr] ( literal[string] )
identifier[msg_nocr] ( identifier[format_token] ( identifier[Mformat] . identifier[Opcode] ,
identifier[instr] . identifier[opname] . identifier[ljust] ( literal[int] ),
identifier[highlight] = identifier[highlight] ))
identifier[msg_nocr] ( identifier[repr] ( identifier[instr] . identifier[arg] ). identifier[ljust] ( literal[int] ))
identifier[msg_nocr] ( literal[string] )
identifier[msg] ( identifier[format_token] ( identifier[Mformat] . identifier[Name] ,
identifier[instr] . identifier[argrepr] . identifier[ljust] ( literal[int] ),
identifier[highlight] = identifier[highlight] ))
keyword[pass]
keyword[return] identifier[code] , identifier[offset] | def disassemble_bytes(orig_msg, orig_msg_nocr, code, lasti=-1, cur_line=0, start_line=-1, end_line=None, relative_pos=False, varnames=(), names=(), constants=(), cells=(), freevars=(), linestarts={}, highlight='light', start_offset=0, end_offset=None):
"""Disassemble byte string of code. If end_line is negative
it counts the number of statement linestarts to use."""
statement_count = 10000
if end_line is None:
end_line = 10000 # depends on [control=['if'], data=['end_line']]
elif relative_pos:
end_line += start_line - 1
pass # depends on [control=['if'], data=[]]
labels = findlabels(code)
null_print = lambda x: None
if start_line > cur_line:
msg_nocr = null_print
msg = null_print # depends on [control=['if'], data=[]]
else:
msg_nocr = orig_msg_nocr
msg = orig_msg
for instr in get_instructions_bytes(code, opc, varnames, names, constants, cells, linestarts):
offset = instr.offset
if end_offset and offset > end_offset:
break # depends on [control=['if'], data=[]]
if instr.starts_line:
if offset:
msg('') # depends on [control=['if'], data=[]]
cur_line = instr.starts_line
if start_line and (start_line > cur_line or (start_offset and start_offset > offset)):
msg_nocr = null_print
msg = null_print # depends on [control=['if'], data=[]]
else:
statement_count -= 1
msg_nocr = orig_msg_nocr
msg = orig_msg
pass
if cur_line > end_line or (end_offset and offset > end_offset):
break # depends on [control=['if'], data=[]]
msg_nocr(format_token(Mformat.LineNumber, '%4d' % cur_line, highlight=highlight)) # depends on [control=['if'], data=[]]
else:
if start_offset and offset and (start_offset <= offset):
msg_nocr = orig_msg_nocr
msg = orig_msg
pass # depends on [control=['if'], data=[]]
msg_nocr(' ')
if offset == lasti:
msg_nocr(format_token(Mformat.Arrow, '-->', highlight=highlight)) # depends on [control=['if'], data=[]]
else:
msg_nocr(' ')
if offset in labels:
msg_nocr(format_token(Mformat.Arrow, '>>', highlight=highlight)) # depends on [control=['if'], data=[]]
else:
msg_nocr(' ')
msg_nocr(repr(offset).rjust(4))
msg_nocr(' ')
msg_nocr(format_token(Mformat.Opcode, instr.opname.ljust(20), highlight=highlight))
msg_nocr(repr(instr.arg).ljust(10))
msg_nocr(' ')
# Show argval?
msg(format_token(Mformat.Name, instr.argrepr.ljust(20), highlight=highlight))
pass # depends on [control=['for'], data=['instr']]
return (code, offset) |
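disassemble_bytes leans on helpers that are not part of this record (get_instructions_bytes, findlabels, opc, Mformat come from the surrounding debugger package). A stdlib-only sketch of the same line-gated disassembly loop, built on dis.get_instructions; note that Instruction.starts_line carries the source line on CPython up to 3.12:

import dis

def disassemble_range(code_obj, start_line, end_line):
    # Print instructions only while the current source line falls
    # inside [start_line, end_line], as in disassemble_bytes.
    cur_line = None
    for instr in dis.get_instructions(code_obj):
        if instr.starts_line is not None:
            cur_line = instr.starts_line
        if cur_line is None or cur_line < start_line:
            continue
        if cur_line > end_line:
            break
        label = '>>' if instr.is_jump_target else '  '
        print('%4d %s %4d %-20s %s'
              % (cur_line, label, instr.offset, instr.opname, instr.argrepr))

disassemble_range(disassemble_range.__code__, 1, 10_000)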
def profile(message=None, verbose=False):
"""Decorator for profiling a function.
TODO: Support `@profile` syntax (without parens). This would involve
inspecting the args. In this case `profile` would receive a single
argument, which is the function to be decorated.
"""
import functools
from harrison.registered_timer import RegisteredTimer
# Adjust the call stack index for RegisteredTimer so the call is Timer use
# is properly attributed.
class DecoratorTimer(RegisteredTimer):
_CALLER_STACK_INDEX = 2
def wrapper(fn):
desc = message or fn.__name__
@functools.wraps(fn)
def wrapped_fn(*args, **kwargs):
with DecoratorTimer(desc=desc, verbose=verbose):
return fn(*args, **kwargs)
return wrapped_fn
return wrapper | def function[profile, parameter[message, verbose]]:
constant[Decorator for profiling a function.
TODO: Support `@profile` syntax (without parens). This would involve
inspecting the args. In this case `profile` would receive a single
argument, which is the function to be decorated.
]
import module[functools]
from relative_module[harrison.registered_timer] import module[RegisteredTimer]
class class[DecoratorTimer, parameter[]] begin[:]
variable[_CALLER_STACK_INDEX] assign[=] constant[2]
def function[wrapper, parameter[fn]]:
variable[desc] assign[=] <ast.BoolOp object at 0x7da207f03310>
def function[wrapped_fn, parameter[]]:
with call[name[DecoratorTimer], parameter[]] begin[:]
return[call[name[fn], parameter[<ast.Starred object at 0x7da207f031c0>]]]
return[name[wrapped_fn]]
return[name[wrapper]] | keyword[def] identifier[profile] ( identifier[message] = keyword[None] , identifier[verbose] = keyword[False] ):
literal[string]
keyword[import] identifier[functools]
keyword[from] identifier[harrison] . identifier[registered_timer] keyword[import] identifier[RegisteredTimer]
keyword[class] identifier[DecoratorTimer] ( identifier[RegisteredTimer] ):
identifier[_CALLER_STACK_INDEX] = literal[int]
keyword[def] identifier[wrapper] ( identifier[fn] ):
identifier[desc] = identifier[message] keyword[or] identifier[fn] . identifier[__name__]
@ identifier[functools] . identifier[wraps] ( identifier[fn] )
keyword[def] identifier[wrapped_fn] (* identifier[args] ,** identifier[kwargs] ):
keyword[with] identifier[DecoratorTimer] ( identifier[desc] = identifier[desc] , identifier[verbose] = identifier[verbose] ):
keyword[return] identifier[fn] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapped_fn]
keyword[return] identifier[wrapper] | def profile(message=None, verbose=False):
"""Decorator for profiling a function.
TODO: Support `@profile` syntax (without parens). This would involve
inspecting the args. In this case `profile` would receive a single
argument, which is the function to be decorated.
"""
import functools
from harrison.registered_timer import RegisteredTimer
# Adjust the call stack index for RegisteredTimer so the call is Timer use
# is properly attributed.
class DecoratorTimer(RegisteredTimer):
_CALLER_STACK_INDEX = 2
def wrapper(fn):
desc = message or fn.__name__
@functools.wraps(fn)
def wrapped_fn(*args, **kwargs):
with DecoratorTimer(desc=desc, verbose=verbose):
return fn(*args, **kwargs) # depends on [control=['with'], data=[]]
return wrapped_fn
return wrapper |
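The profile record builds a parameterized decorator around harrison's RegisteredTimer, which is used as a context manager. A self-contained sketch of the same three-layer decorator shape, with a stand-in timer in place of the harrison API:

import functools
import time
from contextlib import contextmanager

@contextmanager
def _timer(desc, verbose):
    # Stand-in for RegisteredTimer: time the block, optionally report.
    start = time.perf_counter()
    try:
        yield
    finally:
        if verbose:
            print('%s: %.6fs' % (desc, time.perf_counter() - start))

def profile_sketch(message=None, verbose=True):
    def wrapper(fn):
        desc = message or fn.__name__
        @functools.wraps(fn)
        def wrapped_fn(*args, **kwargs):
            with _timer(desc, verbose):
                return fn(*args, **kwargs)
        return wrapped_fn
    return wrapper

@profile_sketch(message='square sum')
def square_sum(n):
    return sum(i * i for i in range(n))

square_sum(100_000)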
def get_observations_for_site(self, site_id, frequency='hourly'):
"""
Get observations for the provided site
Returns hourly observations for the previous 24 hours
"""
data = self.__call_api(site_id,{"res":frequency}, OBSERVATION_URL)
params = data['SiteRep']['Wx']['Param']
observation = Observation()
observation.data_date = data['SiteRep']['DV']['dataDate']
observation.data_date = datetime.strptime(data['SiteRep']['DV']['dataDate'], DATA_DATE_FORMAT).replace(tzinfo=pytz.UTC)
observation.continent = data['SiteRep']['DV']['Location']['continent']
observation.country = data['SiteRep']['DV']['Location']['country']
observation.name = data['SiteRep']['DV']['Location']['name']
observation.longitude = data['SiteRep']['DV']['Location']['lon']
observation.latitude = data['SiteRep']['DV']['Location']['lat']
observation.id = data['SiteRep']['DV']['Location']['i']
observation.elevation = data['SiteRep']['DV']['Location']['elevation']
for day in data['SiteRep']['DV']['Location']['Period']:
new_day = Day()
new_day.date = datetime.strptime(day['value'], DATE_FORMAT).replace(tzinfo=pytz.UTC)
# If the day only has 1 timestep, put it into a list by itself so it can be treated
# the same as a day with multiple timesteps
if type(day['Rep']) is not list:
day['Rep'] = [day['Rep']]
for timestep in day['Rep']:
# As stated in
# https://www.metoffice.gov.uk/datapoint/product/uk-hourly-site-specific-observations,
# some sites do not have all parameters available for
# observations. The documentation does not state which
# fields may be absent. If the parameter is not available,
# nothing is returned from the API. If this happens the
# value of the element is set to 'Not reported'. This may
# change to the element not being assigned to the timestep.
new_timestep = Timestep()
# Assume the '$' field is always present.
new_timestep.name = int(timestep['$'])
cur_elements = ELEMENTS['Observation']
new_timestep.date = datetime.strptime(day['value'], DATE_FORMAT).replace(tzinfo=pytz.UTC) + timedelta(minutes=int(timestep['$']))
if cur_elements['W'] in timestep:
new_timestep.weather = \
Element(cur_elements['W'],
timestep[cur_elements['W']],
self._get_wx_units(params, cur_elements['W']))
new_timestep.weather.text = \
self._weather_to_text(int(timestep[cur_elements['W']]))
else:
new_timestep.weather = \
Element(cur_elements['W'],
'Not reported')
if cur_elements['T'] in timestep:
new_timestep.temperature = \
Element(cur_elements['T'],
float(timestep[cur_elements['T']]),
self._get_wx_units(params, cur_elements['T']))
else:
new_timestep.temperature = \
Element(cur_elements['T'],
'Not reported')
if 'S' in timestep:
new_timestep.wind_speed = \
Element(cur_elements['S'],
int(timestep[cur_elements['S']]),
self._get_wx_units(params, cur_elements['S']))
else:
new_timestep.wind_speed = \
Element(cur_elements['S'],
'Not reported')
if 'D' in timestep:
new_timestep.wind_direction = \
Element(cur_elements['D'],
timestep[cur_elements['D']],
self._get_wx_units(params, cur_elements['D']))
else:
new_timestep.wind_direction = \
Element(cur_elements['D'],
'Not reported')
if cur_elements['V'] in timestep:
new_timestep.visibility = \
Element(cur_elements['V'],
int(timestep[cur_elements['V']]),
self._get_wx_units(params, cur_elements['V']))
new_timestep.visibility.text = self._visibility_to_text(int(timestep[cur_elements['V']]))
else:
new_timestep.visibility = \
Element(cur_elements['V'],
'Not reported')
if cur_elements['H'] in timestep:
new_timestep.humidity = \
Element(cur_elements['H'],
float(timestep[cur_elements['H']]),
self._get_wx_units(params, cur_elements['H']))
else:
new_timestep.humidity = \
Element(cur_elements['H'],
'Not reported')
if cur_elements['Dp'] in timestep:
new_timestep.dew_point = \
Element(cur_elements['Dp'],
float(timestep[cur_elements['Dp']]),
self._get_wx_units(params,
cur_elements['Dp']))
else:
new_timestep.dew_point = \
Element(cur_elements['Dp'],
'Not reported')
if cur_elements['P'] in timestep:
new_timestep.pressure = \
Element(cur_elements['P'],
float(timestep[cur_elements['P']]),
self._get_wx_units(params, cur_elements['P']))
else:
new_timestep.pressure = \
Element(cur_elements['P'],
'Not reported')
if cur_elements['Pt'] in timestep:
new_timestep.pressure_tendency = \
Element(cur_elements['Pt'],
timestep[cur_elements['Pt']],
self._get_wx_units(params, cur_elements['Pt']))
else:
new_timestep.pressure_tendency = \
Element(cur_elements['Pt'],
'Not reported')
new_day.timesteps.append(new_timestep)
observation.days.append(new_day)
return observation | def function[get_observations_for_site, parameter[self, site_id, frequency]]:
constant[
Get observations for the provided site
Returns hourly observations for the previous 24 hours
]
variable[data] assign[=] call[name[self].__call_api, parameter[name[site_id], dictionary[[<ast.Constant object at 0x7da2041db940>], [<ast.Name object at 0x7da2041dbd90>]], name[OBSERVATION_URL]]]
variable[params] assign[=] call[call[call[name[data]][constant[SiteRep]]][constant[Wx]]][constant[Param]]
variable[observation] assign[=] call[name[Observation], parameter[]]
name[observation].data_date assign[=] call[call[call[name[data]][constant[SiteRep]]][constant[DV]]][constant[dataDate]]
name[observation].data_date assign[=] call[call[name[datetime].strptime, parameter[call[call[call[name[data]][constant[SiteRep]]][constant[DV]]][constant[dataDate]], name[DATA_DATE_FORMAT]]].replace, parameter[]]
name[observation].continent assign[=] call[call[call[call[name[data]][constant[SiteRep]]][constant[DV]]][constant[Location]]][constant[continent]]
name[observation].country assign[=] call[call[call[call[name[data]][constant[SiteRep]]][constant[DV]]][constant[Location]]][constant[country]]
name[observation].name assign[=] call[call[call[call[name[data]][constant[SiteRep]]][constant[DV]]][constant[Location]]][constant[name]]
name[observation].longitude assign[=] call[call[call[call[name[data]][constant[SiteRep]]][constant[DV]]][constant[Location]]][constant[lon]]
name[observation].latitude assign[=] call[call[call[call[name[data]][constant[SiteRep]]][constant[DV]]][constant[Location]]][constant[lat]]
name[observation].id assign[=] call[call[call[call[name[data]][constant[SiteRep]]][constant[DV]]][constant[Location]]][constant[i]]
name[observation].elevation assign[=] call[call[call[call[name[data]][constant[SiteRep]]][constant[DV]]][constant[Location]]][constant[elevation]]
for taget[name[day]] in starred[call[call[call[call[name[data]][constant[SiteRep]]][constant[DV]]][constant[Location]]][constant[Period]]] begin[:]
variable[new_day] assign[=] call[name[Day], parameter[]]
name[new_day].date assign[=] call[call[name[datetime].strptime, parameter[call[name[day]][constant[value]], name[DATE_FORMAT]]].replace, parameter[]]
if compare[call[name[type], parameter[call[name[day]][constant[Rep]]]] is_not name[list]] begin[:]
call[name[day]][constant[Rep]] assign[=] list[[<ast.Subscript object at 0x7da18bcc89a0>]]
for taget[name[timestep]] in starred[call[name[day]][constant[Rep]]] begin[:]
variable[new_timestep] assign[=] call[name[Timestep], parameter[]]
name[new_timestep].name assign[=] call[name[int], parameter[call[name[timestep]][constant[$]]]]
variable[cur_elements] assign[=] call[name[ELEMENTS]][constant[Observation]]
name[new_timestep].date assign[=] binary_operation[call[call[name[datetime].strptime, parameter[call[name[day]][constant[value]], name[DATE_FORMAT]]].replace, parameter[]] + call[name[timedelta], parameter[]]]
if compare[call[name[cur_elements]][constant[W]] in name[timestep]] begin[:]
name[new_timestep].weather assign[=] call[name[Element], parameter[call[name[cur_elements]][constant[W]], call[name[timestep]][call[name[cur_elements]][constant[W]]], call[name[self]._get_wx_units, parameter[name[params], call[name[cur_elements]][constant[W]]]]]]
name[new_timestep].weather.text assign[=] call[name[self]._weather_to_text, parameter[call[name[int], parameter[call[name[timestep]][call[name[cur_elements]][constant[W]]]]]]]
if compare[call[name[cur_elements]][constant[T]] in name[timestep]] begin[:]
name[new_timestep].temperature assign[=] call[name[Element], parameter[call[name[cur_elements]][constant[T]], call[name[float], parameter[call[name[timestep]][call[name[cur_elements]][constant[T]]]]], call[name[self]._get_wx_units, parameter[name[params], call[name[cur_elements]][constant[T]]]]]]
if compare[constant[S] in name[timestep]] begin[:]
name[new_timestep].wind_speed assign[=] call[name[Element], parameter[call[name[cur_elements]][constant[S]], call[name[int], parameter[call[name[timestep]][call[name[cur_elements]][constant[S]]]]], call[name[self]._get_wx_units, parameter[name[params], call[name[cur_elements]][constant[S]]]]]]
if compare[constant[D] in name[timestep]] begin[:]
name[new_timestep].wind_direction assign[=] call[name[Element], parameter[call[name[cur_elements]][constant[D]], call[name[timestep]][call[name[cur_elements]][constant[D]]], call[name[self]._get_wx_units, parameter[name[params], call[name[cur_elements]][constant[D]]]]]]
if compare[call[name[cur_elements]][constant[V]] in name[timestep]] begin[:]
name[new_timestep].visibility assign[=] call[name[Element], parameter[call[name[cur_elements]][constant[V]], call[name[int], parameter[call[name[timestep]][call[name[cur_elements]][constant[V]]]]], call[name[self]._get_wx_units, parameter[name[params], call[name[cur_elements]][constant[V]]]]]]
name[new_timestep].visibility.text assign[=] call[name[self]._visibility_to_text, parameter[call[name[int], parameter[call[name[timestep]][call[name[cur_elements]][constant[V]]]]]]]
if compare[call[name[cur_elements]][constant[H]] in name[timestep]] begin[:]
name[new_timestep].humidity assign[=] call[name[Element], parameter[call[name[cur_elements]][constant[H]], call[name[float], parameter[call[name[timestep]][call[name[cur_elements]][constant[H]]]]], call[name[self]._get_wx_units, parameter[name[params], call[name[cur_elements]][constant[H]]]]]]
if compare[call[name[cur_elements]][constant[Dp]] in name[timestep]] begin[:]
name[new_timestep].dew_point assign[=] call[name[Element], parameter[call[name[cur_elements]][constant[Dp]], call[name[float], parameter[call[name[timestep]][call[name[cur_elements]][constant[Dp]]]]], call[name[self]._get_wx_units, parameter[name[params], call[name[cur_elements]][constant[Dp]]]]]]
if compare[call[name[cur_elements]][constant[P]] in name[timestep]] begin[:]
name[new_timestep].pressure assign[=] call[name[Element], parameter[call[name[cur_elements]][constant[P]], call[name[float], parameter[call[name[timestep]][call[name[cur_elements]][constant[P]]]]], call[name[self]._get_wx_units, parameter[name[params], call[name[cur_elements]][constant[P]]]]]]
if compare[call[name[cur_elements]][constant[Pt]] in name[timestep]] begin[:]
name[new_timestep].pressure_tendency assign[=] call[name[Element], parameter[call[name[cur_elements]][constant[Pt]], call[name[timestep]][call[name[cur_elements]][constant[Pt]]], call[name[self]._get_wx_units, parameter[name[params], call[name[cur_elements]][constant[Pt]]]]]]
call[name[new_day].timesteps.append, parameter[name[new_timestep]]]
call[name[observation].days.append, parameter[name[new_day]]]
return[name[observation]] | keyword[def] identifier[get_observations_for_site] ( identifier[self] , identifier[site_id] , identifier[frequency] = literal[string] ):
literal[string]
identifier[data] = identifier[self] . identifier[__call_api] ( identifier[site_id] ,{ literal[string] : identifier[frequency] }, identifier[OBSERVATION_URL] )
identifier[params] = identifier[data] [ literal[string] ][ literal[string] ][ literal[string] ]
identifier[observation] = identifier[Observation] ()
identifier[observation] . identifier[data_date] = identifier[data] [ literal[string] ][ literal[string] ][ literal[string] ]
identifier[observation] . identifier[data_date] = identifier[datetime] . identifier[strptime] ( identifier[data] [ literal[string] ][ literal[string] ][ literal[string] ], identifier[DATA_DATE_FORMAT] ). identifier[replace] ( identifier[tzinfo] = identifier[pytz] . identifier[UTC] )
identifier[observation] . identifier[continent] = identifier[data] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]
identifier[observation] . identifier[country] = identifier[data] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]
identifier[observation] . identifier[name] = identifier[data] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]
identifier[observation] . identifier[longitude] = identifier[data] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]
identifier[observation] . identifier[latitude] = identifier[data] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]
identifier[observation] . identifier[id] = identifier[data] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]
identifier[observation] . identifier[elevation] = identifier[data] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]
keyword[for] identifier[day] keyword[in] identifier[data] [ literal[string] ][ literal[string] ][ literal[string] ][ literal[string] ]:
identifier[new_day] = identifier[Day] ()
identifier[new_day] . identifier[date] = identifier[datetime] . identifier[strptime] ( identifier[day] [ literal[string] ], identifier[DATE_FORMAT] ). identifier[replace] ( identifier[tzinfo] = identifier[pytz] . identifier[UTC] )
keyword[if] identifier[type] ( identifier[day] [ literal[string] ]) keyword[is] keyword[not] identifier[list] :
identifier[day] [ literal[string] ]=[ identifier[day] [ literal[string] ]]
keyword[for] identifier[timestep] keyword[in] identifier[day] [ literal[string] ]:
identifier[new_timestep] = identifier[Timestep] ()
identifier[new_timestep] . identifier[name] = identifier[int] ( identifier[timestep] [ literal[string] ])
identifier[cur_elements] = identifier[ELEMENTS] [ literal[string] ]
identifier[new_timestep] . identifier[date] = identifier[datetime] . identifier[strptime] ( identifier[day] [ literal[string] ], identifier[DATE_FORMAT] ). identifier[replace] ( identifier[tzinfo] = identifier[pytz] . identifier[UTC] )+ identifier[timedelta] ( identifier[minutes] = identifier[int] ( identifier[timestep] [ literal[string] ]))
keyword[if] identifier[cur_elements] [ literal[string] ] keyword[in] identifier[timestep] :
identifier[new_timestep] . identifier[weather] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
identifier[timestep] [ identifier[cur_elements] [ literal[string] ]],
identifier[self] . identifier[_get_wx_units] ( identifier[params] , identifier[cur_elements] [ literal[string] ]))
identifier[new_timestep] . identifier[weather] . identifier[text] = identifier[self] . identifier[_weather_to_text] ( identifier[int] ( identifier[timestep] [ identifier[cur_elements] [ literal[string] ]]))
keyword[else] :
identifier[new_timestep] . identifier[weather] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
literal[string] )
keyword[if] identifier[cur_elements] [ literal[string] ] keyword[in] identifier[timestep] :
identifier[new_timestep] . identifier[temperature] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
identifier[float] ( identifier[timestep] [ identifier[cur_elements] [ literal[string] ]]),
identifier[self] . identifier[_get_wx_units] ( identifier[params] , identifier[cur_elements] [ literal[string] ]))
keyword[else] :
identifier[new_timestep] . identifier[temperature] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
literal[string] )
keyword[if] literal[string] keyword[in] identifier[timestep] :
identifier[new_timestep] . identifier[wind_speed] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
identifier[int] ( identifier[timestep] [ identifier[cur_elements] [ literal[string] ]]),
identifier[self] . identifier[_get_wx_units] ( identifier[params] , identifier[cur_elements] [ literal[string] ]))
keyword[else] :
identifier[new_timestep] . identifier[wind_speed] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
literal[string] )
keyword[if] literal[string] keyword[in] identifier[timestep] :
identifier[new_timestep] . identifier[wind_direction] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
identifier[timestep] [ identifier[cur_elements] [ literal[string] ]],
identifier[self] . identifier[_get_wx_units] ( identifier[params] , identifier[cur_elements] [ literal[string] ]))
keyword[else] :
identifier[new_timestep] . identifier[wind_direction] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
literal[string] )
keyword[if] identifier[cur_elements] [ literal[string] ] keyword[in] identifier[timestep] :
identifier[new_timestep] . identifier[visibility] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
identifier[int] ( identifier[timestep] [ identifier[cur_elements] [ literal[string] ]]),
identifier[self] . identifier[_get_wx_units] ( identifier[params] , identifier[cur_elements] [ literal[string] ]))
identifier[new_timestep] . identifier[visibility] . identifier[text] = identifier[self] . identifier[_visibility_to_text] ( identifier[int] ( identifier[timestep] [ identifier[cur_elements] [ literal[string] ]]))
keyword[else] :
identifier[new_timestep] . identifier[visibility] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
literal[string] )
keyword[if] identifier[cur_elements] [ literal[string] ] keyword[in] identifier[timestep] :
identifier[new_timestep] . identifier[humidity] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
identifier[float] ( identifier[timestep] [ identifier[cur_elements] [ literal[string] ]]),
identifier[self] . identifier[_get_wx_units] ( identifier[params] , identifier[cur_elements] [ literal[string] ]))
keyword[else] :
identifier[new_timestep] . identifier[humidity] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
literal[string] )
keyword[if] identifier[cur_elements] [ literal[string] ] keyword[in] identifier[timestep] :
identifier[new_timestep] . identifier[dew_point] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
identifier[float] ( identifier[timestep] [ identifier[cur_elements] [ literal[string] ]]),
identifier[self] . identifier[_get_wx_units] ( identifier[params] ,
identifier[cur_elements] [ literal[string] ]))
keyword[else] :
identifier[new_timestep] . identifier[dew_point] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
literal[string] )
keyword[if] identifier[cur_elements] [ literal[string] ] keyword[in] identifier[timestep] :
identifier[new_timestep] . identifier[pressure] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
identifier[float] ( identifier[timestep] [ identifier[cur_elements] [ literal[string] ]]),
identifier[self] . identifier[_get_wx_units] ( identifier[params] , identifier[cur_elements] [ literal[string] ]))
keyword[else] :
identifier[new_timestep] . identifier[pressure] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
literal[string] )
keyword[if] identifier[cur_elements] [ literal[string] ] keyword[in] identifier[timestep] :
identifier[new_timestep] . identifier[pressure_tendency] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
identifier[timestep] [ identifier[cur_elements] [ literal[string] ]],
identifier[self] . identifier[_get_wx_units] ( identifier[params] , identifier[cur_elements] [ literal[string] ]))
keyword[else] :
identifier[new_timestep] . identifier[pressure_tendency] = identifier[Element] ( identifier[cur_elements] [ literal[string] ],
literal[string] )
identifier[new_day] . identifier[timesteps] . identifier[append] ( identifier[new_timestep] )
identifier[observation] . identifier[days] . identifier[append] ( identifier[new_day] )
keyword[return] identifier[observation] | def get_observations_for_site(self, site_id, frequency='hourly'):
"""
Get observations for the provided site
Returns hourly observations for the previous 24 hours
"""
data = self.__call_api(site_id, {'res': frequency}, OBSERVATION_URL)
params = data['SiteRep']['Wx']['Param']
observation = Observation()
observation.data_date = data['SiteRep']['DV']['dataDate']
observation.data_date = datetime.strptime(data['SiteRep']['DV']['dataDate'], DATA_DATE_FORMAT).replace(tzinfo=pytz.UTC)
observation.continent = data['SiteRep']['DV']['Location']['continent']
observation.country = data['SiteRep']['DV']['Location']['country']
observation.name = data['SiteRep']['DV']['Location']['name']
observation.longitude = data['SiteRep']['DV']['Location']['lon']
observation.latitude = data['SiteRep']['DV']['Location']['lat']
observation.id = data['SiteRep']['DV']['Location']['i']
observation.elevation = data['SiteRep']['DV']['Location']['elevation']
for day in data['SiteRep']['DV']['Location']['Period']:
new_day = Day()
new_day.date = datetime.strptime(day['value'], DATE_FORMAT).replace(tzinfo=pytz.UTC)
# If the day only has 1 timestep, put it into a list by itself so it can be treated
# the same as a day with multiple timesteps
if type(day['Rep']) is not list:
day['Rep'] = [day['Rep']] # depends on [control=['if'], data=[]]
for timestep in day['Rep']:
# As stated in
# https://www.metoffice.gov.uk/datapoint/product/uk-hourly-site-specific-observations,
# some sites do not have all parameters available for
# observations. The documentation does not state which
# fields may be absent. If the parameter is not available,
# nothing is returned from the API. If this happens the
# value of the element is set to 'Not reported'. This may
# change to the element not being assigned to the timestep.
new_timestep = Timestep()
# Assume the '$' field is always present.
new_timestep.name = int(timestep['$'])
cur_elements = ELEMENTS['Observation']
new_timestep.date = datetime.strptime(day['value'], DATE_FORMAT).replace(tzinfo=pytz.UTC) + timedelta(minutes=int(timestep['$']))
if cur_elements['W'] in timestep:
new_timestep.weather = Element(cur_elements['W'], timestep[cur_elements['W']], self._get_wx_units(params, cur_elements['W']))
new_timestep.weather.text = self._weather_to_text(int(timestep[cur_elements['W']])) # depends on [control=['if'], data=['timestep']]
else:
new_timestep.weather = Element(cur_elements['W'], 'Not reported')
if cur_elements['T'] in timestep:
new_timestep.temperature = Element(cur_elements['T'], float(timestep[cur_elements['T']]), self._get_wx_units(params, cur_elements['T'])) # depends on [control=['if'], data=['timestep']]
else:
new_timestep.temperature = Element(cur_elements['T'], 'Not reported')
if 'S' in timestep:
new_timestep.wind_speed = Element(cur_elements['S'], int(timestep[cur_elements['S']]), self._get_wx_units(params, cur_elements['S'])) # depends on [control=['if'], data=['timestep']]
else:
new_timestep.wind_speed = Element(cur_elements['S'], 'Not reported')
if 'D' in timestep:
new_timestep.wind_direction = Element(cur_elements['D'], timestep[cur_elements['D']], self._get_wx_units(params, cur_elements['D'])) # depends on [control=['if'], data=['timestep']]
else:
new_timestep.wind_direction = Element(cur_elements['D'], 'Not reported')
if cur_elements['V'] in timestep:
new_timestep.visibility = Element(cur_elements['V'], int(timestep[cur_elements['V']]), self._get_wx_units(params, cur_elements['V']))
new_timestep.visibility.text = self._visibility_to_text(int(timestep[cur_elements['V']])) # depends on [control=['if'], data=['timestep']]
else:
new_timestep.visibility = Element(cur_elements['V'], 'Not reported')
if cur_elements['H'] in timestep:
new_timestep.humidity = Element(cur_elements['H'], float(timestep[cur_elements['H']]), self._get_wx_units(params, cur_elements['H'])) # depends on [control=['if'], data=['timestep']]
else:
new_timestep.humidity = Element(cur_elements['H'], 'Not reported')
if cur_elements['Dp'] in timestep:
new_timestep.dew_point = Element(cur_elements['Dp'], float(timestep[cur_elements['Dp']]), self._get_wx_units(params, cur_elements['Dp'])) # depends on [control=['if'], data=['timestep']]
else:
new_timestep.dew_point = Element(cur_elements['Dp'], 'Not reported')
if cur_elements['P'] in timestep:
new_timestep.pressure = Element(cur_elements['P'], float(timestep[cur_elements['P']]), self._get_wx_units(params, cur_elements['P'])) # depends on [control=['if'], data=['timestep']]
else:
new_timestep.pressure = Element(cur_elements['P'], 'Not reported')
if cur_elements['Pt'] in timestep:
new_timestep.pressure_tendency = Element(cur_elements['Pt'], timestep[cur_elements['Pt']], self._get_wx_units(params, cur_elements['Pt'])) # depends on [control=['if'], data=['timestep']]
else:
new_timestep.pressure_tendency = Element(cur_elements['Pt'], 'Not reported')
new_day.timesteps.append(new_timestep) # depends on [control=['for'], data=['timestep']]
observation.days.append(new_day) # depends on [control=['for'], data=['day']]
return observation |
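Two defensive moves carry this parser: a day holding a single timestep is wrapped into a one-element list so both shapes iterate the same way, and every optional field falls back to 'Not reported'. A minimal sketch of both moves, with made-up field values standing in for the DataPoint payload:

def extract_element(timestep, key, convert=str):
    # Same fallback as the observation parser: convert when present,
    # 'Not reported' otherwise.
    if key in timestep:
        return convert(timestep[key])
    return 'Not reported'

raw_day = {'Rep': {'$': '60', 'T': '11.3'}}  # one timestep, not a list
if not isinstance(raw_day['Rep'], list):
    raw_day['Rep'] = [raw_day['Rep']]        # normalize to a list

for ts in raw_day['Rep']:
    print(extract_element(ts, 'T', float))   # 11.3
    print(extract_element(ts, 'W', int))     # Not reported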
def load_dataframe(self, df_loader_name):
"""
Instead of joining a DataFrameJoiner with the Cohort in `as_dataframe`, sometimes
we may want to just directly load a particular DataFrame.
"""
logger.debug("loading dataframe: {}".format(df_loader_name))
# Get the DataFrameLoader object corresponding to this name.
df_loaders = [df_loader for df_loader in self.df_loaders if df_loader.name == df_loader_name]
if len(df_loaders) == 0:
raise ValueError("No DataFrameLoader with name %s" % df_loader_name)
if len(df_loaders) > 1:
raise ValueError("Multiple DataFrameLoaders with name %s" % df_loader_name)
return df_loaders[0].load_dataframe() | def function[load_dataframe, parameter[self, df_loader_name]]:
constant[
Instead of joining a DataFrameJoiner with the Cohort in `as_dataframe`, sometimes
we may want to just directly load a particular DataFrame.
]
call[name[logger].debug, parameter[call[constant[loading dataframe: {}].format, parameter[name[df_loader_name]]]]]
variable[df_loaders] assign[=] <ast.ListComp object at 0x7da1b26ae5f0>
if compare[call[name[len], parameter[name[df_loaders]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b26ac610>
if compare[call[name[len], parameter[name[df_loaders]]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da1b26ae6e0>
return[call[call[name[df_loaders]][constant[0]].load_dataframe, parameter[]]] | keyword[def] identifier[load_dataframe] ( identifier[self] , identifier[df_loader_name] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[df_loader_name] ))
identifier[df_loaders] =[ identifier[df_loader] keyword[for] identifier[df_loader] keyword[in] identifier[self] . identifier[df_loaders] keyword[if] identifier[df_loader] . identifier[name] == identifier[df_loader_name] ]
keyword[if] identifier[len] ( identifier[df_loaders] )== literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[df_loader_name] )
keyword[if] identifier[len] ( identifier[df_loaders] )> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[df_loader_name] )
keyword[return] identifier[df_loaders] [ literal[int] ]. identifier[load_dataframe] () | def load_dataframe(self, df_loader_name):
"""
Instead of joining a DataFrameJoiner with the Cohort in `as_dataframe`, sometimes
we may want to just directly load a particular DataFrame.
"""
logger.debug('loading dataframe: {}'.format(df_loader_name))
# Get the DataFrameLoader object corresponding to this name.
df_loaders = [df_loader for df_loader in self.df_loaders if df_loader.name == df_loader_name]
if len(df_loaders) == 0:
raise ValueError('No DataFrameLoader with name %s' % df_loader_name) # depends on [control=['if'], data=[]]
if len(df_loaders) > 1:
raise ValueError('Multiple DataFrameLoaders with name %s' % df_loader_name) # depends on [control=['if'], data=[]]
return df_loaders[0].load_dataframe() |
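The lookup above enforces exactly-one-match semantics over the cohort's named loaders: zero hits and multiple hits are both errors. The same shape as a small generic helper (find_unique and Loader are names introduced here for illustration):

from collections import namedtuple

Loader = namedtuple('Loader', 'name')

def find_unique(items, name):
    # Exactly-one-match lookup, as in load_dataframe.
    matches = [item for item in items if item.name == name]
    if len(matches) == 0:
        raise ValueError('No item with name %s' % name)
    if len(matches) > 1:
        raise ValueError('Multiple items with name %s' % name)
    return matches[0]

print(find_unique([Loader('a'), Loader('b')], 'b'))  # Loader(name='b')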
def _replace_auth_key(
user,
key,
enc='ssh-rsa',
comment='',
options=None,
config='.ssh/authorized_keys'):
'''
Replace an existing key
'''
auth_line = _format_auth_line(key, enc, comment, options or [])
lines = []
full = _get_config_file(user, config)
try:
# open the file for both reading AND writing
with salt.utils.files.fopen(full, 'r') as _fh:
for line in _fh:
# We don't need any whitespace-only containing lines or arbitrary doubled newlines
line = salt.utils.stringutils.to_unicode(line.strip())
if line == '':
continue
line += '\n'
if line.startswith('#'):
# Commented Line
lines.append(line)
continue
comps = re.findall(r'((.*)\s)?(ssh-[a-z0-9-]+|ecdsa-[a-z0-9-]+)\s([a-zA-Z0-9+/]+={0,2})(\s(.*))?', line)
if comps and len(comps[0]) > 3 and comps[0][3] == key:
# Found our key, replace it
lines.append(auth_line)
else:
lines.append(line)
_fh.close()
# Re-open the file writable after properly closing it
with salt.utils.files.fopen(full, 'wb') as _fh:
# Write out any changes
_fh.writelines(salt.utils.data.encode(lines))
except (IOError, OSError) as exc:
raise CommandExecutionError(
'Problem reading or writing to key file: {0}'.format(exc)
) | def function[_replace_auth_key, parameter[user, key, enc, comment, options, config]]:
constant[
Replace an existing key
]
variable[auth_line] assign[=] call[name[_format_auth_line], parameter[name[key], name[enc], name[comment], <ast.BoolOp object at 0x7da18ede5d50>]]
variable[lines] assign[=] list[[]]
variable[full] assign[=] call[name[_get_config_file], parameter[name[user], name[config]]]
<ast.Try object at 0x7da18ede50f0> | keyword[def] identifier[_replace_auth_key] (
identifier[user] ,
identifier[key] ,
identifier[enc] = literal[string] ,
identifier[comment] = literal[string] ,
identifier[options] = keyword[None] ,
identifier[config] = literal[string] ):
literal[string]
identifier[auth_line] = identifier[_format_auth_line] ( identifier[key] , identifier[enc] , identifier[comment] , identifier[options] keyword[or] [])
identifier[lines] =[]
identifier[full] = identifier[_get_config_file] ( identifier[user] , identifier[config] )
keyword[try] :
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[full] , literal[string] ) keyword[as] identifier[_fh] :
keyword[for] identifier[line] keyword[in] identifier[_fh] :
identifier[line] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[line] . identifier[strip] ())
keyword[if] identifier[line] == literal[string] :
keyword[continue]
identifier[line] += literal[string]
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[lines] . identifier[append] ( identifier[line] )
keyword[continue]
identifier[comps] = identifier[re] . identifier[findall] ( literal[string] , identifier[line] )
keyword[if] identifier[comps] keyword[and] identifier[len] ( identifier[comps] [ literal[int] ])> literal[int] keyword[and] identifier[comps] [ literal[int] ][ literal[int] ]== identifier[key] :
identifier[lines] . identifier[append] ( identifier[auth_line] )
keyword[else] :
identifier[lines] . identifier[append] ( identifier[line] )
identifier[_fh] . identifier[close] ()
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[full] , literal[string] ) keyword[as] identifier[_fh] :
identifier[_fh] . identifier[writelines] ( identifier[salt] . identifier[utils] . identifier[data] . identifier[encode] ( identifier[lines] ))
keyword[except] ( identifier[IOError] , identifier[OSError] ) keyword[as] identifier[exc] :
keyword[raise] identifier[CommandExecutionError] (
literal[string] . identifier[format] ( identifier[exc] )
) | def _replace_auth_key(user, key, enc='ssh-rsa', comment='', options=None, config='.ssh/authorized_keys'):
"""
Replace an existing key
"""
auth_line = _format_auth_line(key, enc, comment, options or [])
lines = []
full = _get_config_file(user, config)
try:
# open the file for both reading AND writing
with salt.utils.files.fopen(full, 'r') as _fh:
for line in _fh:
# We don't need any whitespace-only containing lines or arbitrary doubled newlines
line = salt.utils.stringutils.to_unicode(line.strip())
if line == '':
continue # depends on [control=['if'], data=[]]
line += '\n'
if line.startswith('#'):
# Commented Line
lines.append(line)
continue # depends on [control=['if'], data=[]]
comps = re.findall('((.*)\\s)?(ssh-[a-z0-9-]+|ecdsa-[a-z0-9-]+)\\s([a-zA-Z0-9+/]+={0,2})(\\s(.*))?', line)
if comps and len(comps[0]) > 3 and (comps[0][3] == key):
# Found our key, replace it
lines.append(auth_line) # depends on [control=['if'], data=[]]
else:
lines.append(line) # depends on [control=['for'], data=['line']]
_fh.close()
# Re-open the file writable after properly closing it
with salt.utils.files.fopen(full, 'wb') as _fh:
# Write out any changes
_fh.writelines(salt.utils.data.encode(lines)) # depends on [control=['with'], data=['_fh']] # depends on [control=['with'], data=['_fh']] # depends on [control=['try'], data=[]]
except (IOError, OSError) as exc:
raise CommandExecutionError('Problem reading or writing to key file: {0}'.format(exc)) # depends on [control=['except'], data=['exc']] |
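The heart of _replace_auth_key is a line-by-line rewrite: blank lines are dropped, comments pass through, and a line is replaced when the regex's fourth group (the base64 key blob) matches the target key. A stdlib-only sketch of the in-memory part, with the regex copied from the record and the salt file helpers replaced by plain lists of lines:

import re

KEY_RE = re.compile(
    r'((.*)\s)?(ssh-[a-z0-9-]+|ecdsa-[a-z0-9-]+)\s([a-zA-Z0-9+/]+={0,2})(\s(.*))?')

def replace_key_lines(lines, key, new_auth_line):
    # Keep comments and non-matching entries; swap in new_auth_line
    # for the entry whose base64 blob (group 4) equals `key`.
    out = []
    for line in lines:
        line = line.strip()
        if not line:
            continue
        if line.startswith('#'):
            out.append(line + '\n')
            continue
        comps = KEY_RE.findall(line)
        if comps and comps[0][3] == key:
            out.append(new_auth_line)
        else:
            out.append(line + '\n')
    return out

entries = ['# managed by salt', 'ssh-rsa AAAAB3Nza bob@host', '']
print(replace_key_lines(entries, 'AAAAB3Nza', 'ssh-rsa NEWKEY bob@host\n'))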
def _generate_examples(self, archive_paths, objects_getter, bboxes_getter,
prefixes=None):
"""Yields examples."""
trainable_classes = set(
self.info.features['objects_trainable']['label'].names)
for i, archive_path in enumerate(archive_paths):
prefix = prefixes[i] if prefixes else None
objects = objects_getter(prefix)
bboxes = bboxes_getter(prefix)
logging.info('Opening archive %s ...', archive_path)
archive = tfds.download.iter_archive(
archive_path, tfds.download.ExtractMethod.TAR_STREAM)
for fpath, fobj in archive:
fname = os.path.basename(fpath)
image_id = int(os.path.splitext(fname)[0], 16)
image_objects = [obj._asdict() for obj in objects.get(image_id, [])]
image_bboxes = [bbox._asdict() for bbox in bboxes.get(image_id, [])]
image_objects_trainable = [
obj for obj in image_objects if obj['label'] in trainable_classes
]
yield {
'image': _resize_image_if_necessary(
fobj, target_pixels=self.builder_config.target_pixels),
'image/filename': fname,
'objects': image_objects,
'objects_trainable': image_objects_trainable,
'bobjects': image_bboxes,
} | def function[_generate_examples, parameter[self, archive_paths, objects_getter, bboxes_getter, prefixes]]:
constant[Yields examples.]
variable[trainable_classes] assign[=] call[name[set], parameter[call[call[name[self].info.features][constant[objects_trainable]]][constant[label]].names]]
for taget[tuple[[<ast.Name object at 0x7da1b2062680>, <ast.Name object at 0x7da1b2063280>]]] in starred[call[name[enumerate], parameter[name[archive_paths]]]] begin[:]
variable[prefix] assign[=] <ast.IfExp object at 0x7da1b2060310>
variable[objects] assign[=] call[name[objects_getter], parameter[name[prefix]]]
variable[bboxes] assign[=] call[name[bboxes_getter], parameter[name[prefix]]]
call[name[logging].info, parameter[constant[Opening archive %s ...], name[archive_path]]]
variable[archive] assign[=] call[name[tfds].download.iter_archive, parameter[name[archive_path], name[tfds].download.ExtractMethod.TAR_STREAM]]
for taget[tuple[[<ast.Name object at 0x7da1b2012da0>, <ast.Name object at 0x7da1b2011690>]]] in starred[name[archive]] begin[:]
variable[fname] assign[=] call[name[os].path.basename, parameter[name[fpath]]]
variable[image_id] assign[=] call[name[int], parameter[call[call[name[os].path.splitext, parameter[name[fname]]]][constant[0]], constant[16]]]
variable[image_objects] assign[=] <ast.ListComp object at 0x7da1b2011e70>
variable[image_bboxes] assign[=] <ast.ListComp object at 0x7da1b2011ba0>
variable[image_objects_trainable] assign[=] <ast.ListComp object at 0x7da1b20106a0>
<ast.Yield object at 0x7da1b2010fd0> | keyword[def] identifier[_generate_examples] ( identifier[self] , identifier[archive_paths] , identifier[objects_getter] , identifier[bboxes_getter] ,
identifier[prefixes] = keyword[None] ):
literal[string]
identifier[trainable_classes] = identifier[set] (
identifier[self] . identifier[info] . identifier[features] [ literal[string] ][ literal[string] ]. identifier[names] )
keyword[for] identifier[i] , identifier[archive_path] keyword[in] identifier[enumerate] ( identifier[archive_paths] ):
identifier[prefix] = identifier[prefixes] [ identifier[i] ] keyword[if] identifier[prefixes] keyword[else] keyword[None]
identifier[objects] = identifier[objects_getter] ( identifier[prefix] )
identifier[bboxes] = identifier[bboxes_getter] ( identifier[prefix] )
identifier[logging] . identifier[info] ( literal[string] , identifier[archive_path] )
identifier[archive] = identifier[tfds] . identifier[download] . identifier[iter_archive] (
identifier[archive_path] , identifier[tfds] . identifier[download] . identifier[ExtractMethod] . identifier[TAR_STREAM] )
keyword[for] identifier[fpath] , identifier[fobj] keyword[in] identifier[archive] :
identifier[fname] = identifier[os] . identifier[path] . identifier[basename] ( identifier[fpath] )
identifier[image_id] = identifier[int] ( identifier[os] . identifier[path] . identifier[splitext] ( identifier[fname] )[ literal[int] ], literal[int] )
identifier[image_objects] =[ identifier[obj] . identifier[_asdict] () keyword[for] identifier[obj] keyword[in] identifier[objects] . identifier[get] ( identifier[image_id] ,[])]
identifier[image_bboxes] =[ identifier[bbox] . identifier[_asdict] () keyword[for] identifier[bbox] keyword[in] identifier[bboxes] . identifier[get] ( identifier[image_id] ,[])]
identifier[image_objects_trainable] =[
identifier[obj] keyword[for] identifier[obj] keyword[in] identifier[image_objects] keyword[if] identifier[obj] [ literal[string] ] keyword[in] identifier[trainable_classes]
]
keyword[yield] {
literal[string] : identifier[_resize_image_if_necessary] (
identifier[fobj] , identifier[target_pixels] = identifier[self] . identifier[builder_config] . identifier[target_pixels] ),
literal[string] : identifier[fname] ,
literal[string] : identifier[image_objects] ,
literal[string] : identifier[image_objects_trainable] ,
literal[string] : identifier[image_bboxes] ,
} | def _generate_examples(self, archive_paths, objects_getter, bboxes_getter, prefixes=None):
"""Yields examples."""
trainable_classes = set(self.info.features['objects_trainable']['label'].names)
for (i, archive_path) in enumerate(archive_paths):
prefix = prefixes[i] if prefixes else None
objects = objects_getter(prefix)
bboxes = bboxes_getter(prefix)
logging.info('Opening archive %s ...', archive_path)
archive = tfds.download.iter_archive(archive_path, tfds.download.ExtractMethod.TAR_STREAM)
for (fpath, fobj) in archive:
fname = os.path.basename(fpath)
image_id = int(os.path.splitext(fname)[0], 16)
image_objects = [obj._asdict() for obj in objects.get(image_id, [])]
image_bboxes = [bbox._asdict() for bbox in bboxes.get(image_id, [])]
image_objects_trainable = [obj for obj in image_objects if obj['label'] in trainable_classes]
yield {'image': _resize_image_if_necessary(fobj, target_pixels=self.builder_config.target_pixels), 'image/filename': fname, 'objects': image_objects, 'objects_trainable': image_objects_trainable, 'bobjects': image_bboxes} # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] |
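The generator above joins per-image annotations to the images streamed out of a tar archive, filters the objects down to a trainable-class subset, and yields one feature dict per image. A stdlib-only sketch of that filter-and-yield shape, with the TFDS archive iteration replaced by a plain dict of image bytes:

def generate_examples(images, objects_by_id, trainable_classes):
    # Same shape as the TFDS generator: look up per-image annotations,
    # keep only trainable labels, and yield one feature dict per image.
    for image_id, image_bytes in images.items():
        image_objects = objects_by_id.get(image_id, [])
        trainable = [o for o in image_objects
                     if o['label'] in trainable_classes]
        yield {
            'image': image_bytes,
            'objects': image_objects,
            'objects_trainable': trainable,
        }

examples = generate_examples(
    {0xBEEF: b'...'},
    {0xBEEF: [{'label': 'cat'}, {'label': 'unicorn'}]},
    trainable_classes={'cat'},
)
print(next(examples)['objects_trainable'])  # [{'label': 'cat'}]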