| code (stringlengths 75-104k) | code_sememe (stringlengths 47-309k) | token_type (stringlengths 215-214k) | code_dependency (stringlengths 75-155k) |
|---|---|---|---|
def save_dict_to_hdf5(dic, filename):
"""
....
"""
with h5py.File(filename, 'w') as h5file:
rf = recursively_save_dict_contents_to_group(h5file, '/', dic)
h5_rf = h5file.create_group("_reconstruction_flags")
# h5_rf = h5file.create_group("_reconstruction_key_flags")
for k, v in rf.items():
h5_rf.create_dataset("/_reconstruction_flags" + k, data=v) | def function[save_dict_to_hdf5, parameter[dic, filename]]:
constant[
....
]
with call[name[h5py].File, parameter[name[filename], constant[w]]] begin[:]
variable[rf] assign[=] call[name[recursively_save_dict_contents_to_group], parameter[name[h5file], constant[/], name[dic]]]
variable[h5_rf] assign[=] call[name[h5file].create_group, parameter[constant[_reconstruction_flags]]]
for taget[tuple[[<ast.Name object at 0x7da1b1874a60>, <ast.Name object at 0x7da1b18757b0>]]] in starred[call[name[rf].items, parameter[]]] begin[:]
call[name[h5_rf].create_dataset, parameter[binary_operation[constant[/_reconstruction_flags] + name[k]]]] | keyword[def] identifier[save_dict_to_hdf5] ( identifier[dic] , identifier[filename] ):
literal[string]
keyword[with] identifier[h5py] . identifier[File] ( identifier[filename] , literal[string] ) keyword[as] identifier[h5file] :
identifier[rf] = identifier[recursively_save_dict_contents_to_group] ( identifier[h5file] , literal[string] , identifier[dic] )
identifier[h5_rf] = identifier[h5file] . identifier[create_group] ( literal[string] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[rf] . identifier[items] ():
identifier[h5_rf] . identifier[create_dataset] ( literal[string] + identifier[k] , identifier[data] = identifier[v] ) | def save_dict_to_hdf5(dic, filename):
"""
....
"""
with h5py.File(filename, 'w') as h5file:
rf = recursively_save_dict_contents_to_group(h5file, '/', dic)
h5_rf = h5file.create_group('_reconstruction_flags')
# h5_rf = h5file.create_group("_reconstruction_key_flags")
for (k, v) in rf.items():
h5_rf.create_dataset('/_reconstruction_flags' + k, data=v) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['h5file']] |
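The row above calls `recursively_save_dict_contents_to_group` without defining it. Below is a minimal sketch of what such a helper could look like, not the original implementation: it assumes nested dicts whose leaves are numpy-storable, and returns a flag dict keyed by HDF5 path (matching how `save_dict_to_hdf5` prefixes each key with `/_reconstruction_flags`).

```python
import h5py
import numpy as np

def recursively_save_dict_contents_to_group(h5file, path, dic):
    """Write each leaf of `dic` as a dataset under `path`; return a dict of
    reconstruction flags keyed by the HDF5 path of each stored value."""
    flags = {}
    for key, item in dic.items():
        if isinstance(item, dict):
            # Recurse into sub-dicts, extending the HDF5 path.
            flags.update(recursively_save_dict_contents_to_group(
                h5file, path + key + '/', item))
        else:
            h5file[path + key] = np.asarray(item)
            flags[path + key] = type(item).__name__  # e.g. 'list', 'ndarray'
    return flags

# save_dict_to_hdf5({'model': {'weights': [0.1, 0.2]}}, 'state.h5')
```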
def load(self, env=None):
""" Load a section values of given environment.
If nothing to specified, use environmental variable.
If unknown environment was specified, warn it on logger.
:param env: environment key to load in a coercive manner
:type env: string
:rtype: dict
"""
self._load()
e = env or \
os.environ.get(RUNNING_MODE_ENVKEY, DEFAULT_RUNNING_MODE)
if e in self.config:
return self.config[e]
logging.warn("Environment '%s' was not found.", e) | def function[load, parameter[self, env]]:
constant[ Load the section values of a given environment.
If nothing is specified, use the environment variable.
If an unknown environment is specified, log a warning.
:param env: environment key to load in a coercive manner
:type env: string
:rtype: dict
]
call[name[self]._load, parameter[]]
variable[e] assign[=] <ast.BoolOp object at 0x7da204620b50>
if compare[name[e] in name[self].config] begin[:]
return[call[name[self].config][name[e]]]
call[name[logging].warn, parameter[constant[Environment '%s' was not found.], name[e]]] | keyword[def] identifier[load] ( identifier[self] , identifier[env] = keyword[None] ):
literal[string]
identifier[self] . identifier[_load] ()
identifier[e] = identifier[env] keyword[or] identifier[os] . identifier[environ] . identifier[get] ( identifier[RUNNING_MODE_ENVKEY] , identifier[DEFAULT_RUNNING_MODE] )
keyword[if] identifier[e] keyword[in] identifier[self] . identifier[config] :
keyword[return] identifier[self] . identifier[config] [ identifier[e] ]
identifier[logging] . identifier[warn] ( literal[string] , identifier[e] ) | def load(self, env=None):
""" Load a section values of given environment.
If nothing to specified, use environmental variable.
If unknown environment was specified, warn it on logger.
:param env: environment key to load in a coercive manner
:type env: string
:rtype: dict
"""
self._load()
e = env or os.environ.get(RUNNING_MODE_ENVKEY, DEFAULT_RUNNING_MODE)
if e in self.config:
return self.config[e] # depends on [control=['if'], data=['e']]
logging.warn("Environment '%s' was not found.", e) |
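A hedged usage sketch of the loader above. `RUNNING_MODE_ENVKEY` and `DEFAULT_RUNNING_MODE` are module-level constants the row does not show, so the names and values here are assumptions, and `conf` stands in for an instance of the surrounding class.

```python
import os

RUNNING_MODE_ENVKEY = 'RUNNING_MODE'     # assumed constant name
DEFAULT_RUNNING_MODE = 'development'     # assumed default

os.environ[RUNNING_MODE_ENVKEY] = 'production'
settings = conf.load()                   # resolves the 'production' section
dev_settings = conf.load('development')  # explicit env overrides the variable
```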
def set_show_all(self, state):
"""Toggle 'show all files' state"""
if state:
self.fsmodel.setNameFilters([])
else:
self.fsmodel.setNameFilters(self.name_filters) | def function[set_show_all, parameter[self, state]]:
constant[Toggle 'show all files' state]
if name[state] begin[:]
call[name[self].fsmodel.setNameFilters, parameter[list[[]]]] | keyword[def] identifier[set_show_all] ( identifier[self] , identifier[state] ):
literal[string]
keyword[if] identifier[state] :
identifier[self] . identifier[fsmodel] . identifier[setNameFilters] ([])
keyword[else] :
identifier[self] . identifier[fsmodel] . identifier[setNameFilters] ( identifier[self] . identifier[name_filters] ) | def set_show_all(self, state):
"""Toggle 'show all files' state"""
if state:
self.fsmodel.setNameFilters([]) # depends on [control=['if'], data=[]]
else:
self.fsmodel.setNameFilters(self.name_filters) |
def critical(cls, name, message, *args):
"""
Convenience function to log a message at the CRITICAL level.
:param name: The name of the logger instance in the VSG namespace (VSG.<name>)
:param message: A message format string.
:param args: The arguments that are merged into msg using the string formatting operator.
.. note:: The native logger's `kwargs` are not used in this function.
"""
cls.getLogger(name).critical(message, *args) | def function[critical, parameter[cls, name, message]]:
constant[
Convenience function to log a message at the CRITICAL level.
:param name: The name of the logger instance in the VSG namespace (VSG.<name>)
:param message: A message format string.
:param args: The arguments that are merged into msg using the string formatting operator.
.. note:: The native logger's `kwargs` are not used in this function.
]
call[call[name[cls].getLogger, parameter[name[name]]].critical, parameter[name[message], <ast.Starred object at 0x7da2054a5c60>]] | keyword[def] identifier[critical] ( identifier[cls] , identifier[name] , identifier[message] ,* identifier[args] ):
literal[string]
identifier[cls] . identifier[getLogger] ( identifier[name] ). identifier[critical] ( identifier[message] ,* identifier[args] ) | def critical(cls, name, message, *args):
"""
Convenience function to log a message at the CRITICAL level.
:param name: The name of the logger instance in the VSG namespace (VSG.<name>)
:param message: A message format string.
:param args: The arguments that are merged into msg using the string formatting operator.
.. note:: The native logger's `kwargs` are not used in this function.
"""
cls.getLogger(name).critical(message, *args) |
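To make the classmethod convenience above runnable in isolation, here is a minimal stand-in for the surrounding class. The `VSG.` namespace prefix comes from the docstring; the class name and the example call are assumptions.

```python
import logging

class VSGLog(object):
    @classmethod
    def getLogger(cls, name):
        # Loggers live in the VSG namespace, per the docstring.
        return logging.getLogger('VSG.' + name)

    @classmethod
    def critical(cls, name, message, *args):
        cls.getLogger(name).critical(message, *args)

logging.basicConfig()
VSGLog.critical('rules', 'rule %s failed on line %d', 'whitespace_500', 42)
```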
def find_matching_endpoints(self, discovery_ns):
"""
Compute current matching endpoints.
Evaluated as a property to defer evaluation.
"""
def match_func(operation, ns, rule):
return operation in self.matching_operations
return list(iter_endpoints(self.graph, match_func)) | def function[find_matching_endpoints, parameter[self, discovery_ns]]:
constant[
Compute current matching endpoints.
Evaluated as a property to defer evaluation.
]
def function[match_func, parameter[operation, ns, rule]]:
return[compare[name[operation] in name[self].matching_operations]]
return[call[name[list], parameter[call[name[iter_endpoints], parameter[name[self].graph, name[match_func]]]]]] | keyword[def] identifier[find_matching_endpoints] ( identifier[self] , identifier[discovery_ns] ):
literal[string]
keyword[def] identifier[match_func] ( identifier[operation] , identifier[ns] , identifier[rule] ):
keyword[return] identifier[operation] keyword[in] identifier[self] . identifier[matching_operations]
keyword[return] identifier[list] ( identifier[iter_endpoints] ( identifier[self] . identifier[graph] , identifier[match_func] )) | def find_matching_endpoints(self, discovery_ns):
"""
Compute current matching endpoints.
Evaluated as a property to defer evaluation.
"""
def match_func(operation, ns, rule):
return operation in self.matching_operations
return list(iter_endpoints(self.graph, match_func)) |
def splits(cls, text_field, label_field, parse_field=None,
extra_fields={}, root='.data', train='train.jsonl',
validation='val.jsonl', test='test.jsonl'):
"""Create dataset objects for splits of the SNLI dataset.
This is the most flexible way to use the dataset.
Arguments:
text_field: The field that will be used for premise and hypothesis
data.
label_field: The field that will be used for label data.
parse_field: The field that will be used for shift-reduce parser
transitions, or None to not include them.
extra_fields: A dict[json_key: Tuple(field_name, Field)]
root: The root directory that the dataset's zip archive will be
expanded into.
train: The filename of the train data. Default: 'train.jsonl'.
validation: The filename of the validation data, or None to not
load the validation set. Default: 'val.jsonl'.
test: The filename of the test data, or None to not load the test
set. Default: 'test.jsonl'.
"""
path = cls.download(root)
if parse_field is None:
fields = {'sentence1': ('premise', text_field),
'sentence2': ('hypothesis', text_field),
'gold_label': ('label', label_field)}
else:
fields = {'sentence1_binary_parse': [('premise', text_field),
('premise_transitions', parse_field)],
'sentence2_binary_parse': [('hypothesis', text_field),
('hypothesis_transitions', parse_field)],
'gold_label': ('label', label_field)}
for key in extra_fields:
if key not in fields.keys():
fields[key] = extra_fields[key]
return super(NLIDataset, cls).splits(
path, root, train, validation, test,
format='json', fields=fields,
filter_pred=lambda ex: ex.label != '-') | def function[splits, parameter[cls, text_field, label_field, parse_field, extra_fields, root, train, validation, test]]:
constant[Create dataset objects for splits of the SNLI dataset.
This is the most flexible way to use the dataset.
Arguments:
text_field: The field that will be used for premise and hypothesis
data.
label_field: The field that will be used for label data.
parse_field: The field that will be used for shift-reduce parser
transitions, or None to not include them.
extra_fields: A dict[json_key: Tuple(field_name, Field)]
root: The root directory that the dataset's zip archive will be
expanded into.
train: The filename of the train data. Default: 'train.jsonl'.
validation: The filename of the validation data, or None to not
load the validation set. Default: 'val.jsonl'.
test: The filename of the test data, or None to not load the test
set. Default: 'test.jsonl'.
]
variable[path] assign[=] call[name[cls].download, parameter[name[root]]]
if compare[name[parse_field] is constant[None]] begin[:]
variable[fields] assign[=] dictionary[[<ast.Constant object at 0x7da1b216db10>, <ast.Constant object at 0x7da1b216fca0>, <ast.Constant object at 0x7da1b216f250>], [<ast.Tuple object at 0x7da1b216f820>, <ast.Tuple object at 0x7da1b216c850>, <ast.Tuple object at 0x7da1b216f1c0>]]
for taget[name[key]] in starred[name[extra_fields]] begin[:]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> call[name[fields].keys, parameter[]]] begin[:]
call[name[fields]][name[key]] assign[=] call[name[extra_fields]][name[key]]
return[call[call[name[super], parameter[name[NLIDataset], name[cls]]].splits, parameter[name[path], name[root], name[train], name[validation], name[test]]]] | keyword[def] identifier[splits] ( identifier[cls] , identifier[text_field] , identifier[label_field] , identifier[parse_field] = keyword[None] ,
identifier[extra_fields] ={}, identifier[root] = literal[string] , identifier[train] = literal[string] ,
identifier[validation] = literal[string] , identifier[test] = literal[string] ):
literal[string]
identifier[path] = identifier[cls] . identifier[download] ( identifier[root] )
keyword[if] identifier[parse_field] keyword[is] keyword[None] :
identifier[fields] ={ literal[string] :( literal[string] , identifier[text_field] ),
literal[string] :( literal[string] , identifier[text_field] ),
literal[string] :( literal[string] , identifier[label_field] )}
keyword[else] :
identifier[fields] ={ literal[string] :[( literal[string] , identifier[text_field] ),
( literal[string] , identifier[parse_field] )],
literal[string] :[( literal[string] , identifier[text_field] ),
( literal[string] , identifier[parse_field] )],
literal[string] :( literal[string] , identifier[label_field] )}
keyword[for] identifier[key] keyword[in] identifier[extra_fields] :
keyword[if] identifier[key] keyword[not] keyword[in] identifier[fields] . identifier[keys] ():
identifier[fields] [ identifier[key] ]= identifier[extra_fields] [ identifier[key] ]
keyword[return] identifier[super] ( identifier[NLIDataset] , identifier[cls] ). identifier[splits] (
identifier[path] , identifier[root] , identifier[train] , identifier[validation] , identifier[test] ,
identifier[format] = literal[string] , identifier[fields] = identifier[fields] ,
identifier[filter_pred] = keyword[lambda] identifier[ex] : identifier[ex] . identifier[label] != literal[string] ) | def splits(cls, text_field, label_field, parse_field=None, extra_fields={}, root='.data', train='train.jsonl', validation='val.jsonl', test='test.jsonl'):
"""Create dataset objects for splits of the SNLI dataset.
This is the most flexible way to use the dataset.
Arguments:
text_field: The field that will be used for premise and hypothesis
data.
label_field: The field that will be used for label data.
parse_field: The field that will be used for shift-reduce parser
transitions, or None to not include them.
extra_fields: A dict[json_key: Tuple(field_name, Field)]
root: The root directory that the dataset's zip archive will be
expanded into.
train: The filename of the train data. Default: 'train.jsonl'.
validation: The filename of the validation data, or None to not
load the validation set. Default: 'val.jsonl'.
test: The filename of the test data, or None to not load the test
set. Default: 'test.jsonl'.
"""
path = cls.download(root)
if parse_field is None:
fields = {'sentence1': ('premise', text_field), 'sentence2': ('hypothesis', text_field), 'gold_label': ('label', label_field)} # depends on [control=['if'], data=[]]
else:
fields = {'sentence1_binary_parse': [('premise', text_field), ('premise_transitions', parse_field)], 'sentence2_binary_parse': [('hypothesis', text_field), ('hypothesis_transitions', parse_field)], 'gold_label': ('label', label_field)}
for key in extra_fields:
if key not in fields.keys():
fields[key] = extra_fields[key] # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['key']]
return super(NLIDataset, cls).splits(path, root, train, validation, test, format='json', fields=fields, filter_pred=lambda ex: ex.label != '-') |
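A hedged usage sketch in the legacy `torchtext` style that this classmethod follows. `SNLI` stands in for a concrete `NLIDataset` subclass (not shown in this row), and the field settings are illustrative.

```python
from torchtext import data  # legacy torchtext.data API

TEXT = data.Field(lower=True)
LABEL = data.Field(sequential=False)

# splits() filters out examples whose gold label is '-' via filter_pred.
train, val, test = SNLI.splits(TEXT, LABEL)
TEXT.build_vocab(train)
LABEL.build_vocab(train)
```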
def _add_orfs(self, which, symbol, ind, val, dt_log=None, user=None, comment=None):
"""
Appends a single indexed-value pair, to a symbol object, to be
used during the final steps of the aggregation of the datatable.
See add_override and add_fail_safe.
Parameters
----------
which : str
Fail Safe or Override?
symbol : Symbol or str
The Symbol to apply the fail safe
ind : obj
The index value where the fail safe should be applied
val : obj
The data value which will be used in the fail safe
dt_log : datetime
A log entry, for saving when this fail safe was created.
user : str
A string representing which user made the fail safe
comment : str
A string to store any notes related to this fail safe.
"""
if not isinstance(symbol, (str, unicode)):
symbol = symbol.name
if not dt_log:
dt_log = dt.datetime.now()
if which.lower() == 'override':
qry = self.ses.query(func.max(Override.ornum).label('max_ornum'))
override = True
elif which.lower() == 'failsafe':
qry = self.ses.query(func.max(FailSafe.fsnum).label('max_fsnum'))
override = False
qry = qry.filter_by(symname = symbol)
cur_num = qry.one()
if cur_num[0] is None:
next_num = 0
else:
next_num = cur_num[0] + 1
if override:
tmp = Override(symname=symbol,
ind=ind,
val=val,
dt_log=dt_log,
user=user,
comment=comment,
ornum=next_num)
else:
tmp = FailSafe(symname=symbol,
ind=ind,
val=val,
dt_log=dt_log,
user=user,
comment=comment,
fsnum=next_num)
self.ses.add(tmp)
self.ses.commit() | def function[_add_orfs, parameter[self, which, symbol, ind, val, dt_log, user, comment]]:
constant[
Appends a single indexed-value pair, to a symbol object, to be
used during the final steps of the aggregation of the datatable.
See add_override and add_fail_safe.
Parameters
----------
which : str
Fail Safe or Override?
symbol : Symbol or str
The Symbol to apply the fail safe
ind : obj
The index value where the fail safe should be applied
val : obj
The data value which will be used in the fail safe
dt_log : datetime
A log entry, for saving when this fail safe was created.
user : str
A string representing which user made the fail safe
comment : str
A string to store any notes related to this fail safe.
]
if <ast.UnaryOp object at 0x7da20c9924d0> begin[:]
variable[symbol] assign[=] name[symbol].name
if <ast.UnaryOp object at 0x7da20c990400> begin[:]
variable[dt_log] assign[=] call[name[dt].datetime.now, parameter[]]
if compare[call[name[which].lower, parameter[]] equal[==] constant[override]] begin[:]
variable[qry] assign[=] call[name[self].ses.query, parameter[call[call[name[func].max, parameter[name[Override].ornum]].label, parameter[constant[max_ornum]]]]]
variable[override] assign[=] constant[True]
variable[qry] assign[=] call[name[qry].filter_by, parameter[]]
variable[cur_num] assign[=] call[name[qry].one, parameter[]]
if compare[call[name[cur_num]][constant[0]] is constant[None]] begin[:]
variable[next_num] assign[=] constant[0]
if name[override] begin[:]
variable[tmp] assign[=] call[name[Override], parameter[]]
call[name[self].ses.add, parameter[name[tmp]]]
call[name[self].ses.commit, parameter[]] | keyword[def] identifier[_add_orfs] ( identifier[self] , identifier[which] , identifier[symbol] , identifier[ind] , identifier[val] , identifier[dt_log] = keyword[None] , identifier[user] = keyword[None] , identifier[comment] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[symbol] ,( identifier[str] , identifier[unicode] )):
identifier[symbol] = identifier[symbol] . identifier[name]
keyword[if] keyword[not] identifier[dt_log] :
identifier[dt_log] = identifier[dt] . identifier[datetime] . identifier[now] ()
keyword[if] identifier[which] . identifier[lower] ()== literal[string] :
identifier[qry] = identifier[self] . identifier[ses] . identifier[query] ( identifier[func] . identifier[max] ( identifier[Override] . identifier[ornum] ). identifier[label] ( literal[string] ))
identifier[override] = keyword[True]
keyword[elif] identifier[which] . identifier[lower] ()== literal[string] :
identifier[qry] = identifier[self] . identifier[ses] . identifier[query] ( identifier[func] . identifier[max] ( identifier[FailSafe] . identifier[fsnum] ). identifier[label] ( literal[string] ))
identifier[override] = keyword[False]
identifier[qry] = identifier[qry] . identifier[filter_by] ( identifier[symname] = identifier[symbol] )
identifier[cur_num] = identifier[qry] . identifier[one] ()
keyword[if] identifier[cur_num] [ literal[int] ] keyword[is] keyword[None] :
identifier[next_num] = literal[int]
keyword[else] :
identifier[next_num] = identifier[cur_num] [ literal[int] ]+ literal[int]
keyword[if] identifier[override] :
identifier[tmp] = identifier[Override] ( identifier[symname] = identifier[symbol] ,
identifier[ind] = identifier[ind] ,
identifier[val] = identifier[val] ,
identifier[dt_log] = identifier[dt_log] ,
identifier[user] = identifier[user] ,
identifier[comment] = identifier[comment] ,
identifier[ornum] = identifier[next_num] )
keyword[else] :
identifier[tmp] = identifier[FailSafe] ( identifier[symname] = identifier[symbol] ,
identifier[ind] = identifier[ind] ,
identifier[val] = identifier[val] ,
identifier[dt_log] = identifier[dt_log] ,
identifier[user] = identifier[user] ,
identifier[comment] = identifier[comment] ,
identifier[fsnum] = identifier[next_num] )
identifier[self] . identifier[ses] . identifier[add] ( identifier[tmp] )
identifier[self] . identifier[ses] . identifier[commit] () | def _add_orfs(self, which, symbol, ind, val, dt_log=None, user=None, comment=None):
"""
Appends a single indexed-value pair, to a symbol object, to be
used during the final steps of the aggregation of the datatable.
See add_override and add_fail_safe.
Parameters
----------
which : str
Fail Safe or Override?
symbol : Symbol or str
The Symbol to apply the fail safe
ind : obj
The index value where the fail safe should be applied
val : obj
The data value which will be used in the fail safe
dt_log : datetime
A log entry, for saving when this fail safe was created.
user : str
A string representing which user made the fail safe
comment : str
A string to store any notes related to this fail safe.
"""
if not isinstance(symbol, (str, unicode)):
symbol = symbol.name # depends on [control=['if'], data=[]]
if not dt_log:
dt_log = dt.datetime.now() # depends on [control=['if'], data=[]]
if which.lower() == 'override':
qry = self.ses.query(func.max(Override.ornum).label('max_ornum'))
override = True # depends on [control=['if'], data=[]]
elif which.lower() == 'failsafe':
qry = self.ses.query(func.max(FailSafe.fsnum).label('max_fsnum'))
override = False # depends on [control=['if'], data=[]]
qry = qry.filter_by(symname=symbol)
cur_num = qry.one()
if cur_num[0] is None:
next_num = 0 # depends on [control=['if'], data=[]]
else:
next_num = cur_num[0] + 1
if override:
tmp = Override(symname=symbol, ind=ind, val=val, dt_log=dt_log, user=user, comment=comment, ornum=next_num) # depends on [control=['if'], data=[]]
else:
tmp = FailSafe(symname=symbol, ind=ind, val=val, dt_log=dt_log, user=user, comment=comment, fsnum=next_num)
self.ses.add(tmp)
self.ses.commit() |
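The numbering scheme above (take the current `max()` and add one) is easier to read in isolation. A sketch using the same SQLAlchemy calls; note that in the original, a `which` value other than 'override' or 'failsafe' would leave `qry` unbound and raise a `NameError`.

```python
from sqlalchemy import func

def _next_number(ses, num_col, symbol):
    """Return 0 for a symbol's first row, else max(num_col) + 1."""
    cur = ses.query(func.max(num_col)).filter_by(symname=symbol).one()
    return 0 if cur[0] is None else cur[0] + 1

# e.g. _next_number(self.ses, Override.ornum, 'oil_price')
```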
async def reply(self, *args, **kwargs):
"""
Replies to the message (as a reply). Shorthand for
`telethon.client.messages.MessageMethods.send_message`
with both ``entity`` and ``reply_to`` already set.
"""
kwargs['reply_to'] = self.id
return await self._client.send_message(
await self.get_input_chat(), *args, **kwargs) | <ast.AsyncFunctionDef object at 0x7da18ede7be0> | keyword[async] keyword[def] identifier[reply] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[id]
keyword[return] keyword[await] identifier[self] . identifier[_client] . identifier[send_message] (
keyword[await] identifier[self] . identifier[get_input_chat] (),* identifier[args] ,** identifier[kwargs] ) | async def reply(self, *args, **kwargs):
"""
Replies to the message (as a reply). Shorthand for
`telethon.client.messages.MessageMethods.send_message`
with both ``entity`` and ``reply_to`` already set.
"""
kwargs['reply_to'] = self.id
return await self._client.send_message(await self.get_input_chat(), *args, **kwargs) |
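A hedged usage sketch in Telethon's event-handler style; the credentials are placeholders and `ping`/`pong` is just an illustration.

```python
from telethon import TelegramClient, events

client = TelegramClient('session', api_id=12345, api_hash='0123456789abcdef')

@client.on(events.NewMessage(pattern='ping'))
async def handler(event):
    # reply() fills in both the entity and reply_to for us.
    await event.message.reply('pong')

client.start()
client.run_until_disconnected()
```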
def _get_controller_help(self, controller):
"""Return the value of the HELP attribute for a controller that should
describe the functionality of the controller.
:rtype: str|None
"""
if hasattr(self._controllers[controller], 'HELP'):
return self._controllers[controller].HELP
return None | def function[_get_controller_help, parameter[self, controller]]:
constant[Return the value of the HELP attribute for a controller that should
describe the functionality of the controller.
:rtype: str|None
]
if call[name[hasattr], parameter[call[name[self]._controllers][name[controller]], constant[HELP]]] begin[:]
return[call[name[self]._controllers][name[controller]].HELP]
return[constant[None]] | keyword[def] identifier[_get_controller_help] ( identifier[self] , identifier[controller] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[self] . identifier[_controllers] [ identifier[controller] ], literal[string] ):
keyword[return] identifier[self] . identifier[_controllers] [ identifier[controller] ]. identifier[HELP]
keyword[return] keyword[None] | def _get_controller_help(self, controller):
"""Return the value of the HELP attribute for a controller that should
describe the functionality of the controller.
:rtype: str|None
"""
if hasattr(self._controllers[controller], 'HELP'):
return self._controllers[controller].HELP # depends on [control=['if'], data=[]]
return None |
def execute_python_script(self, script):
"""
Execute a python script of the remote server
:param script: Inline script to convert to a file and execute remotely
:return: The output of the script execution
"""
# Create the local file to copy to remote
file_handle, filename = tempfile.mkstemp()
temp_file = os.fdopen(file_handle, "wt")
temp_file.write(script)
temp_file.close()
# Put the file into the remote user directory
self.put(filename, "python_execute.py")
command = ["python", "python_execute.py"]
# Execute the python script on the remote system, clean up, and return the output
output = self.execute(command, False)
self.remove("python_execute.py")
os.unlink(filename)
return output | def function[execute_python_script, parameter[self, script]]:
constant[
Execute a python script of the remote server
:param script: Inline script to convert to a file and execute remotely
:return: The output of the script execution
]
<ast.Tuple object at 0x7da1b12aab60> assign[=] call[name[tempfile].mkstemp, parameter[]]
variable[temp_file] assign[=] call[name[os].fdopen, parameter[name[file_handle], constant[wt]]]
call[name[temp_file].write, parameter[name[script]]]
call[name[temp_file].close, parameter[]]
call[name[self].put, parameter[name[filename], constant[python_execute.py]]]
variable[command] assign[=] list[[<ast.Constant object at 0x7da1b12aa1d0>, <ast.Constant object at 0x7da1b12a9fc0>]]
variable[output] assign[=] call[name[self].execute, parameter[name[command], constant[False]]]
call[name[self].remove, parameter[constant[python_execute.py]]]
call[name[os].unlink, parameter[name[filename]]]
return[name[output]] | keyword[def] identifier[execute_python_script] ( identifier[self] , identifier[script] ):
literal[string]
identifier[file_handle] , identifier[filename] = identifier[tempfile] . identifier[mkstemp] ()
identifier[temp_file] = identifier[os] . identifier[fdopen] ( identifier[file_handle] , literal[string] )
identifier[temp_file] . identifier[write] ( identifier[script] )
identifier[temp_file] . identifier[close] ()
identifier[self] . identifier[put] ( identifier[filename] , literal[string] )
identifier[command] =[ literal[string] , literal[string] ]
identifier[output] = identifier[self] . identifier[execute] ( identifier[command] , keyword[False] )
identifier[self] . identifier[remove] ( literal[string] )
identifier[os] . identifier[unlink] ( identifier[filename] )
keyword[return] identifier[output] | def execute_python_script(self, script):
"""
Execute a python script of the remote server
:param script: Inline script to convert to a file and execute remotely
:return: The output of the script execution
"""
# Create the local file to copy to remote
(file_handle, filename) = tempfile.mkstemp()
temp_file = os.fdopen(file_handle, 'wt')
temp_file.write(script)
temp_file.close()
# Put the file into the remote user directory
self.put(filename, 'python_execute.py')
command = ['python', 'python_execute.py']
# Execute the python script on the remote system, clean up, and return the output
output = self.execute(command, False)
self.remove('python_execute.py')
os.unlink(filename)
return output |
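Hypothetical usage, assuming the object exposes `put`, `execute`, and `remove` as the method body requires; `remote` is an assumed instance of the surrounding class.

```python
script = "\n".join([
    "import platform",
    "print(platform.node())",
])
output = remote.execute_python_script(script)
print(output)  # hostname of the remote machine
```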
def set(self, key, value):
""" Set a key's value regardless of whether a change is seen."""
return self.__setitem__(key, value, force=True) | def function[set, parameter[self, key, value]]:
constant[ Set a key's value regardless of whether a change is seen.]
return[call[name[self].__setitem__, parameter[name[key], name[value]]]] | keyword[def] identifier[set] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
keyword[return] identifier[self] . identifier[__setitem__] ( identifier[key] , identifier[value] , identifier[force] = keyword[True] ) | def set(self, key, value):
""" Set a key's value regardless of whether a change is seen."""
return self.__setitem__(key, value, force=True) |
def similar(self, threshold, **criterias):
'''Find text-based field matches with similarity (1-levenshtein/length)
higher than specified threshold (0 to 1, 1 being an exact match)'''
# XXX: use F from https://docs.djangoproject.com/en/1.8/ref/models/expressions/
meta = self.model._meta
funcs, params = list(), list()
for name,val in criterias.iteritems():
name = meta.get_field(name, many_to_many=False).column
name = '.'.join(it.imap(connection.ops.quote_name, (meta.db_table, name)))
# Alas, pg_trgm is for containment tests, not fuzzy matches,
# but it can potentially be used to find closest results as well
# funcs.append( 'similarity(CAST({0}.{1} as text), CAST(%s as text))'\
# Ok, these two are just to make sure levenshtein() won't crash
# w/ "argument exceeds the maximum length of N bytes error"
funcs.append('octet_length({0}) <= {1}'.format(name, self.levenshtein_limit))
funcs.append('octet_length(%s) <= {0}'.format(self.levenshtein_limit))
# Then there's a possibility of division by zero...
funcs.append('length({0}) > 0'.format(name))
# And if everything else fits, the comparison itself
funcs.append('levenshtein({0}, %s) / CAST(length({0}) AS numeric) < %s'.format(name))
params.extend((val, val, float(1 - threshold)))
return self.extra(where=funcs, params=params) | def function[similar, parameter[self, threshold]]:
constant[Find text-based field matches with similarity (1-levenshtein/length)
higher than specified threshold (0 to 1, 1 being an exact match)]
variable[meta] assign[=] name[self].model._meta
<ast.Tuple object at 0x7da18fe92740> assign[=] tuple[[<ast.Call object at 0x7da18fe91300>, <ast.Call object at 0x7da18fe908e0>]]
for taget[tuple[[<ast.Name object at 0x7da18fe930a0>, <ast.Name object at 0x7da18fe927d0>]]] in starred[call[name[criterias].iteritems, parameter[]]] begin[:]
variable[name] assign[=] call[name[meta].get_field, parameter[name[name]]].column
variable[name] assign[=] call[constant[.].join, parameter[call[name[it].imap, parameter[name[connection].ops.quote_name, tuple[[<ast.Attribute object at 0x7da18fe923e0>, <ast.Name object at 0x7da18fe91bd0>]]]]]]
call[name[funcs].append, parameter[call[constant[octet_length({0}) <= {1}].format, parameter[name[name], name[self].levenshtein_limit]]]]
call[name[funcs].append, parameter[call[constant[octet_length(%s) <= {0}].format, parameter[name[self].levenshtein_limit]]]]
call[name[funcs].append, parameter[call[constant[length({0}) > 0].format, parameter[name[name]]]]]
call[name[funcs].append, parameter[call[constant[levenshtein({0}, %s) / CAST(length({0}) AS numeric) < %s].format, parameter[name[name]]]]]
call[name[params].extend, parameter[tuple[[<ast.Name object at 0x7da18fe933d0>, <ast.Name object at 0x7da18fe92cb0>, <ast.Call object at 0x7da18fe917e0>]]]]
return[call[name[self].extra, parameter[]]] | keyword[def] identifier[similar] ( identifier[self] , identifier[threshold] ,** identifier[criterias] ):
literal[string]
identifier[meta] = identifier[self] . identifier[model] . identifier[_meta]
identifier[funcs] , identifier[params] = identifier[list] (), identifier[list] ()
keyword[for] identifier[name] , identifier[val] keyword[in] identifier[criterias] . identifier[iteritems] ():
identifier[name] = identifier[meta] . identifier[get_field] ( identifier[name] , identifier[many_to_many] = keyword[False] ). identifier[column]
identifier[name] = literal[string] . identifier[join] ( identifier[it] . identifier[imap] ( identifier[connection] . identifier[ops] . identifier[quote_name] ,( identifier[meta] . identifier[db_table] , identifier[name] )))
identifier[funcs] . identifier[append] ( literal[string] . identifier[format] ( identifier[name] , identifier[self] . identifier[levenshtein_limit] ))
identifier[funcs] . identifier[append] ( literal[string] . identifier[format] ( identifier[self] . identifier[levenshtein_limit] ))
identifier[funcs] . identifier[append] ( literal[string] . identifier[format] ( identifier[name] ))
identifier[funcs] . identifier[append] ( literal[string] . identifier[format] ( identifier[name] ))
identifier[params] . identifier[extend] (( identifier[val] , identifier[val] , identifier[float] ( literal[int] - identifier[threshold] )))
keyword[return] identifier[self] . identifier[extra] ( identifier[where] = identifier[funcs] , identifier[params] = identifier[params] ) | def similar(self, threshold, **criterias):
"""Find text-based field matches with similarity (1-levenshtein/length)
higher than specified threshold (0 to 1, 1 being an exact match)""" # XXX: use F from https://docs.djangoproject.com/en/1.8/ref/models/expressions/
meta = self.model._meta
(funcs, params) = (list(), list())
for (name, val) in criterias.iteritems():
name = meta.get_field(name, many_to_many=False).column
name = '.'.join(it.imap(connection.ops.quote_name, (meta.db_table, name))) # Alas, pg_trgm is for containment tests, not fuzzy matches,
# but it can potentially be used to find closest results as well
# funcs.append( 'similarity(CAST({0}.{1} as text), CAST(%s as text))'\
# Ok, these two are just to make sure levenshtein() won't crash
# w/ "argument exceeds the maximum length of N bytes error"
funcs.append('octet_length({0}) <= {1}'.format(name, self.levenshtein_limit))
funcs.append('octet_length(%s) <= {0}'.format(self.levenshtein_limit)) # Then there's a possibility of division by zero...
funcs.append('length({0}) > 0'.format(name)) # And if everything else fits, the comparison itself
funcs.append('levenshtein({0}, %s) / CAST(length({0}) AS numeric) < %s'.format(name))
params.extend((val, val, float(1 - threshold))) # depends on [control=['for'], data=[]]
return self.extra(where=funcs, params=params) |
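A hedged usage sketch; `Article` is an assumed Django model with a text `title` column. Note that `levenshtein()` is provided by PostgreSQL's `fuzzystrmatch` extension, which must be installed for the generated SQL to run.

```python
# Rows whose title is within 20% normalized edit distance of the query.
matches = Article.objects.similar(0.8, title='Postgres trigram search')
for article in matches:
    print(article.title)
```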
def _build_url(self, endpoint):
"""
Builds the absolute URL using the target and desired endpoint.
"""
try:
path = self.endpoints[endpoint]
except KeyError:
msg = 'Unknown endpoint `{0}`'
raise ValueError(msg.format(endpoint))
absolute_url = urljoin(self.target, path)
return absolute_url | def function[_build_url, parameter[self, endpoint]]:
constant[
Builds the absolute URL using the target and desired endpoint.
]
<ast.Try object at 0x7da1b063cf70>
variable[absolute_url] assign[=] call[name[urljoin], parameter[name[self].target, name[path]]]
return[name[absolute_url]] | keyword[def] identifier[_build_url] ( identifier[self] , identifier[endpoint] ):
literal[string]
keyword[try] :
identifier[path] = identifier[self] . identifier[endpoints] [ identifier[endpoint] ]
keyword[except] identifier[KeyError] :
identifier[msg] = literal[string]
keyword[raise] identifier[ValueError] ( identifier[msg] . identifier[format] ( identifier[endpoint] ))
identifier[absolute_url] = identifier[urljoin] ( identifier[self] . identifier[target] , identifier[path] )
keyword[return] identifier[absolute_url] | def _build_url(self, endpoint):
"""
Builds the absolute URL using the target and desired endpoint.
"""
try:
path = self.endpoints[endpoint] # depends on [control=['try'], data=[]]
except KeyError:
msg = 'Unknown endpoint `{0}`'
raise ValueError(msg.format(endpoint)) # depends on [control=['except'], data=[]]
absolute_url = urljoin(self.target, path)
return absolute_url |
def _winreg_getShellFolder( name ):
"""Get a shell folder by string name from the registry"""
k = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
try:
# should check that it's valid? How?
return _winreg.QueryValueEx( k, name )[0]
finally:
_winreg.CloseKey( k ) | def function[_winreg_getShellFolder, parameter[name]]:
constant[Get a shell folder by string name from the registry]
variable[k] assign[=] call[name[_winreg].OpenKey, parameter[name[_winreg].HKEY_CURRENT_USER, constant[Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders]]]
<ast.Try object at 0x7da18f723730> | keyword[def] identifier[_winreg_getShellFolder] ( identifier[name] ):
literal[string]
identifier[k] = identifier[_winreg] . identifier[OpenKey] (
identifier[_winreg] . identifier[HKEY_CURRENT_USER] ,
literal[string]
)
keyword[try] :
keyword[return] identifier[_winreg] . identifier[QueryValueEx] ( identifier[k] , identifier[name] )[ literal[int] ]
keyword[finally] :
identifier[_winreg] . identifier[CloseKey] ( identifier[k] ) | def _winreg_getShellFolder(name):
"""Get a shell folder by string name from the registry"""
k = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, 'Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders')
try:
# should check that it's valid? How?
return _winreg.QueryValueEx(k, name)[0] # depends on [control=['try'], data=[]]
finally:
_winreg.CloseKey(k) |
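Hypothetical usage on Windows. 'Desktop' and 'Personal' are standard value names under the Shell Folders key, and `QueryValueEx` returns a `(value, type)` pair, which is why the function indexes `[0]`.

```python
desktop = _winreg_getShellFolder('Desktop')
documents = _winreg_getShellFolder('Personal')  # "My Documents"
print(desktop, documents)
```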
def login_with_api_key(self, email, api_key, application='Default'):
"""
Login and get a token. If you do not specify a specific application,
'Default' will be used.
:param email: Email address of the user
:type email: string
:param api_key: API key assigned to the user
:type api_key: string
:param application: (optional) Application designated for this API key
:type application: string
:returns: Token to be used for interaction with the API until
expiration
:rtype: string
"""
parameters = dict()
parameters['email'] = BaseDriver.email = email # Cache email
parameters['apikey'] = BaseDriver.apikey = api_key # Cache API key
parameters['appname'] = application
response = self.request('midas.login', parameters)
if 'token' in response: # normal case
return response['token']
if 'mfa_token_id' in response: # case with multi-factor authentication
return response['mfa_token_id'] | def function[login_with_api_key, parameter[self, email, api_key, application]]:
constant[
Login and get a token. If you do not specify a specific application,
'Default' will be used.
:param email: Email address of the user
:type email: string
:param api_key: API key assigned to the user
:type api_key: string
:param application: (optional) Application designated for this API key
:type application: string
:returns: Token to be used for interaction with the API until
expiration
:rtype: string
]
variable[parameters] assign[=] call[name[dict], parameter[]]
call[name[parameters]][constant[email]] assign[=] name[email]
call[name[parameters]][constant[apikey]] assign[=] name[api_key]
call[name[parameters]][constant[appname]] assign[=] name[application]
variable[response] assign[=] call[name[self].request, parameter[constant[midas.login], name[parameters]]]
if compare[constant[token] in name[response]] begin[:]
return[call[name[response]][constant[token]]]
if compare[constant[mfa_token_id] in name[response]] begin[:]
return[call[name[response]][constant[mfa_token_id]]] | keyword[def] identifier[login_with_api_key] ( identifier[self] , identifier[email] , identifier[api_key] , identifier[application] = literal[string] ):
literal[string]
identifier[parameters] = identifier[dict] ()
identifier[parameters] [ literal[string] ]= identifier[BaseDriver] . identifier[email] = identifier[email]
identifier[parameters] [ literal[string] ]= identifier[BaseDriver] . identifier[apikey] = identifier[api_key]
identifier[parameters] [ literal[string] ]= identifier[application]
identifier[response] = identifier[self] . identifier[request] ( literal[string] , identifier[parameters] )
keyword[if] literal[string] keyword[in] identifier[response] :
keyword[return] identifier[response] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[response] :
keyword[return] identifier[response] [ literal[string] ] | def login_with_api_key(self, email, api_key, application='Default'):
"""
Login and get a token. If you do not specify a specific application,
'Default' will be used.
:param email: Email address of the user
:type email: string
:param api_key: API key assigned to the user
:type api_key: string
:param application: (optional) Application designated for this API key
:type application: string
:returns: Token to be used for interaction with the API until
expiration
:rtype: string
"""
parameters = dict()
parameters['email'] = BaseDriver.email = email # Cache email
parameters['apikey'] = BaseDriver.apikey = api_key # Cache API key
parameters['appname'] = application
response = self.request('midas.login', parameters)
if 'token' in response: # normal case
return response['token'] # depends on [control=['if'], data=['response']]
if 'mfa_token_id' in response: # case with multi-factor authentication
return response['mfa_token_id'] # depends on [control=['if'], data=[]] |
def list_documents(self, limit=None):
""" Generates vids of all indexed identifiers.
Args:
limit (int, optional): If not empty, the maximum number of results to return
Generates:
str: vid of the document.
"""
limit_str = ''
if limit:
try:
limit_str = 'LIMIT {}'.format(int(limit))
except (TypeError, ValueError):
pass
query = ('SELECT identifier FROM identifier_index ' + limit_str)
for row in self.backend.library.database.connection.execute(query).fetchall():
yield row['identifier'] | def function[list_documents, parameter[self, limit]]:
constant[ Generates vids of all indexed identifiers.
Args:
limit (int, optional): If not empty, the maximum number of results to return
Generates:
str: vid of the document.
]
variable[limit_str] assign[=] constant[]
if name[limit] begin[:]
<ast.Try object at 0x7da20c7947f0>
variable[query] assign[=] binary_operation[constant[SELECT identifier FROM identifier_index ] + name[limit_str]]
for taget[name[row]] in starred[call[call[name[self].backend.library.database.connection.execute, parameter[name[query]]].fetchall, parameter[]]] begin[:]
<ast.Yield object at 0x7da20c795750> | keyword[def] identifier[list_documents] ( identifier[self] , identifier[limit] = keyword[None] ):
literal[string]
identifier[limit_str] = literal[string]
keyword[if] identifier[limit] :
keyword[try] :
identifier[limit_str] = literal[string] . identifier[format] ( identifier[int] ( identifier[limit] ))
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[pass]
identifier[query] =( literal[string] + identifier[limit_str] )
keyword[for] identifier[row] keyword[in] identifier[self] . identifier[backend] . identifier[library] . identifier[database] . identifier[connection] . identifier[execute] ( identifier[query] ). identifier[fetchall] ():
keyword[yield] identifier[row] [ literal[string] ] | def list_documents(self, limit=None):
""" Generates vids of all indexed identifiers.
Args:
limit (int, optional): If not empty, the maximum number of results to return
Generates:
str: vid of the document.
"""
limit_str = ''
if limit:
try:
limit_str = 'LIMIT {}'.format(int(limit)) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
query = 'SELECT identifier FROM identifier_index ' + limit_str
for row in self.backend.library.database.connection.execute(query).fetchall():
yield row['identifier'] # depends on [control=['for'], data=['row']] |
def event(tagmatch='*',
count=-1,
quiet=False,
sock_dir=None,
pretty=False,
node='minion'):
r'''
Watch Salt's event bus and block until the given tag is matched
.. versionadded:: 2016.3.0
.. versionchanged:: 2019.2.0
``tagmatch`` can now be either a glob or regular expression.
This is useful for utilizing Salt's event bus from shell scripts or for
taking simple actions directly from the CLI.
Enable debug logging to see ignored events.
:param tagmatch: the event is written to stdout for each tag that matches
this glob or regular expression.
:param count: this number is decremented for each event that matches the
``tagmatch`` parameter; pass ``-1`` to listen forever.
:param quiet: do not print to stdout; just block
:param sock_dir: path to the Salt master's event socket file.
:param pretty: Output the JSON all on a single line if ``False`` (useful
for shell tools); pretty-print the JSON output if ``True``.
:param node: Watch the minion-side or master-side event bus.
CLI Example:
.. code-block:: bash
salt-call --local state.event pretty=True
'''
sevent = salt.utils.event.get_event(
node,
sock_dir or __opts__['sock_dir'],
__opts__['transport'],
opts=__opts__,
listen=True)
while True:
ret = sevent.get_event(full=True, auto_reconnect=True)
if ret is None:
continue
if salt.utils.stringutils.expr_match(ret['tag'], tagmatch):
if not quiet:
salt.utils.stringutils.print_cli(
str('{0}\t{1}').format( # future lint: blacklisted-function
salt.utils.stringutils.to_str(ret['tag']),
salt.utils.json.dumps(
ret['data'],
sort_keys=pretty,
indent=None if not pretty else 4)
)
)
sys.stdout.flush()
if count > 0:
count -= 1
log.debug('Remaining event matches: %s', count)
if count == 0:
break
else:
log.debug('Skipping event tag: %s', ret['tag'])
continue | def function[event, parameter[tagmatch, count, quiet, sock_dir, pretty, node]]:
constant[
Watch Salt's event bus and block until the given tag is matched
.. versionadded:: 2016.3.0
.. versionchanged:: 2019.2.0
``tagmatch`` can now be either a glob or regular expression.
This is useful for utilizing Salt's event bus from shell scripts or for
taking simple actions directly from the CLI.
Enable debug logging to see ignored events.
:param tagmatch: the event is written to stdout for each tag that matches
this glob or regular expression.
:param count: this number is decremented for each event that matches the
``tagmatch`` parameter; pass ``-1`` to listen forever.
:param quiet: do not print to stdout; just block
:param sock_dir: path to the Salt master's event socket file.
:param pretty: Output the JSON all on a single line if ``False`` (useful
for shell tools); pretty-print the JSON output if ``True``.
:param node: Watch the minion-side or master-side event bus.
CLI Example:
.. code-block:: bash
salt-call --local state.event pretty=True
]
variable[sevent] assign[=] call[name[salt].utils.event.get_event, parameter[name[node], <ast.BoolOp object at 0x7da1b210aa70>, call[name[__opts__]][constant[transport]]]]
while constant[True] begin[:]
variable[ret] assign[=] call[name[sevent].get_event, parameter[]]
if compare[name[ret] is constant[None]] begin[:]
continue
if call[name[salt].utils.stringutils.expr_match, parameter[call[name[ret]][constant[tag]], name[tagmatch]]] begin[:]
if <ast.UnaryOp object at 0x7da1b2109ff0> begin[:]
call[name[salt].utils.stringutils.print_cli, parameter[call[call[name[str], parameter[constant[{0} {1}]]].format, parameter[call[name[salt].utils.stringutils.to_str, parameter[call[name[ret]][constant[tag]]]], call[name[salt].utils.json.dumps, parameter[call[name[ret]][constant[data]]]]]]]]
call[name[sys].stdout.flush, parameter[]]
if compare[name[count] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b2001630>
call[name[log].debug, parameter[constant[Remaining event matches: %s], name[count]]]
if compare[name[count] equal[==] constant[0]] begin[:]
break | keyword[def] identifier[event] ( identifier[tagmatch] = literal[string] ,
identifier[count] =- literal[int] ,
identifier[quiet] = keyword[False] ,
identifier[sock_dir] = keyword[None] ,
identifier[pretty] = keyword[False] ,
identifier[node] = literal[string] ):
literal[string]
identifier[sevent] = identifier[salt] . identifier[utils] . identifier[event] . identifier[get_event] (
identifier[node] ,
identifier[sock_dir] keyword[or] identifier[__opts__] [ literal[string] ],
identifier[__opts__] [ literal[string] ],
identifier[opts] = identifier[__opts__] ,
identifier[listen] = keyword[True] )
keyword[while] keyword[True] :
identifier[ret] = identifier[sevent] . identifier[get_event] ( identifier[full] = keyword[True] , identifier[auto_reconnect] = keyword[True] )
keyword[if] identifier[ret] keyword[is] keyword[None] :
keyword[continue]
keyword[if] identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[expr_match] ( identifier[ret] [ literal[string] ], identifier[tagmatch] ):
keyword[if] keyword[not] identifier[quiet] :
identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[print_cli] (
identifier[str] ( literal[string] ). identifier[format] (
identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_str] ( identifier[ret] [ literal[string] ]),
identifier[salt] . identifier[utils] . identifier[json] . identifier[dumps] (
identifier[ret] [ literal[string] ],
identifier[sort_keys] = identifier[pretty] ,
identifier[indent] = keyword[None] keyword[if] keyword[not] identifier[pretty] keyword[else] literal[int] )
)
)
identifier[sys] . identifier[stdout] . identifier[flush] ()
keyword[if] identifier[count] > literal[int] :
identifier[count] -= literal[int]
identifier[log] . identifier[debug] ( literal[string] , identifier[count] )
keyword[if] identifier[count] == literal[int] :
keyword[break]
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] , identifier[ret] [ literal[string] ])
keyword[continue] | def event(tagmatch='*', count=-1, quiet=False, sock_dir=None, pretty=False, node='minion'):
"""
Watch Salt's event bus and block until the given tag is matched
.. versionadded:: 2016.3.0
.. versionchanged:: 2019.2.0
``tagmatch`` can now be either a glob or regular expression.
This is useful for utilizing Salt's event bus from shell scripts or for
taking simple actions directly from the CLI.
Enable debug logging to see ignored events.
:param tagmatch: the event is written to stdout for each tag that matches
this glob or regular expression.
:param count: this number is decremented for each event that matches the
``tagmatch`` parameter; pass ``-1`` to listen forever.
:param quiet: do not print to stdout; just block
:param sock_dir: path to the Salt master's event socket file.
:param pretty: Output the JSON all on a single line if ``False`` (useful
for shell tools); pretty-print the JSON output if ``True``.
:param node: Watch the minion-side or master-side event bus.
CLI Example:
.. code-block:: bash
salt-call --local state.event pretty=True
"""
sevent = salt.utils.event.get_event(node, sock_dir or __opts__['sock_dir'], __opts__['transport'], opts=__opts__, listen=True)
while True:
ret = sevent.get_event(full=True, auto_reconnect=True)
if ret is None:
continue # depends on [control=['if'], data=[]]
if salt.utils.stringutils.expr_match(ret['tag'], tagmatch):
if not quiet: # future lint: blacklisted-function
salt.utils.stringutils.print_cli(str('{0}\t{1}').format(salt.utils.stringutils.to_str(ret['tag']), salt.utils.json.dumps(ret['data'], sort_keys=pretty, indent=None if not pretty else 4)))
sys.stdout.flush() # depends on [control=['if'], data=[]]
if count > 0:
count -= 1
log.debug('Remaining event matches: %s', count) # depends on [control=['if'], data=['count']]
if count == 0:
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
log.debug('Skipping event tag: %s', ret['tag'])
continue # depends on [control=['while'], data=[]] |
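The glob-or-regex `tagmatch` behavior (2019.2.0 and later) can be approximated without Salt. A rough sketch of what `salt.utils.stringutils.expr_match` does, under the assumption that a full-string regex match is tried before falling back to glob semantics:

```python
import fnmatch
import re

def expr_match(line, expr):
    """Return True if `line` matches `expr` as a regex or as a glob."""
    try:
        if re.match(expr + '$', line):
            return True
    except re.error:
        pass  # not a valid regex; fall back to glob matching
    return fnmatch.fnmatch(line, expr)

assert expr_match('salt/job/123/ret/web1', 'salt/job/*')
assert expr_match('salt/job/123/ret/web1', r'salt/job/\d+/ret/.+')
```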
def processFlat(self):
"""Main process.
Returns
-------
est_idxs : np.array(N)
Estimated indices for the segment boundaries in frames.
est_labels : np.array(N-1)
Estimated labels for the segments.
"""
# C-NMF params
niter = self.config["niters"] # Iterations for the MF and clustering
# Preprocess to obtain features, times, and input boundary indices
F = self._preprocess()
# Normalize
F = U.normalize(F, norm_type=self.config["norm_feats"])
if F.shape[0] >= self.config["h"]:
# Median filter
F = median_filter(F, M=self.config["h"])
#plt.imshow(F.T, interpolation="nearest", aspect="auto"); plt.show()
# Find the boundary indices and labels using matrix factorization
est_idxs, est_labels = get_segmentation(
F.T, self.config["rank"], self.config["R"],
self.config["rank_labels"], self.config["R_labels"],
niter=niter, bound_idxs=self.in_bound_idxs, in_labels=None)
# Remove empty segments if needed
est_idxs, est_labels = U.remove_empty_segments(est_idxs, est_labels)
else:
# The track is too short. We will only output the first and last
# time stamps
if self.in_bound_idxs is None:
est_idxs = np.array([0, F.shape[0] - 1])
est_labels = [1]
else:
est_idxs = self.in_bound_idxs
est_labels = [1] * (len(est_idxs) + 1)
# Make sure that the first and last boundaries are included
assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[0] - 1
# Post process estimations
est_idxs, est_labels = self._postprocess(est_idxs, est_labels)
return est_idxs, est_labels | def function[processFlat, parameter[self]]:
constant[Main process.
Returns
-------
est_idxs : np.array(N)
Estimated indices for the segment boundaries in frames.
est_labels : np.array(N-1)
Estimated labels for the segments.
]
variable[niter] assign[=] call[name[self].config][constant[niters]]
variable[F] assign[=] call[name[self]._preprocess, parameter[]]
variable[F] assign[=] call[name[U].normalize, parameter[name[F]]]
if compare[call[name[F].shape][constant[0]] greater_or_equal[>=] call[name[self].config][constant[h]]] begin[:]
variable[F] assign[=] call[name[median_filter], parameter[name[F]]]
<ast.Tuple object at 0x7da1b01e1930> assign[=] call[name[get_segmentation], parameter[name[F].T, call[name[self].config][constant[rank]], call[name[self].config][constant[R]], call[name[self].config][constant[rank_labels]], call[name[self].config][constant[R_labels]]]]
<ast.Tuple object at 0x7da1b02a4670> assign[=] call[name[U].remove_empty_segments, parameter[name[est_idxs], name[est_labels]]]
assert[<ast.BoolOp object at 0x7da1b02a5b40>]
<ast.Tuple object at 0x7da1b02a4580> assign[=] call[name[self]._postprocess, parameter[name[est_idxs], name[est_labels]]]
return[tuple[[<ast.Name object at 0x7da1b02a7d30>, <ast.Name object at 0x7da1b02a5ae0>]]] | keyword[def] identifier[processFlat] ( identifier[self] ):
literal[string]
identifier[niter] = identifier[self] . identifier[config] [ literal[string] ]
identifier[F] = identifier[self] . identifier[_preprocess] ()
identifier[F] = identifier[U] . identifier[normalize] ( identifier[F] , identifier[norm_type] = identifier[self] . identifier[config] [ literal[string] ])
keyword[if] identifier[F] . identifier[shape] [ literal[int] ]>= identifier[self] . identifier[config] [ literal[string] ]:
identifier[F] = identifier[median_filter] ( identifier[F] , identifier[M] = identifier[self] . identifier[config] [ literal[string] ])
identifier[est_idxs] , identifier[est_labels] = identifier[get_segmentation] (
identifier[F] . identifier[T] , identifier[self] . identifier[config] [ literal[string] ], identifier[self] . identifier[config] [ literal[string] ],
identifier[self] . identifier[config] [ literal[string] ], identifier[self] . identifier[config] [ literal[string] ],
identifier[niter] = identifier[niter] , identifier[bound_idxs] = identifier[self] . identifier[in_bound_idxs] , identifier[in_labels] = keyword[None] )
identifier[est_idxs] , identifier[est_labels] = identifier[U] . identifier[remove_empty_segments] ( identifier[est_idxs] , identifier[est_labels] )
keyword[else] :
keyword[if] identifier[self] . identifier[in_bound_idxs] keyword[is] keyword[None] :
identifier[est_idxs] = identifier[np] . identifier[array] ([ literal[int] , identifier[F] . identifier[shape] [ literal[int] ]- literal[int] ])
identifier[est_labels] =[ literal[int] ]
keyword[else] :
identifier[est_idxs] = identifier[self] . identifier[in_bound_idxs]
identifier[est_labels] =[ literal[int] ]*( identifier[len] ( identifier[est_idxs] )+ literal[int] )
keyword[assert] identifier[est_idxs] [ literal[int] ]== literal[int] keyword[and] identifier[est_idxs] [- literal[int] ]== identifier[F] . identifier[shape] [ literal[int] ]- literal[int]
identifier[est_idxs] , identifier[est_labels] = identifier[self] . identifier[_postprocess] ( identifier[est_idxs] , identifier[est_labels] )
keyword[return] identifier[est_idxs] , identifier[est_labels] | def processFlat(self):
"""Main process.
Returns
-------
est_idxs : np.array(N)
Estimated indices for the segment boundaries in frames.
est_labels : np.array(N-1)
Estimated labels for the segments.
"""
# C-NMF params
niter = self.config['niters'] # Iterations for the MF and clustering
# Preprocess to obtain features, times, and input boundary indices
F = self._preprocess()
# Normalize
F = U.normalize(F, norm_type=self.config['norm_feats'])
if F.shape[0] >= self.config['h']:
# Median filter
F = median_filter(F, M=self.config['h'])
#plt.imshow(F.T, interpolation="nearest", aspect="auto"); plt.show()
# Find the boundary indices and labels using matrix factorization
(est_idxs, est_labels) = get_segmentation(F.T, self.config['rank'], self.config['R'], self.config['rank_labels'], self.config['R_labels'], niter=niter, bound_idxs=self.in_bound_idxs, in_labels=None)
# Remove empty segments if needed
(est_idxs, est_labels) = U.remove_empty_segments(est_idxs, est_labels) # depends on [control=['if'], data=[]]
# The track is too short. We will only output the first and last
# time stamps
elif self.in_bound_idxs is None:
est_idxs = np.array([0, F.shape[0] - 1])
est_labels = [1] # depends on [control=['if'], data=[]]
else:
est_idxs = self.in_bound_idxs
est_labels = [1] * (len(est_idxs) + 1)
# Make sure that the first and last boundaries are included
assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[0] - 1
# Post process estimations
(est_idxs, est_labels) = self._postprocess(est_idxs, est_labels)
return (est_idxs, est_labels) |
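processFlat hands back N frame-index boundaries plus labels for the segments between them. A minimal sketch (not part of the original module) of turning that pair into labeled time intervals; the frame_times array mapping frame indices to seconds is an assumed stand-in for whatever the caller derives from its hop size.

import numpy as np

def idxs_to_intervals(est_idxs, est_labels, frame_times):
    # Each consecutive pair of boundary indices frames one labeled segment.
    times = frame_times[np.asarray(est_idxs)]
    intervals = np.stack([times[:-1], times[1:]], axis=1)  # shape (N-1, 2)
    return intervals, list(est_labels)

frame_times = np.linspace(0.0, 180.0, 1000)  # fake 3-minute track, 1000 frames
intervals, labels = idxs_to_intervals([0, 250, 600, 999], [2, 0, 1], frame_times)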
def stop(self):
"""
Stop this process.
Once closed, it should not, and cannot, be used again.
:return: :py:attr:`~exitcode`.
"""
self.child.terminate()
self._cleanup()
return self.child.exitcode | def function[stop, parameter[self]]:
constant[
Stop this process.
Once closed, it should not, and cannot, be used again.
:return: :py:attr:`~exitcode`.
]
call[name[self].child.terminate, parameter[]]
call[name[self]._cleanup, parameter[]]
return[name[self].child.exitcode] | keyword[def] identifier[stop] ( identifier[self] ):
literal[string]
identifier[self] . identifier[child] . identifier[terminate] ()
identifier[self] . identifier[_cleanup] ()
keyword[return] identifier[self] . identifier[child] . identifier[exitcode] | def stop(self):
"""
Stop this process.
Once closed, it should not, and cannot, be used again.
:return: :py:attr:`~exitcode`.
"""
self.child.terminate()
self._cleanup()
return self.child.exitcode |
def right(self, n=1, interval=0, pre_dl=None, post_dl=None):
"""Press right key n times.
**Chinese documentation**
Press the right arrow key n times.
"""
self.delay(pre_dl)
self.k.tap_key(self.k.right_key, n, interval)
self.delay(post_dl) | def function[right, parameter[self, n, interval, pre_dl, post_dl]]:
constant[Press right key n times.
**Chinese documentation**
Press the right arrow key n times.
]
call[name[self].delay, parameter[name[pre_dl]]]
call[name[self].k.tap_key, parameter[name[self].k.right_key, name[n], name[interval]]]
call[name[self].delay, parameter[name[post_dl]]] | keyword[def] identifier[right] ( identifier[self] , identifier[n] = literal[int] , identifier[interval] = literal[int] , identifier[pre_dl] = keyword[None] , identifier[post_dl] = keyword[None] ):
literal[string]
identifier[self] . identifier[delay] ( identifier[pre_dl] )
identifier[self] . identifier[k] . identifier[tap_key] ( identifier[self] . identifier[k] . identifier[right_key] , identifier[n] , identifier[interval] )
identifier[self] . identifier[delay] ( identifier[post_dl] ) | def right(self, n=1, interval=0, pre_dl=None, post_dl=None):
"""Press right key n times.
**Chinese documentation**
Press the right arrow key n times.
"""
self.delay(pre_dl)
self.k.tap_key(self.k.right_key, n, interval)
self.delay(post_dl) |
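Since right() just delegates to self.k.tap_key(self.k.right_key, n, interval) with optional delays around it, a standalone sketch of the same effect with PyUserInput's PyKeyboard (assumed to be what self.k holds):

from pykeyboard import PyKeyboard

k = PyKeyboard()
k.tap_key(k.right_key, n=3, interval=0.05)  # tap Right three times, 50 ms apart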
def check_schema_transforms_match(schema, inverted_features):
"""Checks that the transform and schema do not conflict.
Args:
schema: schema list
inverted_features: inverted_features dict
Raises:
ValueError if transform cannot be applied given schema type.
"""
num_target_transforms = 0
for col_schema in schema:
col_name = col_schema['name']
col_type = col_schema['type'].lower()
# Check each transform and schema are compatible
if col_name in inverted_features:
for transform in inverted_features[col_name]:
transform_name = transform['transform']
if transform_name == constant.TARGET_TRANSFORM:
num_target_transforms += 1
continue
elif col_type in constant.NUMERIC_SCHEMA:
if transform_name not in constant.NUMERIC_TRANSFORMS:
raise ValueError(
'Transform %s not supported by schema %s' % (transform_name, col_type))
elif col_type == constant.STRING_SCHEMA:
if (transform_name not in constant.CATEGORICAL_TRANSFORMS + constant.TEXT_TRANSFORMS and
transform_name != constant.IMAGE_TRANSFORM):
raise ValueError(
'Transform %s not supported by schema %s' % (transform_name, col_type))
else:
raise ValueError('Unsupported schema type %s' % col_type)
# Check each transform is compatible for the same source column.
# inverted_features[col_name] should belong to exactly 1 of the 5 groups.
if col_name in inverted_features:
transform_set = {x['transform'] for x in inverted_features[col_name]}
if 1 != sum([transform_set.issubset(set(constant.NUMERIC_TRANSFORMS)),
transform_set.issubset(set(constant.CATEGORICAL_TRANSFORMS)),
transform_set.issubset(set(constant.TEXT_TRANSFORMS)),
transform_set.issubset(set([constant.IMAGE_TRANSFORM])),
transform_set.issubset(set([constant.TARGET_TRANSFORM]))]):
message = """
The source column of a feature can only be used in multiple
features within the same family of transforms. The families are
1) text transformations: %s
2) categorical transformations: %s
3) numerical transformations: %s
4) image transformations: %s
5) target transform: %s
Any column can also be a key column.
But column %s is used by transforms %s.
""" % (str(constant.TEXT_TRANSFORMS),
str(constant.CATEGORICAL_TRANSFORMS),
str(constant.NUMERIC_TRANSFORMS),
constant.IMAGE_TRANSFORM,
constant.TARGET_TRANSFORM,
col_name,
str(transform_set))
raise ValueError(message)
if num_target_transforms != 1:
raise ValueError('Must have exactly one target transform') | def function[check_schema_transforms_match, parameter[schema, inverted_features]]:
constant[Checks that the transform and schema do not conflict.
Args:
schema: schema list
inverted_features: inverted_features dict
Raises:
ValueError if transform cannot be applied given schema type.
]
variable[num_target_transforms] assign[=] constant[0]
for taget[name[col_schema]] in starred[name[schema]] begin[:]
variable[col_name] assign[=] call[name[col_schema]][constant[name]]
variable[col_type] assign[=] call[call[name[col_schema]][constant[type]].lower, parameter[]]
if compare[name[col_name] in name[inverted_features]] begin[:]
for taget[name[transform]] in starred[call[name[inverted_features]][name[col_name]]] begin[:]
variable[transform_name] assign[=] call[name[transform]][constant[transform]]
if compare[name[transform_name] equal[==] name[constant].TARGET_TRANSFORM] begin[:]
<ast.AugAssign object at 0x7da20c6e7c10>
continue
if compare[name[col_name] in name[inverted_features]] begin[:]
variable[transform_set] assign[=] <ast.SetComp object at 0x7da20c6e6a10>
if compare[constant[1] not_equal[!=] call[name[sum], parameter[list[[<ast.Call object at 0x7da20c6e6650>, <ast.Call object at 0x7da20c6e7a30>, <ast.Call object at 0x7da20c6e5ed0>, <ast.Call object at 0x7da20c6e6350>, <ast.Call object at 0x7da20c6e7250>]]]]] begin[:]
variable[message] assign[=] binary_operation[constant[
The source column of a feature can only be used in multiple
features within the same family of transforms. The families are
1) text transformations: %s
2) categorical transformations: %s
3) numerical transformations: %s
4) image transformations: %s
5) target transform: %s
Any column can also be a key column.
But column %s is used by transforms %s.
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da20c6e5630>, <ast.Call object at 0x7da20c6e59f0>, <ast.Call object at 0x7da20c6e55d0>, <ast.Attribute object at 0x7da20c6e7e50>, <ast.Attribute object at 0x7da18dc04c70>, <ast.Name object at 0x7da18dc06ce0>, <ast.Call object at 0x7da18dc06410>]]]
<ast.Raise object at 0x7da18dc06da0>
if compare[name[num_target_transforms] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da18dc043d0> | keyword[def] identifier[check_schema_transforms_match] ( identifier[schema] , identifier[inverted_features] ):
literal[string]
identifier[num_target_transforms] = literal[int]
keyword[for] identifier[col_schema] keyword[in] identifier[schema] :
identifier[col_name] = identifier[col_schema] [ literal[string] ]
identifier[col_type] = identifier[col_schema] [ literal[string] ]. identifier[lower] ()
keyword[if] identifier[col_name] keyword[in] identifier[inverted_features] :
keyword[for] identifier[transform] keyword[in] identifier[inverted_features] [ identifier[col_name] ]:
identifier[transform_name] = identifier[transform] [ literal[string] ]
keyword[if] identifier[transform_name] == identifier[constant] . identifier[TARGET_TRANSFORM] :
identifier[num_target_transforms] += literal[int]
keyword[continue]
keyword[elif] identifier[col_type] keyword[in] identifier[constant] . identifier[NUMERIC_SCHEMA] :
keyword[if] identifier[transform_name] keyword[not] keyword[in] identifier[constant] . identifier[NUMERIC_TRANSFORMS] :
keyword[raise] identifier[ValueError] (
literal[string] %( identifier[transform_name] , identifier[col_type] ))
keyword[elif] identifier[col_type] == identifier[constant] . identifier[STRING_SCHEMA] :
keyword[if] ( identifier[transform_name] keyword[not] keyword[in] identifier[constant] . identifier[CATEGORICAL_TRANSFORMS] + identifier[constant] . identifier[TEXT_TRANSFORMS] keyword[and]
identifier[transform_name] != identifier[constant] . identifier[IMAGE_TRANSFORM] ):
keyword[raise] identifier[ValueError] (
literal[string] %( identifier[transform_name] , identifier[col_type] ))
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[col_type] )
keyword[if] identifier[col_name] keyword[in] identifier[inverted_features] :
identifier[transform_set] ={ identifier[x] [ literal[string] ] keyword[for] identifier[x] keyword[in] identifier[inverted_features] [ identifier[col_name] ]}
keyword[if] literal[int] != identifier[sum] ([ identifier[transform_set] . identifier[issubset] ( identifier[set] ( identifier[constant] . identifier[NUMERIC_TRANSFORMS] )),
identifier[transform_set] . identifier[issubset] ( identifier[set] ( identifier[constant] . identifier[CATEGORICAL_TRANSFORMS] )),
identifier[transform_set] . identifier[issubset] ( identifier[set] ( identifier[constant] . identifier[TEXT_TRANSFORMS] )),
identifier[transform_set] . identifier[issubset] ( identifier[set] ([ identifier[constant] . identifier[IMAGE_TRANSFORM] ])),
identifier[transform_set] . identifier[issubset] ( identifier[set] ([ identifier[constant] . identifier[TARGET_TRANSFORM] ]))]):
identifier[message] = literal[string] %( identifier[str] ( identifier[constant] . identifier[TEXT_TRANSFORMS] ),
identifier[str] ( identifier[constant] . identifier[CATEGORICAL_TRANSFORMS] ),
identifier[str] ( identifier[constant] . identifier[NUMERIC_TRANSFORMS] ),
identifier[constant] . identifier[IMAGE_TRANSFORM] ,
identifier[constant] . identifier[TARGET_TRANSFORM] ,
identifier[col_name] ,
identifier[str] ( identifier[transform_set] ))
keyword[raise] identifier[ValueError] ( identifier[message] )
keyword[if] identifier[num_target_transforms] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] ) | def check_schema_transforms_match(schema, inverted_features):
"""Checks that the transform and schema do not conflict.
Args:
schema: schema list
inverted_features: inverted_features dict
Raises:
ValueError if transform cannot be applied given schema type.
"""
num_target_transforms = 0
for col_schema in schema:
col_name = col_schema['name']
col_type = col_schema['type'].lower()
# Check each transform and schema are compatible
if col_name in inverted_features:
for transform in inverted_features[col_name]:
transform_name = transform['transform']
if transform_name == constant.TARGET_TRANSFORM:
num_target_transforms += 1
continue # depends on [control=['if'], data=[]]
elif col_type in constant.NUMERIC_SCHEMA:
if transform_name not in constant.NUMERIC_TRANSFORMS:
raise ValueError('Transform %s not supported by schema %s' % (transform_name, col_type)) # depends on [control=['if'], data=['transform_name']] # depends on [control=['if'], data=['col_type']]
elif col_type == constant.STRING_SCHEMA:
if transform_name not in constant.CATEGORICAL_TRANSFORMS + constant.TEXT_TRANSFORMS and transform_name != constant.IMAGE_TRANSFORM:
raise ValueError('Transform %s not supported by schema %s' % (transform_name, col_type)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['col_type']]
else:
raise ValueError('Unsupported schema type %s' % col_type) # depends on [control=['for'], data=['transform']] # depends on [control=['if'], data=['col_name', 'inverted_features']]
# Check each transform is compatible for the same source column.
# inverted_features[col_name] should belong to exactly 1 of the 5 groups.
if col_name in inverted_features:
transform_set = {x['transform'] for x in inverted_features[col_name]}
if 1 != sum([transform_set.issubset(set(constant.NUMERIC_TRANSFORMS)), transform_set.issubset(set(constant.CATEGORICAL_TRANSFORMS)), transform_set.issubset(set(constant.TEXT_TRANSFORMS)), transform_set.issubset(set([constant.IMAGE_TRANSFORM])), transform_set.issubset(set([constant.TARGET_TRANSFORM]))]):
message = '\n The source column of a feature can only be used in multiple\n features within the same family of transforms. The families are\n\n 1) text transformations: %s\n 2) categorical transformations: %s\n 3) numerical transformations: %s\n 4) image transformations: %s\n 5) target transform: %s\n\n Any column can also be a key column.\n\n But column %s is used by transforms %s.\n ' % (str(constant.TEXT_TRANSFORMS), str(constant.CATEGORICAL_TRANSFORMS), str(constant.NUMERIC_TRANSFORMS), constant.IMAGE_TRANSFORM, constant.TARGET_TRANSFORM, col_name, str(transform_set))
raise ValueError(message) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['col_name', 'inverted_features']] # depends on [control=['for'], data=['col_schema']]
if num_target_transforms != 1:
raise ValueError('Must have exactly one target transform') # depends on [control=['if'], data=[]] |
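A minimal sketch of inputs that satisfy all three checks above: one numeric transform, one text transform, and exactly one target transform. The concrete transform strings ('scale', 'bag_of_words', 'target') are assumptions standing in for the values behind the constant module.

schema = [
    {'name': 'age', 'type': 'INTEGER'},
    {'name': 'title', 'type': 'STRING'},
    {'name': 'label', 'type': 'STRING'},
]
inverted_features = {
    'age': [{'transform': 'scale'}],           # assumed member of NUMERIC_TRANSFORMS
    'title': [{'transform': 'bag_of_words'}],  # assumed member of TEXT_TRANSFORMS
    'label': [{'transform': 'target'}],        # assumed TARGET_TRANSFORM
}
check_schema_transforms_match(schema, inverted_features)  # returns quietly if valid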
def _resolve_template(value, model_instance=None, context=None):
"""Resolves any template references in the given value."""
if isinstance(value, string_types) and "{" in value:
if context is None:
context = Context()
if model_instance is not None:
context[model_instance._meta.model_name] = model_instance
value = Template(value).render(context)
return value | def function[_resolve_template, parameter[value, model_instance, context]]:
constant[Resolves any template references in the given value.]
if <ast.BoolOp object at 0x7da207f01c60> begin[:]
if compare[name[context] is constant[None]] begin[:]
variable[context] assign[=] call[name[Context], parameter[]]
if compare[name[model_instance] is_not constant[None]] begin[:]
call[name[context]][name[model_instance]._meta.model_name] assign[=] name[model_instance]
variable[value] assign[=] call[call[name[Template], parameter[name[value]]].render, parameter[name[context]]]
return[name[value]] | keyword[def] identifier[_resolve_template] ( identifier[value] , identifier[model_instance] = keyword[None] , identifier[context] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[string_types] ) keyword[and] literal[string] keyword[in] identifier[value] :
keyword[if] identifier[context] keyword[is] keyword[None] :
identifier[context] = identifier[Context] ()
keyword[if] identifier[model_instance] keyword[is] keyword[not] keyword[None] :
identifier[context] [ identifier[model_instance] . identifier[_meta] . identifier[model_name] ]= identifier[model_instance]
identifier[value] = identifier[Template] ( identifier[value] ). identifier[render] ( identifier[context] )
keyword[return] identifier[value] | def _resolve_template(value, model_instance=None, context=None):
"""Resolves any template references in the given value."""
if isinstance(value, string_types) and '{' in value:
if context is None:
context = Context() # depends on [control=['if'], data=['context']]
if model_instance is not None:
context[model_instance._meta.model_name] = model_instance # depends on [control=['if'], data=['model_instance']]
value = Template(value).render(context) # depends on [control=['if'], data=[]]
return value |
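A self-contained sketch of the same behavior with stock Django; FakeMeta and FakeArticle are illustration-only stand-ins for a real model instance and its _meta.

import django
from django.conf import settings

settings.configure(TEMPLATES=[
    {'BACKEND': 'django.template.backends.django.DjangoTemplates'},
])
django.setup()

from django.template import Context, Template

class FakeMeta:
    model_name = 'article'

class FakeArticle:
    _meta = FakeMeta()
    slug = 'hello-world'

value = 'uploads/{{ article.slug }}.txt'
context = Context({'article': FakeArticle()})
print(Template(value).render(context))  # -> uploads/hello-world.txt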
def query_pmid():
"""
Returns list of PubMed identifier by query parameters
---
tags:
- Query functions
parameters:
- name: pmid
in: query
type: string
required: false
description: PubMed identifier
default: 20697050
- name: entry_name
in: query
type: string
required: false
description: UniProt entry name
default: A4_HUMAN
- name: first
in: query
type: string
required: false
description: first page
default: 987
- name: last
in: query
type: string
required: false
description: last page
default: 995
- name: volume
in: query
type: string
required: false
description: Volume
default: 67
- name: name
in: query
type: string
required: false
description: Name of journal
default: 'Arch. Neurol.'
- name: date
in: query
type: string
required: false
description: Publication date
default: 2010
- name: title
in: query
type: string
required: false
description: Title of publication
default: '%amyloidosis%'
- name: limit
in: query
type: integer
required: false
description: limit on the number of results
default: 10
"""
args = get_args(
request_args=request.args,
allowed_str_args=['first', 'last', 'volume', 'name', 'date', 'title', 'entry_name'],
allowed_int_args=['pmid', 'limit']
)
return jsonify(query.pmid(**args)) | def function[query_pmid, parameter[]]:
constant[
Returns list of PubMed identifier by query parameters
---
tags:
- Query functions
parameters:
- name: pmid
in: query
type: string
required: false
description: PubMed identifier
default: 20697050
- name: entry_name
in: query
type: string
required: false
description: UniProt entry name
default: A4_HUMAN
- name: first
in: query
type: string
required: false
description: first page
default: 987
- name: last
in: query
type: string
required: false
description: last page
default: 995
- name: volume
in: query
type: string
required: false
description: Volume
default: 67
- name: name
in: query
type: string
required: false
description: Name of journal
default: 'Arch. Neurol.'
- name: date
in: query
type: string
required: false
description: Publication date
default: 2010
- name: title
in: query
type: string
required: false
description: Title of publication
default: '%amyloidosis%'
- name: limit
in: query
type: integer
required: false
description: limit on the number of results
default: 10
]
variable[args] assign[=] call[name[get_args], parameter[]]
return[call[name[jsonify], parameter[call[name[query].pmid, parameter[]]]]] | keyword[def] identifier[query_pmid] ():
literal[string]
identifier[args] = identifier[get_args] (
identifier[request_args] = identifier[request] . identifier[args] ,
identifier[allowed_str_args] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ],
identifier[allowed_int_args] =[ literal[string] , literal[string] ]
)
keyword[return] identifier[jsonify] ( identifier[query] . identifier[pmid] (** identifier[args] )) | def query_pmid():
"""
Returns list of PubMed identifier by query parameters
---
tags:
- Query functions
parameters:
- name: pmid
in: query
type: string
required: false
description: PubMed identifier
default: 20697050
- name: entry_name
in: query
type: string
required: false
description: UniProt entry name
default: A4_HUMAN
- name: first
in: query
type: string
required: false
description: first page
default: 987
- name: last
in: query
type: string
required: false
description: last page
default: 995
- name: volume
in: query
type: string
required: false
description: Volume
default: 67
- name: name
in: query
type: string
required: false
description: Name of journal
default: 'Arch. Neurol.'
- name: date
in: query
type: string
required: false
description: Publication date
default: 2010
- name: title
in: query
type: string
required: false
description: Title of publication
default: '%amyloidosis%'
- name: limit
in: query
type: integer
required: false
description: limit on the number of results
default: 10
"""
args = get_args(request_args=request.args, allowed_str_args=['first', 'last', 'volume', 'name', 'date', 'title', 'entry_name'], allowed_int_args=['pmid', 'limit'])
return jsonify(query.pmid(**args)) |
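A client-side sketch of calling the endpoint with the documented parameters; the host, port, and /pmid route are assumptions, so check the service's actual URL map.

import requests

resp = requests.get(
    'http://localhost:5000/pmid',  # route is an assumption
    params={'entry_name': 'A4_HUMAN', 'date': '2010', 'limit': 10},
)
print(resp.json())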
def parent(self):
"""Parent directory that holds this directory"""
if self._parent is None:
if self.pid is not None:
self._parent = self.api._load_directory(self.pid)
return self._parent | def function[parent, parameter[self]]:
constant[Parent directory that holds this directory]
if compare[name[self]._parent is constant[None]] begin[:]
if compare[name[self].pid is_not constant[None]] begin[:]
name[self]._parent assign[=] call[name[self].api._load_directory, parameter[name[self].pid]]
return[name[self]._parent] | keyword[def] identifier[parent] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_parent] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[pid] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_parent] = identifier[self] . identifier[api] . identifier[_load_directory] ( identifier[self] . identifier[pid] )
keyword[return] identifier[self] . identifier[_parent] | def parent(self):
"""Parent directory that holds this directory"""
if self._parent is None:
if self.pid is not None:
self._parent = self.api._load_directory(self.pid) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return self._parent |
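The property above is a lazy, memoized lookup: the parent is fetched at most once, and only when a pid exists. The same pattern in isolation, with generic stand-in names:

class Node:
    def __init__(self, pid, loader):
        self.pid = pid
        self._loader = loader  # callable: pid -> parent object
        self._parent = None

    @property
    def parent(self):
        if self._parent is None and self.pid is not None:
            self._parent = self._loader(self.pid)  # fetched once, then cached
        return self._parent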
def train(self, x=None, y=None, training_frame=None, offset_column=None, fold_column=None,
weights_column=None, validation_frame=None, max_runtime_secs=None, ignored_columns=None,
model_id=None, verbose=False):
"""
Train the H2O model.
:param x: A list of column names or indices indicating the predictor columns.
:param y: An index or a column name indicating the response column.
:param H2OFrame training_frame: The H2OFrame having the columns indicated by x and y (as well as any
additional columns specified by fold, offset, and weights).
:param offset_column: The name or index of the column in training_frame that holds the offsets.
:param fold_column: The name or index of the column in training_frame that holds the per-row fold
assignments.
:param weights_column: The name or index of the column in training_frame that holds the per-row weights.
:param validation_frame: H2OFrame with validation data to be scored on while training.
:param float max_runtime_secs: Maximum allowed runtime in seconds for model training. Use 0 to disable.
:param bool verbose: Print scoring history to stdout. Defaults to False.
"""
self._train(x=x, y=y, training_frame=training_frame, offset_column=offset_column, fold_column=fold_column,
weights_column=weights_column, validation_frame=validation_frame, max_runtime_secs=max_runtime_secs,
ignored_columns=ignored_columns, model_id=model_id, verbose=verbose) | def function[train, parameter[self, x, y, training_frame, offset_column, fold_column, weights_column, validation_frame, max_runtime_secs, ignored_columns, model_id, verbose]]:
constant[
Train the H2O model.
:param x: A list of column names or indices indicating the predictor columns.
:param y: An index or a column name indicating the response column.
:param H2OFrame training_frame: The H2OFrame having the columns indicated by x and y (as well as any
additional columns specified by fold, offset, and weights).
:param offset_column: The name or index of the column in training_frame that holds the offsets.
:param fold_column: The name or index of the column in training_frame that holds the per-row fold
assignments.
:param weights_column: The name or index of the column in training_frame that holds the per-row weights.
:param validation_frame: H2OFrame with validation data to be scored on while training.
:param float max_runtime_secs: Maximum allowed runtime in seconds for model training. Use 0 to disable.
:param bool verbose: Print scoring history to stdout. Defaults to False.
]
call[name[self]._train, parameter[]] | keyword[def] identifier[train] ( identifier[self] , identifier[x] = keyword[None] , identifier[y] = keyword[None] , identifier[training_frame] = keyword[None] , identifier[offset_column] = keyword[None] , identifier[fold_column] = keyword[None] ,
identifier[weights_column] = keyword[None] , identifier[validation_frame] = keyword[None] , identifier[max_runtime_secs] = keyword[None] , identifier[ignored_columns] = keyword[None] ,
identifier[model_id] = keyword[None] , identifier[verbose] = keyword[False] ):
literal[string]
identifier[self] . identifier[_train] ( identifier[x] = identifier[x] , identifier[y] = identifier[y] , identifier[training_frame] = identifier[training_frame] , identifier[offset_column] = identifier[offset_column] , identifier[fold_column] = identifier[fold_column] ,
identifier[weights_column] = identifier[weights_column] , identifier[validation_frame] = identifier[validation_frame] , identifier[max_runtime_secs] = identifier[max_runtime_secs] ,
identifier[ignored_columns] = identifier[ignored_columns] , identifier[model_id] = identifier[model_id] , identifier[verbose] = identifier[verbose] ) | def train(self, x=None, y=None, training_frame=None, offset_column=None, fold_column=None, weights_column=None, validation_frame=None, max_runtime_secs=None, ignored_columns=None, model_id=None, verbose=False):
"""
Train the H2O model.
:param x: A list of column names or indices indicating the predictor columns.
:param y: An index or a column name indicating the response column.
:param H2OFrame training_frame: The H2OFrame having the columns indicated by x and y (as well as any
additional columns specified by fold, offset, and weights).
:param offset_column: The name or index of the column in training_frame that holds the offsets.
:param fold_column: The name or index of the column in training_frame that holds the per-row fold
assignments.
:param weights_column: The name or index of the column in training_frame that holds the per-row weights.
:param validation_frame: H2OFrame with validation data to be scored on while training.
:param float max_runtime_secs: Maximum allowed runtime in seconds for model training. Use 0 to disable.
:param bool verbose: Print scoring history to stdout. Defaults to False.
"""
self._train(x=x, y=y, training_frame=training_frame, offset_column=offset_column, fold_column=fold_column, weights_column=weights_column, validation_frame=validation_frame, max_runtime_secs=max_runtime_secs, ignored_columns=ignored_columns, model_id=model_id, verbose=verbose) |
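A hedged end-to-end sketch with a concrete H2O estimator; the CSV path and column names are illustrative only.

import h2o
from h2o.estimators import H2OGradientBoostingEstimator

h2o.init()
frame = h2o.import_file('train.csv')  # assumed to hold y plus the predictors
model = H2OGradientBoostingEstimator()
model.train(x=['x1', 'x2'], y='y', training_frame=frame, max_runtime_secs=60)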
def main(reactor):
"""
Close all open streams and circuits in the Tor we connect to
"""
control_ep = UNIXClientEndpoint(reactor, '/var/run/tor/control')
tor = yield txtorcon.connect(reactor, control_ep)
state = yield tor.create_state()
print("Closing all circuits:")
for circuit in list(state.circuits.values()):
path = '->'.join(map(lambda r: r.id_hex, circuit.path))
print("Circuit {} through {}".format(circuit.id, path))
for stream in circuit.streams:
print(" Stream {} to {}".format(stream.id, stream.target_host))
yield stream.close()
print(" closed")
yield circuit.close()
print("closed")
yield tor.quit() | def function[main, parameter[reactor]]:
constant[
Close all open streams and circuits in the Tor we connect to
]
variable[control_ep] assign[=] call[name[UNIXClientEndpoint], parameter[name[reactor], constant[/var/run/tor/control]]]
variable[tor] assign[=] <ast.Yield object at 0x7da20c6a9b70>
variable[state] assign[=] <ast.Yield object at 0x7da20c6a8e80>
call[name[print], parameter[constant[Closing all circuits:]]]
for taget[name[circuit]] in starred[call[name[list], parameter[call[name[state].circuits.values, parameter[]]]]] begin[:]
variable[path] assign[=] call[constant[->].join, parameter[call[name[map], parameter[<ast.Lambda object at 0x7da20c6a8790>, name[circuit].path]]]]
call[name[print], parameter[call[constant[Circuit {} through {}].format, parameter[name[circuit].id, name[path]]]]]
for taget[name[stream]] in starred[name[circuit].streams] begin[:]
call[name[print], parameter[call[constant[ Stream {} to {}].format, parameter[name[stream].id, name[stream].target_host]]]]
<ast.Yield object at 0x7da20c6a80a0>
call[name[print], parameter[constant[ closed]]]
<ast.Yield object at 0x7da18f09da50>
call[name[print], parameter[constant[closed]]]
<ast.Yield object at 0x7da2044c1690> | keyword[def] identifier[main] ( identifier[reactor] ):
literal[string]
identifier[control_ep] = identifier[UNIXClientEndpoint] ( identifier[reactor] , literal[string] )
identifier[tor] = keyword[yield] identifier[txtorcon] . identifier[connect] ( identifier[reactor] , identifier[control_ep] )
identifier[state] = keyword[yield] identifier[tor] . identifier[create_state] ()
identifier[print] ( literal[string] )
keyword[for] identifier[circuit] keyword[in] identifier[list] ( identifier[state] . identifier[circuits] . identifier[values] ()):
identifier[path] = literal[string] . identifier[join] ( identifier[map] ( keyword[lambda] identifier[r] : identifier[r] . identifier[id_hex] , identifier[circuit] . identifier[path] ))
identifier[print] ( literal[string] . identifier[format] ( identifier[circuit] . identifier[id] , identifier[path] ))
keyword[for] identifier[stream] keyword[in] identifier[circuit] . identifier[streams] :
identifier[print] ( literal[string] . identifier[format] ( identifier[stream] . identifier[id] , identifier[stream] . identifier[target_host] ))
keyword[yield] identifier[stream] . identifier[close] ()
identifier[print] ( literal[string] )
keyword[yield] identifier[circuit] . identifier[close] ()
identifier[print] ( literal[string] )
keyword[yield] identifier[tor] . identifier[quit] () | def main(reactor):
"""
Close all open streams and circuits in the Tor we connect to
"""
control_ep = UNIXClientEndpoint(reactor, '/var/run/tor/control')
tor = (yield txtorcon.connect(reactor, control_ep))
state = (yield tor.create_state())
print('Closing all circuits:')
for circuit in list(state.circuits.values()):
path = '->'.join(map(lambda r: r.id_hex, circuit.path))
print('Circuit {} through {}'.format(circuit.id, path))
for stream in circuit.streams:
print(' Stream {} to {}'.format(stream.id, stream.target_host))
yield stream.close()
print(' closed') # depends on [control=['for'], data=['stream']]
yield circuit.close()
print('closed') # depends on [control=['for'], data=['circuit']]
yield tor.quit() |
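Because main() yields Deferreds, it presumably runs under Twisted with an inlineCallbacks wrapper; a typical driver, assuming that decoration, looks like:

from twisted.internet.defer import inlineCallbacks
from twisted.internet.task import react

react(inlineCallbacks(main))  # hands main() a running reactor, exits when done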
def get_go2nt_all(self, rcntobj):
"""For each GO id, put all printable fields in one namedtuple."""
if 'go2nt' in self.kws:
go2nt = self.kws['go2nt']
return {go:go2nt[go] for go in self.go2obj}
else:
return self._get_go2nt_all(rcntobj) | def function[get_go2nt_all, parameter[self, rcntobj]]:
constant[For each GO id, put all printable fields in one namedtuple.]
if compare[constant[go2nt] in name[self].kws] begin[:]
variable[go2nt] assign[=] call[name[self].kws][constant[go2nt]]
return[<ast.DictComp object at 0x7da1b26ad360>] | keyword[def] identifier[get_go2nt_all] ( identifier[self] , identifier[rcntobj] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[kws] :
identifier[go2nt] = identifier[self] . identifier[kws] [ literal[string] ]
keyword[return] { identifier[go] : identifier[go2nt] [ identifier[go] ] keyword[for] identifier[go] keyword[in] identifier[self] . identifier[go2obj] }
keyword[else] :
keyword[return] identifier[self] . identifier[_get_go2nt_all] ( identifier[rcntobj] ) | def get_go2nt_all(self, rcntobj):
"""For each GO id, put all printable fields in one namedtuple."""
if 'go2nt' in self.kws:
go2nt = self.kws['go2nt']
return {go: go2nt[go] for go in self.go2obj} # depends on [control=['if'], data=[]]
else:
return self._get_go2nt_all(rcntobj) |
def candidate_foundation(candidate_type, candidate_transport, base_address):
"""
See RFC 5245 - 4.1.1.3. Computing Foundations
"""
key = '%s|%s|%s' % (candidate_type, candidate_transport, base_address)
return hashlib.md5(key.encode('ascii')).hexdigest() | def function[candidate_foundation, parameter[candidate_type, candidate_transport, base_address]]:
constant[
See RFC 5245 - 4.1.1.3. Computing Foundations
]
variable[key] assign[=] binary_operation[constant[%s|%s|%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f00ee30>, <ast.Name object at 0x7da18f00d270>, <ast.Name object at 0x7da18f00df60>]]]
return[call[call[name[hashlib].md5, parameter[call[name[key].encode, parameter[constant[ascii]]]]].hexdigest, parameter[]]] | keyword[def] identifier[candidate_foundation] ( identifier[candidate_type] , identifier[candidate_transport] , identifier[base_address] ):
literal[string]
identifier[key] = literal[string] %( identifier[candidate_type] , identifier[candidate_transport] , identifier[base_address] )
keyword[return] identifier[hashlib] . identifier[md5] ( identifier[key] . identifier[encode] ( literal[string] )). identifier[hexdigest] () | def candidate_foundation(candidate_type, candidate_transport, base_address):
"""
See RFC 5245 - 4.1.1.3. Computing Foundations
"""
key = '%s|%s|%s' % (candidate_type, candidate_transport, base_address)
return hashlib.md5(key.encode('ascii')).hexdigest() |
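Worked example: the foundation is just the MD5 hex digest of 'type|transport|base-address', so identical triples always yield the same foundation, which is what ICE candidate pairing relies on.

import hashlib

key = '%s|%s|%s' % ('host', 'udp', '192.168.1.10')
print(hashlib.md5(key.encode('ascii')).hexdigest())  # stable 32-hex-digit foundation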
def _get_namedrange(book, rangename, sheetname=None):
"""Get range from a workbook.
A workbook can contain multiple definitions for a single name,
as a name can be defined for the entire book or for
a particular sheet.
If sheet is None, the book-wide def is searched,
otherwise sheet-local def is looked up.
Args:
book: An openpyxl workbook object.
rangename (str): Range expression, such as "A1", "$G4:$K10",
named range "NamedRange1".
sheetname (str, optional): None for book-wide name def,
sheet name for sheet-local named range.
Returns:
Range object specified by the name.
"""
def cond(namedef):
if namedef.type.upper() == "RANGE":
if namedef.name.upper() == rangename.upper():
if sheetname is None:
if not namedef.localSheetId:
return True
else: # sheet local name
sheet_id = [sht.upper() for sht in book.sheetnames].index(
sheetname.upper()
)
if namedef.localSheetId == sheet_id:
return True
return False
def get_destinations(name_def):
"""Workaround for the bug in DefinedName.destinations"""
from openpyxl.formula import Tokenizer
from openpyxl.utils.cell import SHEETRANGE_RE
if name_def.type == "RANGE":
tok = Tokenizer("=" + name_def.value)
for part in tok.items:
if part.subtype == "RANGE":
m = SHEETRANGE_RE.match(part.value)
if m.group("quoted"):
sheet_name = m.group("quoted")
else:
sheet_name = m.group("notquoted")
yield sheet_name, m.group("cells")
namedef = next(
(item for item in book.defined_names.definedName if cond(item)), None
)
if namedef is None:
return None
dests = get_destinations(namedef)
xlranges = []
sheetnames_upper = [name.upper() for name in book.sheetnames]
for sht, addr in dests:
if sheetname:
sht = sheetname
index = sheetnames_upper.index(sht.upper())
xlranges.append(book.worksheets[index][addr])
if len(xlranges) == 1:
return xlranges[0]
else:
return xlranges | def function[_get_namedrange, parameter[book, rangename, sheetname]]:
constant[Get range from a workbook.
A workbook can contain multiple definitions for a single name,
as a name can be defined for the entire book or for
a particular sheet.
If sheet is None, the book-wide def is searched,
otherwise sheet-local def is looked up.
Args:
book: An openpyxl workbook object.
rangename (str): Range expression, such as "A1", "$G4:$K10",
named range "NamedRange1".
sheetname (str, optional): None for book-wide name def,
sheet name for sheet-local named range.
Returns:
Range object specified by the name.
]
def function[cond, parameter[namedef]]:
if compare[call[name[namedef].type.upper, parameter[]] equal[==] constant[RANGE]] begin[:]
if compare[call[name[namedef].name.upper, parameter[]] equal[==] call[name[rangename].upper, parameter[]]] begin[:]
if compare[name[sheetname] is constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1aff1c5b0> begin[:]
return[constant[True]]
return[constant[False]]
def function[get_destinations, parameter[name_def]]:
constant[Workaround for the bug in DefinedName.destinations]
from relative_module[openpyxl.formula] import module[Tokenizer]
from relative_module[openpyxl.utils.cell] import module[SHEETRANGE_RE]
if compare[name[name_def].type equal[==] constant[RANGE]] begin[:]
variable[tok] assign[=] call[name[Tokenizer], parameter[binary_operation[constant[=] + name[name_def].value]]]
for taget[name[part]] in starred[name[tok].items] begin[:]
if compare[name[part].subtype equal[==] constant[RANGE]] begin[:]
variable[m] assign[=] call[name[SHEETRANGE_RE].match, parameter[name[part].value]]
if call[name[m].group, parameter[constant[quoted]]] begin[:]
variable[sheet_name] assign[=] call[name[m].group, parameter[constant[quoted]]]
<ast.Yield object at 0x7da1aff1ef20>
variable[namedef] assign[=] call[name[next], parameter[<ast.GeneratorExp object at 0x7da1aff1d060>, constant[None]]]
if compare[name[namedef] is constant[None]] begin[:]
return[constant[None]]
variable[dests] assign[=] call[name[get_destinations], parameter[name[namedef]]]
variable[xlranges] assign[=] list[[]]
variable[sheetnames_upper] assign[=] <ast.ListComp object at 0x7da1aff1e170>
for taget[tuple[[<ast.Name object at 0x7da1aff1d2d0>, <ast.Name object at 0x7da1aff1ff10>]]] in starred[name[dests]] begin[:]
if name[sheetname] begin[:]
variable[sht] assign[=] name[sheetname]
variable[index] assign[=] call[name[sheetnames_upper].index, parameter[call[name[sht].upper, parameter[]]]]
call[name[xlranges].append, parameter[call[call[name[book].worksheets][name[index]]][name[addr]]]]
if compare[call[name[len], parameter[name[xlranges]]] equal[==] constant[1]] begin[:]
return[call[name[xlranges]][constant[0]]] | keyword[def] identifier[_get_namedrange] ( identifier[book] , identifier[rangename] , identifier[sheetname] = keyword[None] ):
literal[string]
keyword[def] identifier[cond] ( identifier[namedef] ):
keyword[if] identifier[namedef] . identifier[type] . identifier[upper] ()== literal[string] :
keyword[if] identifier[namedef] . identifier[name] . identifier[upper] ()== identifier[rangename] . identifier[upper] ():
keyword[if] identifier[sheetname] keyword[is] keyword[None] :
keyword[if] keyword[not] identifier[namedef] . identifier[localSheetId] :
keyword[return] keyword[True]
keyword[else] :
identifier[sheet_id] =[ identifier[sht] . identifier[upper] () keyword[for] identifier[sht] keyword[in] identifier[book] . identifier[sheetnames] ]. identifier[index] (
identifier[sheetname] . identifier[upper] ()
)
keyword[if] identifier[namedef] . identifier[localSheetId] == identifier[sheet_id] :
keyword[return] keyword[True]
keyword[return] keyword[False]
keyword[def] identifier[get_destinations] ( identifier[name_def] ):
literal[string]
keyword[from] identifier[openpyxl] . identifier[formula] keyword[import] identifier[Tokenizer]
keyword[from] identifier[openpyxl] . identifier[utils] . identifier[cell] keyword[import] identifier[SHEETRANGE_RE]
keyword[if] identifier[name_def] . identifier[type] == literal[string] :
identifier[tok] = identifier[Tokenizer] ( literal[string] + identifier[name_def] . identifier[value] )
keyword[for] identifier[part] keyword[in] identifier[tok] . identifier[items] :
keyword[if] identifier[part] . identifier[subtype] == literal[string] :
identifier[m] = identifier[SHEETRANGE_RE] . identifier[match] ( identifier[part] . identifier[value] )
keyword[if] identifier[m] . identifier[group] ( literal[string] ):
identifier[sheet_name] = identifier[m] . identifier[group] ( literal[string] )
keyword[else] :
identifier[sheet_name] = identifier[m] . identifier[group] ( literal[string] )
keyword[yield] identifier[sheet_name] , identifier[m] . identifier[group] ( literal[string] )
identifier[namedef] = identifier[next] (
( identifier[item] keyword[for] identifier[item] keyword[in] identifier[book] . identifier[defined_names] . identifier[definedName] keyword[if] identifier[cond] ( identifier[item] )), keyword[None]
)
keyword[if] identifier[namedef] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[dests] = identifier[get_destinations] ( identifier[namedef] )
identifier[xlranges] =[]
identifier[sheetnames_upper] =[ identifier[name] . identifier[upper] () keyword[for] identifier[name] keyword[in] identifier[book] . identifier[sheetnames] ]
keyword[for] identifier[sht] , identifier[addr] keyword[in] identifier[dests] :
keyword[if] identifier[sheetname] :
identifier[sht] = identifier[sheetname]
identifier[index] = identifier[sheetnames_upper] . identifier[index] ( identifier[sht] . identifier[upper] ())
identifier[xlranges] . identifier[append] ( identifier[book] . identifier[worksheets] [ identifier[index] ][ identifier[addr] ])
keyword[if] identifier[len] ( identifier[xlranges] )== literal[int] :
keyword[return] identifier[xlranges] [ literal[int] ]
keyword[else] :
keyword[return] identifier[xlranges] | def _get_namedrange(book, rangename, sheetname=None):
"""Get range from a workbook.
A workbook can contain multiple definitions for a single name,
as a name can be defined for the entire book or for
a particular sheet.
If sheet is None, the book-wide def is searched,
otherwise sheet-local def is looked up.
Args:
book: An openpyxl workbook object.
rangename (str): Range expression, such as "A1", "$G4:$K10",
named range "NamedRange1".
sheetname (str, optional): None for book-wide name def,
sheet name for sheet-local named range.
Returns:
Range object specified by the name.
"""
def cond(namedef):
if namedef.type.upper() == 'RANGE':
if namedef.name.upper() == rangename.upper():
if sheetname is None:
if not namedef.localSheetId:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else: # sheet local name
sheet_id = [sht.upper() for sht in book.sheetnames].index(sheetname.upper())
if namedef.localSheetId == sheet_id:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return False
def get_destinations(name_def):
"""Workaround for the bug in DefinedName.destinations"""
from openpyxl.formula import Tokenizer
from openpyxl.utils.cell import SHEETRANGE_RE
if name_def.type == 'RANGE':
tok = Tokenizer('=' + name_def.value)
for part in tok.items:
if part.subtype == 'RANGE':
m = SHEETRANGE_RE.match(part.value)
if m.group('quoted'):
sheet_name = m.group('quoted') # depends on [control=['if'], data=[]]
else:
sheet_name = m.group('notquoted')
yield (sheet_name, m.group('cells')) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['part']] # depends on [control=['if'], data=[]]
namedef = next((item for item in book.defined_names.definedName if cond(item)), None)
if namedef is None:
return None # depends on [control=['if'], data=[]]
dests = get_destinations(namedef)
xlranges = []
sheetnames_upper = [name.upper() for name in book.sheetnames]
for (sht, addr) in dests:
if sheetname:
sht = sheetname # depends on [control=['if'], data=[]]
index = sheetnames_upper.index(sht.upper())
xlranges.append(book.worksheets[index][addr]) # depends on [control=['for'], data=[]]
if len(xlranges) == 1:
return xlranges[0] # depends on [control=['if'], data=[]]
else:
return xlranges |
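A sketch of the book-wide lookup path, assuming an openpyxl release whose defined_names container still exposes .definedName (pre-3.1, matching the code above):

import openpyxl
from openpyxl.workbook.defined_name import DefinedName

wb = openpyxl.Workbook()
wb.active['A1'] = 42
wb.defined_names.append(DefinedName('MyRange', attr_text='Sheet!$A$1'))

cell = _get_namedrange(wb, 'MyRange')  # book-wide: sheetname left as None
print(cell.value)  # -> 42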
def set_active_state(self, name, value):
"""Set active state."""
if name not in self.__active_states.keys():
raise ValueError("Can not set unknown state '" + name + "'")
if (isinstance(self.__active_states[name], int) and
isinstance(value, str)):
# we get an update as str but current value is
# an int, try to convert
self.__active_states[name] = int(value)
elif (isinstance(self.__active_states[name], float) and
isinstance(value, str)):
# we get an update as str but current value is
# a float, try to convert
self.__active_states[name] = float(value)
else:
self.__active_states[name] = value | def function[set_active_state, parameter[self, name, value]]:
constant[Set active state.]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> call[name[self].__active_states.keys, parameter[]]] begin[:]
<ast.Raise object at 0x7da1b0d57ca0>
if <ast.BoolOp object at 0x7da1b0d56ce0> begin[:]
call[name[self].__active_states][name[name]] assign[=] call[name[int], parameter[name[value]]] | keyword[def] identifier[set_active_state] ( identifier[self] , identifier[name] , identifier[value] ):
literal[string]
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[__active_states] . identifier[keys] ():
keyword[raise] identifier[ValueError] ( literal[string] + identifier[name] + literal[string] )
keyword[if] ( identifier[isinstance] ( identifier[self] . identifier[__active_states] [ identifier[name] ], identifier[int] ) keyword[and]
identifier[isinstance] ( identifier[value] , identifier[str] )):
identifier[self] . identifier[__active_states] [ identifier[name] ]= identifier[int] ( identifier[value] )
keyword[elif] ( identifier[isinstance] ( identifier[self] . identifier[__active_states] [ identifier[name] ], identifier[float] ) keyword[and]
identifier[isinstance] ( identifier[value] , identifier[str] )):
identifier[self] . identifier[__active_states] [ identifier[name] ]= identifier[float] ( identifier[value] )
keyword[else] :
identifier[self] . identifier[__active_states] [ identifier[name] ]= identifier[value] | def set_active_state(self, name, value):
"""Set active state."""
if name not in self.__active_states.keys():
raise ValueError("Can not set unknown state '" + name + "'") # depends on [control=['if'], data=['name']]
if isinstance(self.__active_states[name], int) and isinstance(value, str):
# we get an update as str but current value is
# an int, try to convert
self.__active_states[name] = int(value) # depends on [control=['if'], data=[]]
elif isinstance(self.__active_states[name], float) and isinstance(value, str):
# we get an update as str but current value is
# a float, try to convert
self.__active_states[name] = float(value) # depends on [control=['if'], data=[]]
else:
self.__active_states[name] = value |
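A standalone sketch of the coercion rule above: a string update is converted to the type already stored for that state, so numeric states survive string-typed pushes. Names are generic.

states = {'level': 0, 'temp': 20.0, 'mode': 'auto'}

def set_state(name, value):
    if isinstance(states[name], int) and isinstance(value, str):
        states[name] = int(value)
    elif isinstance(states[name], float) and isinstance(value, str):
        states[name] = float(value)
    else:
        states[name] = value

set_state('temp', '21.5')
print(states['temp'])  # 21.5 as a float, not the string '21.5'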
def S_isothermal_pipe_to_isothermal_pipe(D1, D2, W, L=1.):
r'''Returns the Shape factor `S` of a pipe of constant outer temperature
and of outer diameter `D1` which is a distance `W` from another infinite
pipe of outer diameter `D2`. Length `L` must be provided, but can be set to
1 to obtain a dimensionless shape factor used in some sources.
.. math::
S = \frac{2\pi L}{\cosh^{-1}\left(\frac{4w^2-D_1^2-D_2^2}{2D_1D_2}\right)}
Parameters
----------
D1 : float
Diameter of one pipe, [m]
D2 : float
Diameter of the other pipe, [m]
W : float
Distance from the middle of one pipe to the middle of the other, [m]
L : float, optional
Length of the pipe, [m]
Returns
-------
S : float
Shape factor [m]
Examples
--------
>>> S_isothermal_pipe_to_isothermal_pipe(.1, .2, 1, 1)
1.188711034982268
Notes
-----
L should be much larger than both diameters. L should be larger than W.
.. math::
Q = Sk(T_1 - T_2) \\ R_{\text{shape}}=\frac{1}{Sk}
References
----------
.. [1] Kreith, Frank, Raj Manglik, and Mark Bohn. Principles of Heat
Transfer. Cengage, 2010.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
Wiley, 2011.
'''
return 2.*pi*L/acosh((4*W**2 - D1**2 - D2**2)/(2.*D1*D2)) | def function[S_isothermal_pipe_to_isothermal_pipe, parameter[D1, D2, W, L]]:
constant[Returns the Shape factor `S` of a pipe of constant outer temperature
and of outer diameter `D1` which is a distance `W` from another infinite
pipe of outer diameter `D2`. Length `L` must be provided, but can be set to
1 to obtain a dimensionless shape factor used in some sources.
.. math::
S = \frac{2\pi L}{\cosh^{-1}\left(\frac{4w^2-D_1^2-D_2^2}{2D_1D_2}\right)}
Parameters
----------
D1 : float
Diameter of one pipe, [m]
D2 : float
Diameter of the other pipe, [m]
W : float
Distance from the middle of one pipe to the middle of the other, [m]
L : float, optional
Length of the pipe, [m]
Returns
-------
S : float
Shape factor [m]
Examples
--------
>>> S_isothermal_pipe_to_isothermal_pipe(.1, .2, 1, 1)
1.188711034982268
Notes
-----
L should be much larger than both diameters. L should be larger than W.
.. math::
Q = Sk(T_1 - T_2) \\ R_{\text{shape}}=\frac{1}{Sk}
References
----------
.. [1] Kreith, Frank, Raj Manglik, and Mark Bohn. Principles of Heat
Transfer. Cengage, 2010.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
Wiley, 2011.
]
return[binary_operation[binary_operation[binary_operation[constant[2.0] * name[pi]] * name[L]] / call[name[acosh], parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[4] * binary_operation[name[W] ** constant[2]]] - binary_operation[name[D1] ** constant[2]]] - binary_operation[name[D2] ** constant[2]]] / binary_operation[binary_operation[constant[2.0] * name[D1]] * name[D2]]]]]]] | keyword[def] identifier[S_isothermal_pipe_to_isothermal_pipe] ( identifier[D1] , identifier[D2] , identifier[W] , identifier[L] = literal[int] ):
literal[string]
keyword[return] literal[int] * identifier[pi] * identifier[L] / identifier[acosh] (( literal[int] * identifier[W] ** literal[int] - identifier[D1] ** literal[int] - identifier[D2] ** literal[int] )/( literal[int] * identifier[D1] * identifier[D2] )) | def S_isothermal_pipe_to_isothermal_pipe(D1, D2, W, L=1.0):
"""Returns the Shape factor `S` of a pipe of constant outer temperature
and of outer diameter `D1` which is a distance `W` from another infinite
pipe of outer diameter `D2`. Length `L` must be provided, but can be set to
1 to obtain a dimensionless shape factor used in some sources.
.. math::
S = \\frac{2\\pi L}{\\cosh^{-1}\\left(\\frac{4w^2-D_1^2-D_2^2}{2D_1D_2}\\right)}
Parameters
----------
D1 : float
Diameter of one pipe, [m]
D2 : float
Diameter of the other pipe, [m]
W : float
Distance from the middle of one pipe to the middle of the other, [m]
L : float, optional
Length of the pipe, [m]
Returns
-------
S : float
Shape factor [m]
Examples
--------
>>> S_isothermal_pipe_to_isothermal_pipe(.1, .2, 1, 1)
1.188711034982268
Notes
-----
L should be much larger than both diameters. L should be larger than W.
.. math::
Q = Sk(T_1 - T_2) \\\\ R_{\\text{shape}}=\\frac{1}{Sk}
References
----------
.. [1] Kreith, Frank, Raj Manglik, and Mark Bohn. Principles of Heat
Transfer. Cengage, 2010.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
Wiley, 2011.
"""
return 2.0 * pi * L / acosh((4 * W ** 2 - D1 ** 2 - D2 ** 2) / (2.0 * D1 * D2)) |
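A worked check of the doctest value above, in the docstring's own notation:

.. math::
    S = \frac{2\pi(1)}{\cosh^{-1}\left(\frac{4(1)^2 - 0.1^2 - 0.2^2}{2(0.1)(0.2)}\right)}
      = \frac{2\pi}{\cosh^{-1}(98.75)} \approx \frac{6.2832}{5.2858} \approx 1.1887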
def __get_dbms_version(self, make_connection=True):
"""
Returns the 'DBMS Version' string, or ''. If a connection to the
database has not already been established, a connection will be made
when `make_connection` is True.
"""
if not self.connection and make_connection:
self.connect()
with self.connection.cursor() as cursor:
cursor.execute("SELECT SERVERPROPERTY('productversion')")
return cursor.fetchone()[0] | def function[__get_dbms_version, parameter[self, make_connection]]:
constant[
Returns the 'DBMS Version' string, or ''. If a connection to the
database has not already been established, a connection will be made
when `make_connection` is True.
]
if <ast.BoolOp object at 0x7da1b261e5c0> begin[:]
call[name[self].connect, parameter[]]
with call[name[self].connection.cursor, parameter[]] begin[:]
call[name[cursor].execute, parameter[constant[SELECT SERVERPROPERTY('productversion')]]]
return[call[call[name[cursor].fetchone, parameter[]]][constant[0]]] | keyword[def] identifier[__get_dbms_version] ( identifier[self] , identifier[make_connection] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[connection] keyword[and] identifier[make_connection] :
identifier[self] . identifier[connect] ()
keyword[with] identifier[self] . identifier[connection] . identifier[cursor] () keyword[as] identifier[cursor] :
identifier[cursor] . identifier[execute] ( literal[string] )
keyword[return] identifier[cursor] . identifier[fetchone] ()[ literal[int] ] | def __get_dbms_version(self, make_connection=True):
"""
Returns the 'DBMS Version' string, or ''. If a connection to the
database has not already been established, a connection will be made
when `make_connection` is True.
"""
if not self.connection and make_connection:
self.connect() # depends on [control=['if'], data=[]]
with self.connection.cursor() as cursor:
cursor.execute("SELECT SERVERPROPERTY('productversion')")
return cursor.fetchone()[0] # depends on [control=['with'], data=['cursor']] |
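Illustrative only: SERVERPROPERTY('productversion') comes back as a dotted string such as '15.0.2000.5', and callers typically compare the leading components numerically.

raw = '15.0.2000.5'  # e.g. SQL Server 2019
version = tuple(int(p) for p in raw.split('.'))
assert version >= (11,)  # gate a SQL Server 2012+ feature, for instance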
def _output_format(cls, func, override=None):
""" Decorator in charge of giving the output its right format, either
json or pandas
Keyword Arguments:
func: The function to be decorated
override: Override the internal format of the call, default None
"""
@wraps(func)
def _format_wrapper(self, *args, **kwargs):
call_response, data_key, meta_data_key = func(
self, *args, **kwargs)
if 'json' in self.output_format.lower() or 'pandas' \
in self.output_format.lower():
data = call_response[data_key]
if meta_data_key is not None:
meta_data = call_response[meta_data_key]
else:
meta_data = None
# Allow to override the output parameter in the call
if override is None:
output_format = self.output_format.lower()
elif 'json' in override.lower() or 'pandas' in override.lower():
output_format = override.lower()
# Choose output format
if output_format == 'json':
return data, meta_data
elif output_format == 'pandas':
if isinstance(data, list):
# If the call returns a list, then we will append them
# in the resulting data frame. If in the future
# alphavantage decides to do more with returning arrays
# this might become buggy. For now will do the trick.
data_array = []
for val in data:
data_array.append([v for _, v in val.items()])
data_pandas = pandas.DataFrame(data_array, columns=[
k for k, _ in data[0].items()])
else:
data_pandas = pandas.DataFrame.from_dict(data,
orient='index',
dtype=float)
data_pandas.index.name = 'date'
if 'integer' in self.indexing_type:
# Set Date as an actual column so a new numerical index
# will be created, but only when specified by the user.
data_pandas.reset_index(level=0, inplace=True)
return data_pandas, meta_data
elif 'csv' in self.output_format.lower():
return call_response, None
else:
raise ValueError('Format: {} is not supported'.format(
self.output_format))
return _format_wrapper | def function[_output_format, parameter[cls, func, override]]:
constant[ Decorator in charge of giving the output its right format, either
json or pandas
Keyword Arguments:
func: The function to be decorated
override: Override the internal format of the call, default None
]
def function[_format_wrapper, parameter[self]]:
<ast.Tuple object at 0x7da18f812d10> assign[=] call[name[func], parameter[name[self], <ast.Starred object at 0x7da18f8118d0>]]
if <ast.BoolOp object at 0x7da18f811f90> begin[:]
variable[data] assign[=] call[name[call_response]][name[data_key]]
if compare[name[meta_data_key] is_not constant[None]] begin[:]
variable[meta_data] assign[=] call[name[call_response]][name[meta_data_key]]
if compare[name[override] is constant[None]] begin[:]
variable[output_format] assign[=] call[name[self].output_format.lower, parameter[]]
if compare[name[output_format] equal[==] constant[json]] begin[:]
return[tuple[[<ast.Name object at 0x7da18f8118a0>, <ast.Name object at 0x7da18f810fd0>]]]
return[name[_format_wrapper]] | keyword[def] identifier[_output_format] ( identifier[cls] , identifier[func] , identifier[override] = keyword[None] ):
literal[string]
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[_format_wrapper] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
identifier[call_response] , identifier[data_key] , identifier[meta_data_key] = identifier[func] (
identifier[self] ,* identifier[args] ,** identifier[kwargs] )
keyword[if] literal[string] keyword[in] identifier[self] . identifier[output_format] . identifier[lower] () keyword[or] literal[string] keyword[in] identifier[self] . identifier[output_format] . identifier[lower] ():
identifier[data] = identifier[call_response] [ identifier[data_key] ]
keyword[if] identifier[meta_data_key] keyword[is] keyword[not] keyword[None] :
identifier[meta_data] = identifier[call_response] [ identifier[meta_data_key] ]
keyword[else] :
identifier[meta_data] = keyword[None]
keyword[if] identifier[override] keyword[is] keyword[None] :
identifier[output_format] = identifier[self] . identifier[output_format] . identifier[lower] ()
keyword[elif] literal[string] keyword[in] identifier[override] . identifier[lower] () keyword[or] literal[string] keyword[in] identifier[override] . identifier[lower] ():
identifier[output_format] = identifier[override] . identifier[lower] ()
keyword[if] identifier[output_format] == literal[string] :
keyword[return] identifier[data] , identifier[meta_data]
keyword[elif] identifier[output_format] == literal[string] :
keyword[if] identifier[isinstance] ( identifier[data] , identifier[list] ):
identifier[data_array] =[]
keyword[for] identifier[val] keyword[in] identifier[data] :
identifier[data_array] . identifier[append] ([ identifier[v] keyword[for] identifier[_] , identifier[v] keyword[in] identifier[val] . identifier[items] ()])
identifier[data_pandas] = identifier[pandas] . identifier[DataFrame] ( identifier[data_array] , identifier[columns] =[
identifier[k] keyword[for] identifier[k] , identifier[_] keyword[in] identifier[data] [ literal[int] ]. identifier[items] ()])
keyword[else] :
identifier[data_pandas] = identifier[pandas] . identifier[DataFrame] . identifier[from_dict] ( identifier[data] ,
identifier[orient] = literal[string] ,
identifier[dtype] = identifier[float] )
identifier[data_pandas] . identifier[index] . identifier[name] = literal[string]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[indexing_type] :
identifier[data_pandas] . identifier[reset_index] ( identifier[level] = literal[int] , identifier[inplace] = keyword[True] )
keyword[return] identifier[data_pandas] , identifier[meta_data]
keyword[elif] literal[string] keyword[in] identifier[self] . identifier[output_format] . identifier[lower] ():
keyword[return] identifier[call_response] , keyword[None]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] (
identifier[self] . identifier[output_format] ))
keyword[return] identifier[_format_wrapper] | def _output_format(cls, func, override=None):
""" Decorator in charge of giving the output its right format, either
json or pandas
Keyword Arguments:
func: The function to be decorated
override: Override the internal format of the call, default None
"""
@wraps(func)
def _format_wrapper(self, *args, **kwargs):
(call_response, data_key, meta_data_key) = func(self, *args, **kwargs)
if 'json' in self.output_format.lower() or 'pandas' in self.output_format.lower():
data = call_response[data_key]
if meta_data_key is not None:
meta_data = call_response[meta_data_key] # depends on [control=['if'], data=['meta_data_key']]
else:
meta_data = None
# Allow to override the output parameter in the call
if override is None:
output_format = self.output_format.lower() # depends on [control=['if'], data=[]]
            elif 'json' in override.lower() or 'pandas' in override.lower():
output_format = override.lower() # depends on [control=['if'], data=[]]
# Choose output format
if output_format == 'json':
return (data, meta_data) # depends on [control=['if'], data=[]]
elif output_format == 'pandas':
if isinstance(data, list):
# If the call returns a list, then we will append them
# in the resulting data frame. If in the future
# alphavantage decides to do more with returning arrays
# this might become buggy. For now will do the trick.
data_array = []
for val in data:
data_array.append([v for (_, v) in val.items()]) # depends on [control=['for'], data=['val']]
data_pandas = pandas.DataFrame(data_array, columns=[k for (k, _) in data[0].items()]) # depends on [control=['if'], data=[]]
else:
data_pandas = pandas.DataFrame.from_dict(data, orient='index', dtype=float)
data_pandas.index.name = 'date'
if 'integer' in self.indexing_type:
# Set Date as an actual column so a new numerical index
# will be created, but only when specified by the user.
data_pandas.reset_index(level=0, inplace=True) # depends on [control=['if'], data=[]]
return (data_pandas, meta_data) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif 'csv' in self.output_format.lower():
return (call_response, None) # depends on [control=['if'], data=[]]
else:
raise ValueError('Format: {} is not supported'.format(self.output_format))
return _format_wrapper |
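A standalone sketch of what the pandas branch above produces, using hypothetical sample data; it mirrors the `from_dict(..., orient='index')` call and the optional integer reindexing:

import pandas

# hypothetical API response: outer keys are dates, inner dicts are fields
data = {
    '2019-01-02': {'open': 100.0, 'close': 101.5},
    '2019-01-03': {'open': 101.5, 'close': 99.8},
}
frame = pandas.DataFrame.from_dict(data, orient='index', dtype=float)
frame.index.name = 'date'
frame.reset_index(level=0, inplace=True)  # the 'integer' indexing variant
print(frame)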
def instructions(self):
"""
Return an iterator over this block's instructions.
The iterator will yield a ValueRef for each instruction.
"""
if not self.is_block:
raise ValueError('expected block value, got %s' % (self._kind,))
it = ffi.lib.LLVMPY_BlockInstructionsIter(self)
parents = self._parents.copy()
parents.update(block=self)
return _InstructionsIterator(it, parents) | def function[instructions, parameter[self]]:
constant[
Return an iterator over this block's instructions.
The iterator will yield a ValueRef for each instruction.
]
if <ast.UnaryOp object at 0x7da1b18a17b0> begin[:]
<ast.Raise object at 0x7da1b18a28c0>
variable[it] assign[=] call[name[ffi].lib.LLVMPY_BlockInstructionsIter, parameter[name[self]]]
variable[parents] assign[=] call[name[self]._parents.copy, parameter[]]
call[name[parents].update, parameter[]]
return[call[name[_InstructionsIterator], parameter[name[it], name[parents]]]] | keyword[def] identifier[instructions] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_block] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[self] . identifier[_kind] ,))
identifier[it] = identifier[ffi] . identifier[lib] . identifier[LLVMPY_BlockInstructionsIter] ( identifier[self] )
identifier[parents] = identifier[self] . identifier[_parents] . identifier[copy] ()
identifier[parents] . identifier[update] ( identifier[block] = identifier[self] )
keyword[return] identifier[_InstructionsIterator] ( identifier[it] , identifier[parents] ) | def instructions(self):
"""
Return an iterator over this block's instructions.
The iterator will yield a ValueRef for each instruction.
"""
if not self.is_block:
raise ValueError('expected block value, got %s' % (self._kind,)) # depends on [control=['if'], data=[]]
it = ffi.lib.LLVMPY_BlockInstructionsIter(self)
parents = self._parents.copy()
parents.update(block=self)
return _InstructionsIterator(it, parents) |
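A usage sketch, assuming llvmlite's binding layer and a small textual IR module (both hypothetical here); it drives the instruction iterator defined above:

import llvmlite.binding as llvm

llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()

module_ir = """
define i32 @add(i32 %a, i32 %b) {
entry:
  %sum = add i32 %a, %b
  ret i32 %sum
}
"""
mod = llvm.parse_assembly(module_ir)
for func in mod.functions:
    for block in func.blocks:
        for instr in block.instructions:  # one ValueRef per instruction
            print(str(instr).strip())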
def sampler(dataframe, modulo, column="client_id", sample_id=42):
""" Collect a sample of clients given an input column
Filter dataframe based on the modulus of the CRC32 of a given string
    column matching a given sample_id. If the dataframe has already been filtered
by sample_id, then modulo should be a multiple of 100, column should be
"client_id", and the given sample_id should match the value previously
used, optionally plus multiples of 100.
Args:
dataframe: A Dataframe to be sampled
modulo (int): selects a 1/modulo sampling of dataframe
column (str): name of a string column to sample on
sample_id (int): modulus result to select for sampling
Returns:
A DataFrame sampled on the given inputs.
"""
return dataframe \
.withColumn(
"sampler",
udf(lambda key: (crc32(key or "") & 0xffffffff) % modulo)(column),
).where("sampler = %s" % sample_id).drop("sampler") | def function[sampler, parameter[dataframe, modulo, column, sample_id]]:
constant[ Collect a sample of clients given an input column
Filter dataframe based on the modulus of the CRC32 of a given string
column matching a given sample_id. if dataframe has already been filtered
by sample_id, then modulo should be a multiple of 100, column should be
"client_id", and the given sample_id should match the value previously
used, optionally plus multiples of 100.
Args:
dataframe: A Dataframe to be sampled
modulo (int): selects a 1/modulo sampling of dataframe
column (str): name of a string column to sample on
sample_id (int): modulus result to select for sampling
Returns:
A DataFrame sampled on the given inputs.
]
return[call[call[call[name[dataframe].withColumn, parameter[constant[sampler], call[call[name[udf], parameter[<ast.Lambda object at 0x7da18bc73670>]], parameter[name[column]]]]].where, parameter[binary_operation[constant[sampler = %s] <ast.Mod object at 0x7da2590d6920> name[sample_id]]]].drop, parameter[constant[sampler]]]] | keyword[def] identifier[sampler] ( identifier[dataframe] , identifier[modulo] , identifier[column] = literal[string] , identifier[sample_id] = literal[int] ):
literal[string]
keyword[return] identifier[dataframe] . identifier[withColumn] (
literal[string] ,
identifier[udf] ( keyword[lambda] identifier[key] :( identifier[crc32] ( identifier[key] keyword[or] literal[string] )& literal[int] )% identifier[modulo] )( identifier[column] ),
). identifier[where] ( literal[string] % identifier[sample_id] ). identifier[drop] ( literal[string] ) | def sampler(dataframe, modulo, column='client_id', sample_id=42):
""" Collect a sample of clients given an input column
Filter dataframe based on the modulus of the CRC32 of a given string
    column matching a given sample_id. If the dataframe has already been filtered
by sample_id, then modulo should be a multiple of 100, column should be
"client_id", and the given sample_id should match the value previously
used, optionally plus multiples of 100.
Args:
dataframe: A Dataframe to be sampled
modulo (int): selects a 1/modulo sampling of dataframe
column (str): name of a string column to sample on
sample_id (int): modulus result to select for sampling
Returns:
A DataFrame sampled on the given inputs.
"""
return dataframe.withColumn('sampler', udf(lambda key: (crc32(key or '') & 4294967295) % modulo)(column)).where('sampler = %s' % sample_id).drop('sampler') |
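A hypothetical PySpark session drawing a 1% client sample with the function above; `udf` and `crc32` are assumed to be imported as in its module:

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [("client-%d" % i,) for i in range(1000)], ["client_id"])

one_percent = sampler(df, modulo=100, column="client_id", sample_id=42)
print(one_percent.count())  # roughly 10 of the 1000 rows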
def changeGroupImageRemote(self, image_url, thread_id=None):
"""
Changes a thread image from a URL
:param image_url: URL of an image to upload and change
:param thread_id: User/Group ID to change image. See :ref:`intro_threads`
:raises: FBchatException if request failed
"""
(image_id, mimetype), = self._upload(get_files_from_urls([image_url]))
return self._changeGroupImage(image_id, thread_id) | def function[changeGroupImageRemote, parameter[self, image_url, thread_id]]:
constant[
Changes a thread image from a URL
:param image_url: URL of an image to upload and change
:param thread_id: User/Group ID to change image. See :ref:`intro_threads`
:raises: FBchatException if request failed
]
<ast.Tuple object at 0x7da1b18dc610> assign[=] call[name[self]._upload, parameter[call[name[get_files_from_urls], parameter[list[[<ast.Name object at 0x7da1b18dcbe0>]]]]]]
return[call[name[self]._changeGroupImage, parameter[name[image_id], name[thread_id]]]] | keyword[def] identifier[changeGroupImageRemote] ( identifier[self] , identifier[image_url] , identifier[thread_id] = keyword[None] ):
literal[string]
( identifier[image_id] , identifier[mimetype] ),= identifier[self] . identifier[_upload] ( identifier[get_files_from_urls] ([ identifier[image_url] ]))
keyword[return] identifier[self] . identifier[_changeGroupImage] ( identifier[image_id] , identifier[thread_id] ) | def changeGroupImageRemote(self, image_url, thread_id=None):
"""
Changes a thread image from a URL
:param image_url: URL of an image to upload and change
:param thread_id: User/Group ID to change image. See :ref:`intro_threads`
:raises: FBchatException if request failed
"""
((image_id, mimetype),) = self._upload(get_files_from_urls([image_url]))
return self._changeGroupImage(image_id, thread_id) |
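Call shape for the method above, with placeholder credentials and a hypothetical group thread id:

from fbchat import Client

client = Client("<email>", "<password>")
client.changeGroupImageRemote(
    "https://example.com/logo.png", thread_id="1234567890")
client.logout()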
def _write_zip(self, func_src, fpath):
"""
Write the function source to a zip file, suitable for upload to
Lambda.
Note there's a bit of undocumented magic going on here; Lambda needs
the execute bit set on the module with the handler in it (i.e. 0755
or 0555 permissions). There doesn't seem to be *any* documentation on
how to do this in the Python docs. The only real hint comes from the
source code of ``zipfile.ZipInfo.from_file()``, which includes:
st = os.stat(filename)
...
zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes
:param func_src: lambda function source
:type func_src: str
:param fpath: path to write the zip file at
:type fpath: str
"""
# get timestamp for file
now = datetime.now()
zi_tup = (now.year, now.month, now.day, now.hour, now.minute,
now.second)
logger.debug('setting zipinfo date to: %s', zi_tup)
# create a ZipInfo so we can set file attributes/mode
zinfo = zipfile.ZipInfo('webhook2lambda2sqs_func.py', zi_tup)
# set file mode
        zinfo.external_attr = 0o755 << 16  # octal 0755, per the docstring
logger.debug('setting zipinfo file mode to: %s', zinfo.external_attr)
logger.debug('writing zip file at: %s', fpath)
with zipfile.ZipFile(fpath, 'w') as z:
z.writestr(zinfo, func_src) | def function[_write_zip, parameter[self, func_src, fpath]]:
constant[
Write the function source to a zip file, suitable for upload to
Lambda.
Note there's a bit of undocumented magic going on here; Lambda needs
the execute bit set on the module with the handler in it (i.e. 0755
or 0555 permissions). There doesn't seem to be *any* documentation on
how to do this in the Python docs. The only real hint comes from the
source code of ``zipfile.ZipInfo.from_file()``, which includes:
st = os.stat(filename)
...
zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes
:param func_src: lambda function source
:type func_src: str
:param fpath: path to write the zip file at
:type fpath: str
]
variable[now] assign[=] call[name[datetime].now, parameter[]]
variable[zi_tup] assign[=] tuple[[<ast.Attribute object at 0x7da20c7958d0>, <ast.Attribute object at 0x7da20c795990>, <ast.Attribute object at 0x7da20c795480>, <ast.Attribute object at 0x7da20c794580>, <ast.Attribute object at 0x7da20c794850>, <ast.Attribute object at 0x7da20c795f90>]]
call[name[logger].debug, parameter[constant[setting zipinfo date to: %s], name[zi_tup]]]
variable[zinfo] assign[=] call[name[zipfile].ZipInfo, parameter[constant[webhook2lambda2sqs_func.py], name[zi_tup]]]
    name[zinfo].external_attr assign[=] binary_operation[constant[493] <ast.LShift object at 0x7da2590d69e0> constant[16]]
call[name[logger].debug, parameter[constant[setting zipinfo file mode to: %s], name[zinfo].external_attr]]
call[name[logger].debug, parameter[constant[writing zip file at: %s], name[fpath]]]
with call[name[zipfile].ZipFile, parameter[name[fpath], constant[w]]] begin[:]
call[name[z].writestr, parameter[name[zinfo], name[func_src]]] | keyword[def] identifier[_write_zip] ( identifier[self] , identifier[func_src] , identifier[fpath] ):
literal[string]
identifier[now] = identifier[datetime] . identifier[now] ()
identifier[zi_tup] =( identifier[now] . identifier[year] , identifier[now] . identifier[month] , identifier[now] . identifier[day] , identifier[now] . identifier[hour] , identifier[now] . identifier[minute] ,
identifier[now] . identifier[second] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[zi_tup] )
identifier[zinfo] = identifier[zipfile] . identifier[ZipInfo] ( literal[string] , identifier[zi_tup] )
identifier[zinfo] . identifier[external_attr] = literal[int] << literal[int]
identifier[logger] . identifier[debug] ( literal[string] , identifier[zinfo] . identifier[external_attr] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[fpath] )
keyword[with] identifier[zipfile] . identifier[ZipFile] ( identifier[fpath] , literal[string] ) keyword[as] identifier[z] :
identifier[z] . identifier[writestr] ( identifier[zinfo] , identifier[func_src] ) | def _write_zip(self, func_src, fpath):
"""
Write the function source to a zip file, suitable for upload to
Lambda.
Note there's a bit of undocumented magic going on here; Lambda needs
the execute bit set on the module with the handler in it (i.e. 0755
or 0555 permissions). There doesn't seem to be *any* documentation on
how to do this in the Python docs. The only real hint comes from the
source code of ``zipfile.ZipInfo.from_file()``, which includes:
st = os.stat(filename)
...
zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes
:param func_src: lambda function source
:type func_src: str
:param fpath: path to write the zip file at
:type fpath: str
"""
# get timestamp for file
now = datetime.now()
zi_tup = (now.year, now.month, now.day, now.hour, now.minute, now.second)
logger.debug('setting zipinfo date to: %s', zi_tup)
# create a ZipInfo so we can set file attributes/mode
zinfo = zipfile.ZipInfo('webhook2lambda2sqs_func.py', zi_tup)
# set file mode
    zinfo.external_attr = 493 << 16  # 0o755
logger.debug('setting zipinfo file mode to: %s', zinfo.external_attr)
logger.debug('writing zip file at: %s', fpath)
with zipfile.ZipFile(fpath, 'w') as z:
z.writestr(zinfo, func_src) # depends on [control=['with'], data=['z']] |
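The permission trick can be checked in isolation; this standard-library sketch writes a one-file zip with mode 0755 and reads the attribute back:

import stat
import zipfile

zinfo = zipfile.ZipInfo('handler.py', (2016, 7, 1, 12, 0, 0))
zinfo.external_attr = 0o755 << 16  # Unix mode lives in the high 16 bits
with zipfile.ZipFile('func.zip', 'w') as z:
    z.writestr(zinfo, "def handler(event, context):\n    return 'ok'\n")

with zipfile.ZipFile('func.zip') as z:
    mode = z.getinfo('handler.py').external_attr >> 16
    print(oct(stat.S_IMODE(mode)))  # 0o755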
def wait_next_block_factory(app, timeout=None):
"""Creates a `wait_next_block` function, that
will wait `timeout` seconds (`None` = indefinitely)
for a new block to appear.
:param app: the app-instance the function should work for
:param timeout: timeout in seconds
"""
chain = app.services.chain
# setup new block callbacks and events
new_block_evt = gevent.event.Event()
def _on_new_block(app):
log.DEV('new block mined')
new_block_evt.set()
chain.on_new_head_cbs.append(_on_new_block)
def wait_next_block():
bn = chain.chain.head.number
chain.consensus_manager.log('waiting for new block', block=bn)
new_block_evt.wait(timeout)
new_block_evt.clear()
if chain.chain.head.number > bn:
chain.consensus_manager.log('new block event', block=chain.chain.head.number)
elif chain.chain.head.number == bn:
chain.consensus_manager.log('wait_next_block timed out', block=bn)
return wait_next_block | def function[wait_next_block_factory, parameter[app, timeout]]:
    constant[Creates a `wait_next_block` function that
will wait `timeout` seconds (`None` = indefinitely)
for a new block to appear.
:param app: the app-instance the function should work for
:param timeout: timeout in seconds
]
variable[chain] assign[=] name[app].services.chain
variable[new_block_evt] assign[=] call[name[gevent].event.Event, parameter[]]
def function[_on_new_block, parameter[app]]:
call[name[log].DEV, parameter[constant[new block mined]]]
call[name[new_block_evt].set, parameter[]]
call[name[chain].on_new_head_cbs.append, parameter[name[_on_new_block]]]
def function[wait_next_block, parameter[]]:
variable[bn] assign[=] name[chain].chain.head.number
call[name[chain].consensus_manager.log, parameter[constant[waiting for new block]]]
call[name[new_block_evt].wait, parameter[name[timeout]]]
call[name[new_block_evt].clear, parameter[]]
if compare[name[chain].chain.head.number greater[>] name[bn]] begin[:]
call[name[chain].consensus_manager.log, parameter[constant[new block event]]]
return[name[wait_next_block]] | keyword[def] identifier[wait_next_block_factory] ( identifier[app] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[chain] = identifier[app] . identifier[services] . identifier[chain]
identifier[new_block_evt] = identifier[gevent] . identifier[event] . identifier[Event] ()
keyword[def] identifier[_on_new_block] ( identifier[app] ):
identifier[log] . identifier[DEV] ( literal[string] )
identifier[new_block_evt] . identifier[set] ()
identifier[chain] . identifier[on_new_head_cbs] . identifier[append] ( identifier[_on_new_block] )
keyword[def] identifier[wait_next_block] ():
identifier[bn] = identifier[chain] . identifier[chain] . identifier[head] . identifier[number]
identifier[chain] . identifier[consensus_manager] . identifier[log] ( literal[string] , identifier[block] = identifier[bn] )
identifier[new_block_evt] . identifier[wait] ( identifier[timeout] )
identifier[new_block_evt] . identifier[clear] ()
keyword[if] identifier[chain] . identifier[chain] . identifier[head] . identifier[number] > identifier[bn] :
identifier[chain] . identifier[consensus_manager] . identifier[log] ( literal[string] , identifier[block] = identifier[chain] . identifier[chain] . identifier[head] . identifier[number] )
keyword[elif] identifier[chain] . identifier[chain] . identifier[head] . identifier[number] == identifier[bn] :
identifier[chain] . identifier[consensus_manager] . identifier[log] ( literal[string] , identifier[block] = identifier[bn] )
keyword[return] identifier[wait_next_block] | def wait_next_block_factory(app, timeout=None):
"""Creates a `wait_next_block` function, that
will wait `timeout` seconds (`None` = indefinitely)
for a new block to appear.
:param app: the app-instance the function should work for
:param timeout: timeout in seconds
"""
chain = app.services.chain
# setup new block callbacks and events
new_block_evt = gevent.event.Event()
def _on_new_block(app):
log.DEV('new block mined')
new_block_evt.set()
chain.on_new_head_cbs.append(_on_new_block)
def wait_next_block():
bn = chain.chain.head.number
chain.consensus_manager.log('waiting for new block', block=bn)
new_block_evt.wait(timeout)
new_block_evt.clear()
if chain.chain.head.number > bn:
chain.consensus_manager.log('new block event', block=chain.chain.head.number) # depends on [control=['if'], data=[]]
elif chain.chain.head.number == bn:
chain.consensus_manager.log('wait_next_block timed out', block=bn) # depends on [control=['if'], data=['bn']]
return wait_next_block |
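Intended call pattern, with `app` assumed to come from the surrounding framework (hypothetical here):

# hypothetical: `app` exposes services.chain as required above
wait_next_block = wait_next_block_factory(app, timeout=30)
wait_next_block()  # returns once a new head arrives or 30 s elapse
wait_next_block()  # the event is cleared and reused between calls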
def Back(self, n = 1, dl = 0):
"""退格键n次
"""
self.Delay(dl)
self.keyboard.tap_key(self.keyboard.backspace_key, n) | def function[Back, parameter[self, n, dl]]:
    constant[Press the backspace key n times.
    ]
call[name[self].Delay, parameter[name[dl]]]
call[name[self].keyboard.tap_key, parameter[name[self].keyboard.backspace_key, name[n]]] | keyword[def] identifier[Back] ( identifier[self] , identifier[n] = literal[int] , identifier[dl] = literal[int] ):
literal[string]
identifier[self] . identifier[Delay] ( identifier[dl] )
identifier[self] . identifier[keyboard] . identifier[tap_key] ( identifier[self] . identifier[keyboard] . identifier[backspace_key] , identifier[n] ) | def Back(self, n=1, dl=0):
"""退格键n次
"""
self.Delay(dl)
self.keyboard.tap_key(self.keyboard.backspace_key, n) |
def syslog_generate(str_processName, str_pid):
'''
Returns a string similar to:
Tue Oct 9 10:49:53 2012 pretoria message.py[26873]:
where 'pretoria' is the hostname, 'message.py' is the current process
name and 26873 is the current process id.
'''
localtime = time.asctime( time.localtime(time.time()) )
hostname = os.uname()[1]
syslog = '%s %s %s[%s]' % (localtime, hostname, str_processName, str_pid)
return syslog | def function[syslog_generate, parameter[str_processName, str_pid]]:
constant[
Returns a string similar to:
Tue Oct 9 10:49:53 2012 pretoria message.py[26873]:
where 'pretoria' is the hostname, 'message.py' is the current process
name and 26873 is the current process id.
]
variable[localtime] assign[=] call[name[time].asctime, parameter[call[name[time].localtime, parameter[call[name[time].time, parameter[]]]]]]
variable[hostname] assign[=] call[call[name[os].uname, parameter[]]][constant[1]]
variable[syslog] assign[=] binary_operation[constant[%s %s %s[%s]] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b07f7a30>, <ast.Name object at 0x7da1b07f7d00>, <ast.Name object at 0x7da1b07f69b0>, <ast.Name object at 0x7da1b07f7be0>]]]
return[name[syslog]] | keyword[def] identifier[syslog_generate] ( identifier[str_processName] , identifier[str_pid] ):
literal[string]
identifier[localtime] = identifier[time] . identifier[asctime] ( identifier[time] . identifier[localtime] ( identifier[time] . identifier[time] ()))
identifier[hostname] = identifier[os] . identifier[uname] ()[ literal[int] ]
identifier[syslog] = literal[string] %( identifier[localtime] , identifier[hostname] , identifier[str_processName] , identifier[str_pid] )
keyword[return] identifier[syslog] | def syslog_generate(str_processName, str_pid):
"""
Returns a string similar to:
Tue Oct 9 10:49:53 2012 pretoria message.py[26873]:
where 'pretoria' is the hostname, 'message.py' is the current process
name and 26873 is the current process id.
"""
localtime = time.asctime(time.localtime(time.time()))
hostname = os.uname()[1]
syslog = '%s %s %s[%s]' % (localtime, hostname, str_processName, str_pid)
return syslog |
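A quick check of the prefix format, using the current process name and pid:

import os
import sys

line = syslog_generate(os.path.basename(sys.argv[0]), os.getpid())
print(line)  # e.g. "Tue Oct  9 10:49:53 2012 pretoria message.py[26873]"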
def sector(self, start_ray, end_ray, start_distance=None, end_distance=None, units='b'):
"""Slices a sector from the selected dataset.
Slice contains the start and end rays. If start and end rays are equal
one ray is returned. If the start_ray is greater than the end_ray
slicing continues over the 359-0 border.
Parameters
----------
start_ray : int
            Starting ray of the slice; the first ray is 0
        end_ray : int
            End ray of the slice, last ray is 359
        Keywords
        --------
        start_distance : int
            Starting distance of the slice; if not defined, the sector
            starts from zero
        end_distance : int
            Ending distance of the slice; if not defined, the sector
            continues to the end of the ray
units : str
Units used in distance slicing. Option 'b' means that bin number
is used as index. Option 'm' means that meters are used and the
slicing index is calculated using bin width.
Returns
-------
sector : ndarray
Numpy array containing the sector values
Examples
--------
Get one ray from the selected dataset
>>> pvol = odimPVOL('pvol.h5')
>>> pvol.select_dataset('A', 'DBZH')
>>> ray = pvol.sector(10, 10)
Get sector from selected dataset, rays from 100 to 200
at distances from 5 km to 10 km.
>>> pvol = odimPVOL('pvol.h5')
>>> pvol.select_dataset('A', 'DBZH')
>>> sector = pvol.sector(100, 200, 5000, 10000)
"""
if self.dataset is None:
raise ValueError('Dataset is not selected')
# Validate parameter values
ray_max, distance_max = self.dataset.shape
if start_ray > ray_max:
raise ValueError('Value of start_ray is bigger than the number of rays')
if start_ray < 0:
raise ValueError('start_ray must be non negative')
if start_distance is None:
start_distance_index = 0
else:
if units == 'b':
start_distance_index = start_distance
elif units == 'm':
try:
rscale = next(self.attr_gen('rscale')).value
except:
raise MissingMetadataError
start_distance_index = int(start_distance / rscale)
if end_distance is None:
end_distance_index = self.dataset.shape[1]
else:
if units == 'b':
end_distance_index = end_distance
elif units == 'm':
end_distance_index = int(end_distance / rscale)
if end_ray is None:
sector = self.dataset[start_ray, start_distance_index:end_distance_index]
else:
if start_ray <= end_ray:
sector = self.dataset[start_ray:end_ray+1, start_distance_index:end_distance_index]
else:
sector1 = self.dataset[start_ray:, start_distance_index:end_distance_index]
sector2 = self.dataset[:end_ray+1, start_distance_index:end_distance_index]
sector = np.concatenate((sector1, sector2), axis=0)
return sector | def function[sector, parameter[self, start_ray, end_ray, start_distance, end_distance, units]]:
constant[Slices a sector from the selected dataset.
Slice contains the start and end rays. If start and end rays are equal
one ray is returned. If the start_ray is greater than the end_ray
slicing continues over the 359-0 border.
Parameters
----------
start_ray : int
        Starting ray of the slice; the first ray is 0
    end_ray : int
        End ray of the slice, last ray is 359
    Keywords
    --------
    start_distance : int
        Starting distance of the slice; if not defined, the sector
        starts from zero
    end_distance : int
        Ending distance of the slice; if not defined, the sector
        continues to the end of the ray
units : str
Units used in distance slicing. Option 'b' means that bin number
is used as index. Option 'm' means that meters are used and the
slicing index is calculated using bin width.
Returns
-------
sector : ndarray
Numpy array containing the sector values
Examples
--------
Get one ray from the selected dataset
>>> pvol = odimPVOL('pvol.h5')
>>> pvol.select_dataset('A', 'DBZH')
>>> ray = pvol.sector(10, 10)
Get sector from selected dataset, rays from 100 to 200
at distances from 5 km to 10 km.
>>> pvol = odimPVOL('pvol.h5')
>>> pvol.select_dataset('A', 'DBZH')
>>> sector = pvol.sector(100, 200, 5000, 10000)
]
if compare[name[self].dataset is constant[None]] begin[:]
<ast.Raise object at 0x7da1b28fc490>
<ast.Tuple object at 0x7da1b28a88e0> assign[=] name[self].dataset.shape
if compare[name[start_ray] greater[>] name[ray_max]] begin[:]
<ast.Raise object at 0x7da1b28a91e0>
if compare[name[start_ray] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da1b28a97e0>
if compare[name[start_distance] is constant[None]] begin[:]
variable[start_distance_index] assign[=] constant[0]
if compare[name[end_distance] is constant[None]] begin[:]
variable[end_distance_index] assign[=] call[name[self].dataset.shape][constant[1]]
if compare[name[end_ray] is constant[None]] begin[:]
variable[sector] assign[=] call[name[self].dataset][tuple[[<ast.Name object at 0x7da1b28fccd0>, <ast.Slice object at 0x7da1b28ffa00>]]]
return[name[sector]] | keyword[def] identifier[sector] ( identifier[self] , identifier[start_ray] , identifier[end_ray] , identifier[start_distance] = keyword[None] , identifier[end_distance] = keyword[None] , identifier[units] = literal[string] ):
literal[string]
keyword[if] identifier[self] . identifier[dataset] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[ray_max] , identifier[distance_max] = identifier[self] . identifier[dataset] . identifier[shape]
keyword[if] identifier[start_ray] > identifier[ray_max] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[start_ray] < literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[start_distance] keyword[is] keyword[None] :
identifier[start_distance_index] = literal[int]
keyword[else] :
keyword[if] identifier[units] == literal[string] :
identifier[start_distance_index] = identifier[start_distance]
keyword[elif] identifier[units] == literal[string] :
keyword[try] :
identifier[rscale] = identifier[next] ( identifier[self] . identifier[attr_gen] ( literal[string] )). identifier[value]
keyword[except] :
keyword[raise] identifier[MissingMetadataError]
identifier[start_distance_index] = identifier[int] ( identifier[start_distance] / identifier[rscale] )
keyword[if] identifier[end_distance] keyword[is] keyword[None] :
identifier[end_distance_index] = identifier[self] . identifier[dataset] . identifier[shape] [ literal[int] ]
keyword[else] :
keyword[if] identifier[units] == literal[string] :
identifier[end_distance_index] = identifier[end_distance]
keyword[elif] identifier[units] == literal[string] :
identifier[end_distance_index] = identifier[int] ( identifier[end_distance] / identifier[rscale] )
keyword[if] identifier[end_ray] keyword[is] keyword[None] :
identifier[sector] = identifier[self] . identifier[dataset] [ identifier[start_ray] , identifier[start_distance_index] : identifier[end_distance_index] ]
keyword[else] :
keyword[if] identifier[start_ray] <= identifier[end_ray] :
identifier[sector] = identifier[self] . identifier[dataset] [ identifier[start_ray] : identifier[end_ray] + literal[int] , identifier[start_distance_index] : identifier[end_distance_index] ]
keyword[else] :
identifier[sector1] = identifier[self] . identifier[dataset] [ identifier[start_ray] :, identifier[start_distance_index] : identifier[end_distance_index] ]
identifier[sector2] = identifier[self] . identifier[dataset] [: identifier[end_ray] + literal[int] , identifier[start_distance_index] : identifier[end_distance_index] ]
identifier[sector] = identifier[np] . identifier[concatenate] (( identifier[sector1] , identifier[sector2] ), identifier[axis] = literal[int] )
keyword[return] identifier[sector] | def sector(self, start_ray, end_ray, start_distance=None, end_distance=None, units='b'):
"""Slices a sector from the selected dataset.
Slice contains the start and end rays. If start and end rays are equal
one ray is returned. If the start_ray is greater than the end_ray
slicing continues over the 359-0 border.
Parameters
----------
start_ray : int
        Starting ray of the slice; the first ray is 0
    end_ray : int
        End ray of the slice, last ray is 359
    Keywords
    --------
    start_distance : int
        Starting distance of the slice; if not defined, the sector
        starts from zero
    end_distance : int
        Ending distance of the slice; if not defined, the sector
        continues to the end of the ray
units : str
Units used in distance slicing. Option 'b' means that bin number
is used as index. Option 'm' means that meters are used and the
slicing index is calculated using bin width.
Returns
-------
sector : ndarray
Numpy array containing the sector values
Examples
--------
Get one ray from the selected dataset
>>> pvol = odimPVOL('pvol.h5')
>>> pvol.select_dataset('A', 'DBZH')
>>> ray = pvol.sector(10, 10)
Get sector from selected dataset, rays from 100 to 200
at distances from 5 km to 10 km.
>>> pvol = odimPVOL('pvol.h5')
>>> pvol.select_dataset('A', 'DBZH')
>>> sector = pvol.sector(100, 200, 5000, 10000)
"""
if self.dataset is None:
raise ValueError('Dataset is not selected') # depends on [control=['if'], data=[]] # Validate parameter values
(ray_max, distance_max) = self.dataset.shape
if start_ray > ray_max:
raise ValueError('Value of start_ray is bigger than the number of rays') # depends on [control=['if'], data=[]]
if start_ray < 0:
raise ValueError('start_ray must be non negative') # depends on [control=['if'], data=[]]
if start_distance is None:
start_distance_index = 0 # depends on [control=['if'], data=[]]
elif units == 'b':
start_distance_index = start_distance # depends on [control=['if'], data=[]]
elif units == 'm':
try:
rscale = next(self.attr_gen('rscale')).value # depends on [control=['try'], data=[]]
except:
raise MissingMetadataError # depends on [control=['except'], data=[]]
start_distance_index = int(start_distance / rscale) # depends on [control=['if'], data=[]]
if end_distance is None:
end_distance_index = self.dataset.shape[1] # depends on [control=['if'], data=[]]
elif units == 'b':
end_distance_index = end_distance # depends on [control=['if'], data=[]]
elif units == 'm':
end_distance_index = int(end_distance / rscale) # depends on [control=['if'], data=[]]
if end_ray is None:
sector = self.dataset[start_ray, start_distance_index:end_distance_index] # depends on [control=['if'], data=[]]
elif start_ray <= end_ray:
sector = self.dataset[start_ray:end_ray + 1, start_distance_index:end_distance_index] # depends on [control=['if'], data=['start_ray', 'end_ray']]
else:
sector1 = self.dataset[start_ray:, start_distance_index:end_distance_index]
sector2 = self.dataset[:end_ray + 1, start_distance_index:end_distance_index]
sector = np.concatenate((sector1, sector2), axis=0)
return sector |
def AddServiceDescriptor(self, service_desc):
"""Adds a ServiceDescriptor to the pool.
Args:
service_desc: A ServiceDescriptor.
"""
if not isinstance(service_desc, descriptor.ServiceDescriptor):
raise TypeError('Expected instance of descriptor.ServiceDescriptor.')
self._service_descriptors[service_desc.full_name] = service_desc | def function[AddServiceDescriptor, parameter[self, service_desc]]:
constant[Adds a ServiceDescriptor to the pool.
Args:
service_desc: A ServiceDescriptor.
]
if <ast.UnaryOp object at 0x7da1b208c040> begin[:]
<ast.Raise object at 0x7da1b1f77b80>
call[name[self]._service_descriptors][name[service_desc].full_name] assign[=] name[service_desc] | keyword[def] identifier[AddServiceDescriptor] ( identifier[self] , identifier[service_desc] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[service_desc] , identifier[descriptor] . identifier[ServiceDescriptor] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[self] . identifier[_service_descriptors] [ identifier[service_desc] . identifier[full_name] ]= identifier[service_desc] | def AddServiceDescriptor(self, service_desc):
"""Adds a ServiceDescriptor to the pool.
Args:
service_desc: A ServiceDescriptor.
"""
if not isinstance(service_desc, descriptor.ServiceDescriptor):
raise TypeError('Expected instance of descriptor.ServiceDescriptor.') # depends on [control=['if'], data=[]]
self._service_descriptors[service_desc.full_name] = service_desc |
def get_params(self, deep=False):
"""Get parameters."""
params = super(XGBModel, self).get_params(deep=deep)
if isinstance(self.kwargs, dict): # if kwargs is a dict, update params accordingly
params.update(self.kwargs)
if params['missing'] is np.nan:
params['missing'] = None # sklearn doesn't handle nan. see #4725
if not params.get('eval_metric', True):
del params['eval_metric'] # don't give as None param to Booster
return params | def function[get_params, parameter[self, deep]]:
constant[Get parameters.]
variable[params] assign[=] call[call[name[super], parameter[name[XGBModel], name[self]]].get_params, parameter[]]
if call[name[isinstance], parameter[name[self].kwargs, name[dict]]] begin[:]
call[name[params].update, parameter[name[self].kwargs]]
if compare[call[name[params]][constant[missing]] is name[np].nan] begin[:]
call[name[params]][constant[missing]] assign[=] constant[None]
if <ast.UnaryOp object at 0x7da1b209afe0> begin[:]
<ast.Delete object at 0x7da1b209b9d0>
return[name[params]] | keyword[def] identifier[get_params] ( identifier[self] , identifier[deep] = keyword[False] ):
literal[string]
identifier[params] = identifier[super] ( identifier[XGBModel] , identifier[self] ). identifier[get_params] ( identifier[deep] = identifier[deep] )
keyword[if] identifier[isinstance] ( identifier[self] . identifier[kwargs] , identifier[dict] ):
identifier[params] . identifier[update] ( identifier[self] . identifier[kwargs] )
keyword[if] identifier[params] [ literal[string] ] keyword[is] identifier[np] . identifier[nan] :
identifier[params] [ literal[string] ]= keyword[None]
keyword[if] keyword[not] identifier[params] . identifier[get] ( literal[string] , keyword[True] ):
keyword[del] identifier[params] [ literal[string] ]
keyword[return] identifier[params] | def get_params(self, deep=False):
"""Get parameters."""
params = super(XGBModel, self).get_params(deep=deep)
if isinstance(self.kwargs, dict): # if kwargs is a dict, update params accordingly
params.update(self.kwargs) # depends on [control=['if'], data=[]]
if params['missing'] is np.nan:
params['missing'] = None # sklearn doesn't handle nan. see #4725 # depends on [control=['if'], data=[]]
if not params.get('eval_metric', True):
del params['eval_metric'] # don't give as None param to Booster # depends on [control=['if'], data=[]]
return params |
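A hypothetical round trip with the wrapper above, showing the nan-to-None masking that keeps scikit-learn's clone machinery happy; it assumes an xgboost version whose XGBModel matches the definition shown:

import numpy as np

# hypothetical: XGBModel as defined above
model = XGBModel(missing=np.nan)
params = model.get_params()
assert params['missing'] is None  # nan is masked for sklearn's sake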
def set_random_state(state):
"""Force-set the state of factory.fuzzy's random generator."""
randgen.state_set = True
randgen.setstate(state)
faker.generator.random.setstate(state) | def function[set_random_state, parameter[state]]:
constant[Force-set the state of factory.fuzzy's random generator.]
name[randgen].state_set assign[=] constant[True]
call[name[randgen].setstate, parameter[name[state]]]
call[name[faker].generator.random.setstate, parameter[name[state]]] | keyword[def] identifier[set_random_state] ( identifier[state] ):
literal[string]
identifier[randgen] . identifier[state_set] = keyword[True]
identifier[randgen] . identifier[setstate] ( identifier[state] )
identifier[faker] . identifier[generator] . identifier[random] . identifier[setstate] ( identifier[state] ) | def set_random_state(state):
"""Force-set the state of factory.fuzzy's random generator."""
randgen.state_set = True
randgen.setstate(state)
faker.generator.random.setstate(state) |
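Typical use is replaying fuzzy factory output; a sketch assuming the module-level `randgen` and `faker` shown above:

import random

state = random.getstate()
set_random_state(state)  # factory.fuzzy and faker now share this state
# ... build fuzzy objects, then call set_random_state(state) to replay them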
def is_positive_semidefinite_matrix(mat, rtol=RTOL_DEFAULT, atol=ATOL_DEFAULT):
"""Test if a matrix is positive semidefinite"""
if atol is None:
atol = ATOL_DEFAULT
if rtol is None:
rtol = RTOL_DEFAULT
if not is_hermitian_matrix(mat, rtol=rtol, atol=atol):
return False
# Check eigenvalues are all positive
vals = np.linalg.eigvalsh(mat)
for v in vals:
if v < -atol:
return False
return True | def function[is_positive_semidefinite_matrix, parameter[mat, rtol, atol]]:
constant[Test if a matrix is positive semidefinite]
if compare[name[atol] is constant[None]] begin[:]
variable[atol] assign[=] name[ATOL_DEFAULT]
if compare[name[rtol] is constant[None]] begin[:]
variable[rtol] assign[=] name[RTOL_DEFAULT]
if <ast.UnaryOp object at 0x7da1b05aca90> begin[:]
return[constant[False]]
variable[vals] assign[=] call[name[np].linalg.eigvalsh, parameter[name[mat]]]
for taget[name[v]] in starred[name[vals]] begin[:]
if compare[name[v] less[<] <ast.UnaryOp object at 0x7da207f9aad0>] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[is_positive_semidefinite_matrix] ( identifier[mat] , identifier[rtol] = identifier[RTOL_DEFAULT] , identifier[atol] = identifier[ATOL_DEFAULT] ):
literal[string]
keyword[if] identifier[atol] keyword[is] keyword[None] :
identifier[atol] = identifier[ATOL_DEFAULT]
keyword[if] identifier[rtol] keyword[is] keyword[None] :
identifier[rtol] = identifier[RTOL_DEFAULT]
keyword[if] keyword[not] identifier[is_hermitian_matrix] ( identifier[mat] , identifier[rtol] = identifier[rtol] , identifier[atol] = identifier[atol] ):
keyword[return] keyword[False]
identifier[vals] = identifier[np] . identifier[linalg] . identifier[eigvalsh] ( identifier[mat] )
keyword[for] identifier[v] keyword[in] identifier[vals] :
keyword[if] identifier[v] <- identifier[atol] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_positive_semidefinite_matrix(mat, rtol=RTOL_DEFAULT, atol=ATOL_DEFAULT):
"""Test if a matrix is positive semidefinite"""
if atol is None:
atol = ATOL_DEFAULT # depends on [control=['if'], data=['atol']]
if rtol is None:
rtol = RTOL_DEFAULT # depends on [control=['if'], data=['rtol']]
if not is_hermitian_matrix(mat, rtol=rtol, atol=atol):
return False # depends on [control=['if'], data=[]]
# Check eigenvalues are all positive
vals = np.linalg.eigvalsh(mat)
for v in vals:
if v < -atol:
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['v']]
return True |
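A quick numeric check with NumPy, assuming the module's `is_hermitian_matrix` helper and tolerance defaults are available alongside the function:

import numpy as np

psd = np.array([[2.0, -1.0], [-1.0, 2.0]])       # eigenvalues 1 and 3
indefinite = np.array([[0.0, 1.0], [1.0, 0.0]])  # eigenvalues -1 and 1
print(is_positive_semidefinite_matrix(psd))         # True
print(is_positive_semidefinite_matrix(indefinite))  # False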
def is_operation(term: Any) -> bool:
"""Return True iff the given term is a subclass of :class:`.Operation`."""
return isinstance(term, type) and issubclass(term, Operation) | def function[is_operation, parameter[term]]:
constant[Return True iff the given term is a subclass of :class:`.Operation`.]
return[<ast.BoolOp object at 0x7da1b06cee00>] | keyword[def] identifier[is_operation] ( identifier[term] : identifier[Any] )-> identifier[bool] :
literal[string]
keyword[return] identifier[isinstance] ( identifier[term] , identifier[type] ) keyword[and] identifier[issubclass] ( identifier[term] , identifier[Operation] ) | def is_operation(term: Any) -> bool:
"""Return True iff the given term is a subclass of :class:`.Operation`."""
return isinstance(term, type) and issubclass(term, Operation) |
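A small matchpy check; `Operation.new` builds an Operation subclass on the fly:

from matchpy import Arity, Operation

f = Operation.new('f', Arity.binary)
print(is_operation(f))   # True: a subclass of Operation
print(is_operation(42))  # False: not a type at all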
def rosh_hashana_dow(self):
"""Return the Hebrew day of week for Rosh Hashana."""
jdn = conv.hdate_to_jdn(HebrewDate(self.hdate.year, Months.Tishrei, 1))
return (jdn + 1) % 7 + 1 | def function[rosh_hashana_dow, parameter[self]]:
constant[Return the Hebrew day of week for Rosh Hashana.]
variable[jdn] assign[=] call[name[conv].hdate_to_jdn, parameter[call[name[HebrewDate], parameter[name[self].hdate.year, name[Months].Tishrei, constant[1]]]]]
return[binary_operation[binary_operation[binary_operation[name[jdn] + constant[1]] <ast.Mod object at 0x7da2590d6920> constant[7]] + constant[1]]] | keyword[def] identifier[rosh_hashana_dow] ( identifier[self] ):
literal[string]
identifier[jdn] = identifier[conv] . identifier[hdate_to_jdn] ( identifier[HebrewDate] ( identifier[self] . identifier[hdate] . identifier[year] , identifier[Months] . identifier[Tishrei] , literal[int] ))
keyword[return] ( identifier[jdn] + literal[int] )% literal[int] + literal[int] | def rosh_hashana_dow(self):
"""Return the Hebrew day of week for Rosh Hashana."""
jdn = conv.hdate_to_jdn(HebrewDate(self.hdate.year, Months.Tishrei, 1))
return (jdn + 1) % 7 + 1 |
def _findRow(subNo, model):
"""Finds a row in a given model which has a column with a given
number."""
items = model.findItems(str(subNo))
if len(items) == 0:
return None
if len(items) > 1:
raise IndexError("Too many items with sub number %s" % subNo)
return items[0].row() | def function[_findRow, parameter[subNo, model]]:
    constant[Finds the row in the given model whose item text matches the
    given number.]
variable[items] assign[=] call[name[model].findItems, parameter[call[name[str], parameter[name[subNo]]]]]
if compare[call[name[len], parameter[name[items]]] equal[==] constant[0]] begin[:]
return[constant[None]]
if compare[call[name[len], parameter[name[items]]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da2054a5360>
return[call[call[name[items]][constant[0]].row, parameter[]]] | keyword[def] identifier[_findRow] ( identifier[subNo] , identifier[model] ):
literal[string]
identifier[items] = identifier[model] . identifier[findItems] ( identifier[str] ( identifier[subNo] ))
keyword[if] identifier[len] ( identifier[items] )== literal[int] :
keyword[return] keyword[None]
keyword[if] identifier[len] ( identifier[items] )> literal[int] :
keyword[raise] identifier[IndexError] ( literal[string] % identifier[subNo] )
keyword[return] identifier[items] [ literal[int] ]. identifier[row] () | def _findRow(subNo, model):
"""Finds a row in a given model which has a column with a given
number."""
items = model.findItems(str(subNo))
if len(items) == 0:
return None # depends on [control=['if'], data=[]]
if len(items) > 1:
raise IndexError('Too many items with sub number %s' % subNo) # depends on [control=['if'], data=[]]
return items[0].row() |
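A sketch against a QStandardItemModel (assumes PyQt5; the model's default-column `findItems` does the lookup):

from PyQt5.QtGui import QStandardItem, QStandardItemModel

model = QStandardItemModel()
for sub_no in (101, 102, 103):
    model.appendRow(QStandardItem(str(sub_no)))

print(_findRow(102, model))  # 1
print(_findRow(999, model))  # None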
def d2logpdf_df2(self, f, y, Y_metadata=None):
"""
Evaluates the link function link(f) then computes the second derivative of log likelihood using it
Uses the Faa di Bruno's formula for the chain rule
.. math::
\\frac{d^{2}\\log p(y|\\lambda(f))}{df^{2}} = \\frac{d^{2}\\log p(y|\\lambda(f))}{d^{2}\\lambda(f)}\\left(\\frac{d\\lambda(f)}{df}\\right)^{2} + \\frac{d\\log p(y|\\lambda(f))}{d\\lambda(f)}\\frac{d^{2}\\lambda(f)}{df^{2}}
:param f: latent variables f
:type f: Nx1 array
:param y: data
:type y: Nx1 array
        :param Y_metadata: Y_metadata, not used in the Student-t distribution
:returns: second derivative of log likelihood evaluated for this point (diagonal only)
:rtype: 1xN array
"""
if isinstance(self.gp_link, link_functions.Identity):
d2logpdf_df2 = self.d2logpdf_dlink2(f, y, Y_metadata=Y_metadata)
else:
inv_link_f = self.gp_link.transf(f)
d2logpdf_dlink2 = self.d2logpdf_dlink2(inv_link_f, y, Y_metadata=Y_metadata)
dlink_df = self.gp_link.dtransf_df(f)
dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata)
d2link_df2 = self.gp_link.d2transf_df2(f)
d2logpdf_df2 = chain_2(d2logpdf_dlink2, dlink_df, dlogpdf_dlink, d2link_df2)
return d2logpdf_df2 | def function[d2logpdf_df2, parameter[self, f, y, Y_metadata]]:
constant[
Evaluates the link function link(f) then computes the second derivative of log likelihood using it
Uses the Faa di Bruno's formula for the chain rule
.. math::
\frac{d^{2}\log p(y|\lambda(f))}{df^{2}} = \frac{d^{2}\log p(y|\lambda(f))}{d^{2}\lambda(f)}\left(\frac{d\lambda(f)}{df}\right)^{2} + \frac{d\log p(y|\lambda(f))}{d\lambda(f)}\frac{d^{2}\lambda(f)}{df^{2}}
:param f: latent variables f
:type f: Nx1 array
:param y: data
:type y: Nx1 array
    :param Y_metadata: Y_metadata, not used in the Student-t distribution
:returns: second derivative of log likelihood evaluated for this point (diagonal only)
:rtype: 1xN array
]
if call[name[isinstance], parameter[name[self].gp_link, name[link_functions].Identity]] begin[:]
variable[d2logpdf_df2] assign[=] call[name[self].d2logpdf_dlink2, parameter[name[f], name[y]]]
return[name[d2logpdf_df2]] | keyword[def] identifier[d2logpdf_df2] ( identifier[self] , identifier[f] , identifier[y] , identifier[Y_metadata] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[gp_link] , identifier[link_functions] . identifier[Identity] ):
identifier[d2logpdf_df2] = identifier[self] . identifier[d2logpdf_dlink2] ( identifier[f] , identifier[y] , identifier[Y_metadata] = identifier[Y_metadata] )
keyword[else] :
identifier[inv_link_f] = identifier[self] . identifier[gp_link] . identifier[transf] ( identifier[f] )
identifier[d2logpdf_dlink2] = identifier[self] . identifier[d2logpdf_dlink2] ( identifier[inv_link_f] , identifier[y] , identifier[Y_metadata] = identifier[Y_metadata] )
identifier[dlink_df] = identifier[self] . identifier[gp_link] . identifier[dtransf_df] ( identifier[f] )
identifier[dlogpdf_dlink] = identifier[self] . identifier[dlogpdf_dlink] ( identifier[inv_link_f] , identifier[y] , identifier[Y_metadata] = identifier[Y_metadata] )
identifier[d2link_df2] = identifier[self] . identifier[gp_link] . identifier[d2transf_df2] ( identifier[f] )
identifier[d2logpdf_df2] = identifier[chain_2] ( identifier[d2logpdf_dlink2] , identifier[dlink_df] , identifier[dlogpdf_dlink] , identifier[d2link_df2] )
keyword[return] identifier[d2logpdf_df2] | def d2logpdf_df2(self, f, y, Y_metadata=None):
"""
Evaluates the link function link(f) then computes the second derivative of log likelihood using it
Uses the Faa di Bruno's formula for the chain rule
.. math::
\\frac{d^{2}\\log p(y|\\lambda(f))}{df^{2}} = \\frac{d^{2}\\log p(y|\\lambda(f))}{d^{2}\\lambda(f)}\\left(\\frac{d\\lambda(f)}{df}\\right)^{2} + \\frac{d\\log p(y|\\lambda(f))}{d\\lambda(f)}\\frac{d^{2}\\lambda(f)}{df^{2}}
:param f: latent variables f
:type f: Nx1 array
:param y: data
:type y: Nx1 array
    :param Y_metadata: Y_metadata, not used in the Student-t distribution
:returns: second derivative of log likelihood evaluated for this point (diagonal only)
:rtype: 1xN array
"""
if isinstance(self.gp_link, link_functions.Identity):
d2logpdf_df2 = self.d2logpdf_dlink2(f, y, Y_metadata=Y_metadata) # depends on [control=['if'], data=[]]
else:
inv_link_f = self.gp_link.transf(f)
d2logpdf_dlink2 = self.d2logpdf_dlink2(inv_link_f, y, Y_metadata=Y_metadata)
dlink_df = self.gp_link.dtransf_df(f)
dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata)
d2link_df2 = self.gp_link.d2transf_df2(f)
d2logpdf_df2 = chain_2(d2logpdf_dlink2, dlink_df, dlogpdf_dlink, d2link_df2)
return d2logpdf_df2 |
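The `chain_2` helper used above is just the docstring's formula; a minimal stand-in consistent with that formula (the real helper lives in GPy's util module):

def chain_2(d2f_dg2, dg_dx, df_dg, d2g_dx2):
    # d2f/dx2 = d2f/dg2 * (dg/dx)**2 + df/dg * d2g/dx2
    return d2f_dg2 * (dg_dx ** 2) + df_dg * d2g_dx2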
def free_parameters(self):
"""
Returns a dictionary of free parameters for this source.
We use the parameter path as the key because it's
guaranteed to be unique, unlike the parameter name.
:return:
"""
free_parameters = collections.OrderedDict()
for component in self._components.values():
for par in component.shape.parameters.values():
if par.free:
free_parameters[par.path] = par
for par in self.position.parameters.values():
if par.free:
free_parameters[par.path] = par
return free_parameters | def function[free_parameters, parameter[self]]:
constant[
Returns a dictionary of free parameters for this source.
We use the parameter path as the key because it's
guaranteed to be unique, unlike the parameter name.
:return:
]
variable[free_parameters] assign[=] call[name[collections].OrderedDict, parameter[]]
for taget[name[component]] in starred[call[name[self]._components.values, parameter[]]] begin[:]
for taget[name[par]] in starred[call[name[component].shape.parameters.values, parameter[]]] begin[:]
if name[par].free begin[:]
call[name[free_parameters]][name[par].path] assign[=] name[par]
for taget[name[par]] in starred[call[name[self].position.parameters.values, parameter[]]] begin[:]
if name[par].free begin[:]
call[name[free_parameters]][name[par].path] assign[=] name[par]
return[name[free_parameters]] | keyword[def] identifier[free_parameters] ( identifier[self] ):
literal[string]
identifier[free_parameters] = identifier[collections] . identifier[OrderedDict] ()
keyword[for] identifier[component] keyword[in] identifier[self] . identifier[_components] . identifier[values] ():
keyword[for] identifier[par] keyword[in] identifier[component] . identifier[shape] . identifier[parameters] . identifier[values] ():
keyword[if] identifier[par] . identifier[free] :
identifier[free_parameters] [ identifier[par] . identifier[path] ]= identifier[par]
keyword[for] identifier[par] keyword[in] identifier[self] . identifier[position] . identifier[parameters] . identifier[values] ():
keyword[if] identifier[par] . identifier[free] :
identifier[free_parameters] [ identifier[par] . identifier[path] ]= identifier[par]
keyword[return] identifier[free_parameters] | def free_parameters(self):
"""
Returns a dictionary of free parameters for this source.
We use the parameter path as the key because it's
guaranteed to be unique, unlike the parameter name.
:return:
"""
free_parameters = collections.OrderedDict()
for component in self._components.values():
for par in component.shape.parameters.values():
if par.free:
free_parameters[par.path] = par # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['par']] # depends on [control=['for'], data=['component']]
for par in self.position.parameters.values():
if par.free:
free_parameters[par.path] = par # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['par']]
return free_parameters |
def save(self,
filename="phonopy_params.yaml",
settings=None):
"""Save parameters in Phonopy instants into file.
Parameters
----------
filename: str, optional
File name. Default is "phonopy_params.yaml"
settings: dict, optional
It is described which parameters are written out. Only
the settings expected to be updated from the following
default settings are needed to be set in the dictionary.
The possible parameters and their default settings are:
{'force_sets': True,
'displacements': True,
'force_constants': False,
'born_effective_charge': True,
'dielectric_constant': True}
"""
phpy_yaml = PhonopyYaml(calculator=self._calculator,
settings=settings)
phpy_yaml.set_phonon_info(self)
with open(filename, 'w') as w:
w.write(str(phpy_yaml)) | def function[save, parameter[self, filename, settings]]:
    constant[Save parameters of a Phonopy instance into a file.
    Parameters
    ----------
    filename: str, optional
        File name. Default is "phonopy_params.yaml"
    settings: dict, optional
        Describes which parameters are written out. Only
the settings expected to be updated from the following
default settings are needed to be set in the dictionary.
The possible parameters and their default settings are:
{'force_sets': True,
'displacements': True,
'force_constants': False,
'born_effective_charge': True,
'dielectric_constant': True}
]
variable[phpy_yaml] assign[=] call[name[PhonopyYaml], parameter[]]
call[name[phpy_yaml].set_phonon_info, parameter[name[self]]]
with call[name[open], parameter[name[filename], constant[w]]] begin[:]
call[name[w].write, parameter[call[name[str], parameter[name[phpy_yaml]]]]] | keyword[def] identifier[save] ( identifier[self] ,
identifier[filename] = literal[string] ,
identifier[settings] = keyword[None] ):
literal[string]
identifier[phpy_yaml] = identifier[PhonopyYaml] ( identifier[calculator] = identifier[self] . identifier[_calculator] ,
identifier[settings] = identifier[settings] )
identifier[phpy_yaml] . identifier[set_phonon_info] ( identifier[self] )
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[w] :
identifier[w] . identifier[write] ( identifier[str] ( identifier[phpy_yaml] )) | def save(self, filename='phonopy_params.yaml', settings=None):
"""Save parameters in Phonopy instants into file.
Parameters
----------
filename: str, optional
File name. Default is "phonopy_params.yaml"
settings: dict, optional
It is described which parameters are written out. Only
the settings expected to be updated from the following
default settings are needed to be set in the dictionary.
The possible parameters and their default settings are:
{'force_sets': True,
'displacements': True,
'force_constants': False,
'born_effective_charge': True,
'dielectric_constant': True}
"""
phpy_yaml = PhonopyYaml(calculator=self._calculator, settings=settings)
phpy_yaml.set_phonon_info(self)
with open(filename, 'w') as w:
w.write(str(phpy_yaml)) # depends on [control=['with'], data=['w']] |
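Typical call, with `phonon` assumed to be a configured Phonopy instance (hypothetical here):

# hypothetical: write everything except raw force sets
phonon.save("phonopy_params.yaml",
            settings={'force_constants': True, 'force_sets': False})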
def unsigned_input(outpoint, redeem_script=None, sequence=None):
'''
Outpoint, byte-like, int -> TxIn
'''
if redeem_script is not None and sequence is None:
sequence = guess_sequence(redeem_script)
if sequence is None:
sequence = 0xFFFFFFFE
return tb.make_legacy_input(
outpoint=outpoint,
stack_script=b'',
redeem_script=b'',
sequence=sequence) | def function[unsigned_input, parameter[outpoint, redeem_script, sequence]]:
constant[
Outpoint, byte-like, int -> TxIn
]
if <ast.BoolOp object at 0x7da1b06775e0> begin[:]
variable[sequence] assign[=] call[name[guess_sequence], parameter[name[redeem_script]]]
if compare[name[sequence] is constant[None]] begin[:]
variable[sequence] assign[=] constant[4294967294]
return[call[name[tb].make_legacy_input, parameter[]]] | keyword[def] identifier[unsigned_input] ( identifier[outpoint] , identifier[redeem_script] = keyword[None] , identifier[sequence] = keyword[None] ):
literal[string]
keyword[if] identifier[redeem_script] keyword[is] keyword[not] keyword[None] keyword[and] identifier[sequence] keyword[is] keyword[None] :
identifier[sequence] = identifier[guess_sequence] ( identifier[redeem_script] )
keyword[if] identifier[sequence] keyword[is] keyword[None] :
identifier[sequence] = literal[int]
keyword[return] identifier[tb] . identifier[make_legacy_input] (
identifier[outpoint] = identifier[outpoint] ,
identifier[stack_script] = literal[string] ,
identifier[redeem_script] = literal[string] ,
identifier[sequence] = identifier[sequence] ) | def unsigned_input(outpoint, redeem_script=None, sequence=None):
"""
Outpoint, byte-like, int -> TxIn
"""
if redeem_script is not None and sequence is None:
sequence = guess_sequence(redeem_script) # depends on [control=['if'], data=[]]
if sequence is None:
sequence = 4294967294 # depends on [control=['if'], data=['sequence']]
return tb.make_legacy_input(outpoint=outpoint, stack_script=b'', redeem_script=b'', sequence=sequence) |
def add_external_tracker(self, bug_ids, ext_bz_bug_id, ext_type_id=None,
ext_type_description=None, ext_type_url=None,
ext_status=None, ext_description=None,
ext_priority=None):
"""
Wrapper method to allow adding of external tracking bugs using the
ExternalBugs::WebService::add_external_bug method.
This is documented at
https://bugzilla.redhat.com/docs/en/html/api/extensions/ExternalBugs/lib/WebService.html#add_external_bug
bug_ids: A single bug id or list of bug ids to have external trackers
added.
ext_bz_bug_id: The external bug id (ie: the bug number in the
external tracker).
ext_type_id: The external tracker id as used by Bugzilla.
ext_type_description: The external tracker description as used by
Bugzilla.
ext_type_url: The external tracker url as used by Bugzilla.
ext_status: The status of the external bug.
ext_description: The description of the external bug.
ext_priority: The priority of the external bug.
"""
param_dict = {'ext_bz_bug_id': ext_bz_bug_id}
if ext_type_id is not None:
param_dict['ext_type_id'] = ext_type_id
if ext_type_description is not None:
param_dict['ext_type_description'] = ext_type_description
if ext_type_url is not None:
param_dict['ext_type_url'] = ext_type_url
if ext_status is not None:
param_dict['ext_status'] = ext_status
if ext_description is not None:
param_dict['ext_description'] = ext_description
if ext_priority is not None:
param_dict['ext_priority'] = ext_priority
params = {
'bug_ids': self._listify(bug_ids),
'external_bugs': [param_dict],
}
log.debug("Calling ExternalBugs.add_external_bug(%s)", params)
return self._proxy.ExternalBugs.add_external_bug(params) | def function[add_external_tracker, parameter[self, bug_ids, ext_bz_bug_id, ext_type_id, ext_type_description, ext_type_url, ext_status, ext_description, ext_priority]]:
constant[
Wrapper method to allow adding of external tracking bugs using the
ExternalBugs::WebService::add_external_bug method.
This is documented at
https://bugzilla.redhat.com/docs/en/html/api/extensions/ExternalBugs/lib/WebService.html#add_external_bug
bug_ids: A single bug id or list of bug ids to have external trackers
added.
ext_bz_bug_id: The external bug id (ie: the bug number in the
external tracker).
ext_type_id: The external tracker id as used by Bugzilla.
ext_type_description: The external tracker description as used by
Bugzilla.
ext_type_url: The external tracker url as used by Bugzilla.
ext_status: The status of the external bug.
ext_description: The description of the external bug.
ext_priority: The priority of the external bug.
]
variable[param_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b0d19630>], [<ast.Name object at 0x7da1b0d1b4f0>]]
if compare[name[ext_type_id] is_not constant[None]] begin[:]
call[name[param_dict]][constant[ext_type_id]] assign[=] name[ext_type_id]
if compare[name[ext_type_description] is_not constant[None]] begin[:]
call[name[param_dict]][constant[ext_type_description]] assign[=] name[ext_type_description]
if compare[name[ext_type_url] is_not constant[None]] begin[:]
call[name[param_dict]][constant[ext_type_url]] assign[=] name[ext_type_url]
if compare[name[ext_status] is_not constant[None]] begin[:]
call[name[param_dict]][constant[ext_status]] assign[=] name[ext_status]
if compare[name[ext_description] is_not constant[None]] begin[:]
call[name[param_dict]][constant[ext_description]] assign[=] name[ext_description]
if compare[name[ext_priority] is_not constant[None]] begin[:]
call[name[param_dict]][constant[ext_priority]] assign[=] name[ext_priority]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0d18280>, <ast.Constant object at 0x7da1b0d1b130>], [<ast.Call object at 0x7da1b0d19570>, <ast.List object at 0x7da1b0d1b610>]]
call[name[log].debug, parameter[constant[Calling ExternalBugs.add_external_bug(%s)], name[params]]]
return[call[name[self]._proxy.ExternalBugs.add_external_bug, parameter[name[params]]]] | keyword[def] identifier[add_external_tracker] ( identifier[self] , identifier[bug_ids] , identifier[ext_bz_bug_id] , identifier[ext_type_id] = keyword[None] ,
identifier[ext_type_description] = keyword[None] , identifier[ext_type_url] = keyword[None] ,
identifier[ext_status] = keyword[None] , identifier[ext_description] = keyword[None] ,
identifier[ext_priority] = keyword[None] ):
literal[string]
identifier[param_dict] ={ literal[string] : identifier[ext_bz_bug_id] }
keyword[if] identifier[ext_type_id] keyword[is] keyword[not] keyword[None] :
identifier[param_dict] [ literal[string] ]= identifier[ext_type_id]
keyword[if] identifier[ext_type_description] keyword[is] keyword[not] keyword[None] :
identifier[param_dict] [ literal[string] ]= identifier[ext_type_description]
keyword[if] identifier[ext_type_url] keyword[is] keyword[not] keyword[None] :
identifier[param_dict] [ literal[string] ]= identifier[ext_type_url]
keyword[if] identifier[ext_status] keyword[is] keyword[not] keyword[None] :
identifier[param_dict] [ literal[string] ]= identifier[ext_status]
keyword[if] identifier[ext_description] keyword[is] keyword[not] keyword[None] :
identifier[param_dict] [ literal[string] ]= identifier[ext_description]
keyword[if] identifier[ext_priority] keyword[is] keyword[not] keyword[None] :
identifier[param_dict] [ literal[string] ]= identifier[ext_priority]
identifier[params] ={
literal[string] : identifier[self] . identifier[_listify] ( identifier[bug_ids] ),
literal[string] :[ identifier[param_dict] ],
}
identifier[log] . identifier[debug] ( literal[string] , identifier[params] )
keyword[return] identifier[self] . identifier[_proxy] . identifier[ExternalBugs] . identifier[add_external_bug] ( identifier[params] ) | def add_external_tracker(self, bug_ids, ext_bz_bug_id, ext_type_id=None, ext_type_description=None, ext_type_url=None, ext_status=None, ext_description=None, ext_priority=None):
"""
Wrapper method to allow adding of external tracking bugs using the
ExternalBugs::WebService::add_external_bug method.
This is documented at
https://bugzilla.redhat.com/docs/en/html/api/extensions/ExternalBugs/lib/WebService.html#add_external_bug
bug_ids: A single bug id or list of bug ids to have external trackers
added.
ext_bz_bug_id: The external bug id (ie: the bug number in the
external tracker).
ext_type_id: The external tracker id as used by Bugzilla.
ext_type_description: The external tracker description as used by
Bugzilla.
ext_type_url: The external tracker url as used by Bugzilla.
ext_status: The status of the external bug.
ext_description: The description of the external bug.
ext_priority: The priority of the external bug.
"""
param_dict = {'ext_bz_bug_id': ext_bz_bug_id}
if ext_type_id is not None:
param_dict['ext_type_id'] = ext_type_id # depends on [control=['if'], data=['ext_type_id']]
if ext_type_description is not None:
param_dict['ext_type_description'] = ext_type_description # depends on [control=['if'], data=['ext_type_description']]
if ext_type_url is not None:
param_dict['ext_type_url'] = ext_type_url # depends on [control=['if'], data=['ext_type_url']]
if ext_status is not None:
param_dict['ext_status'] = ext_status # depends on [control=['if'], data=['ext_status']]
if ext_description is not None:
param_dict['ext_description'] = ext_description # depends on [control=['if'], data=['ext_description']]
if ext_priority is not None:
param_dict['ext_priority'] = ext_priority # depends on [control=['if'], data=['ext_priority']]
params = {'bug_ids': self._listify(bug_ids), 'external_bugs': [param_dict]}
log.debug('Calling ExternalBugs.add_external_bug(%s)', params)
return self._proxy.ExternalBugs.add_external_bug(params) |
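A hedged example call for the wrapper above; `bz` is an assumed client object exposing this method, and every value is made up for illustration.

# Hypothetical values throughout -- only the parameter names come from
# the method above.
bz.add_external_tracker(
    bug_ids=[123456, 123457],        # accepts a single id or a list
    ext_bz_bug_id="PROJ-42",
    ext_type_description="Example Tracker",
    ext_status="Open",
)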
def sink_update(
self, project, sink_name, filter_, destination, unique_writer_identity=False
):
"""API call: update a sink resource.
:type project: str
:param project: ID of the project containing the sink.
:type sink_name: str
:param sink_name: the name of the sink
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries exported by the sink.
:type destination: str
:param destination: destination URI for the entries exported by
the sink.
:type unique_writer_identity: bool
:param unique_writer_identity: (Optional) determines the kind of
IAM identity returned as
writer_identity in the new sink.
:rtype: dict
:returns: The sink resource returned from the API (converted from a
protobuf to a dictionary).
"""
path = "projects/%s/sinks/%s" % (project, sink_name)
sink_pb = LogSink(name=path, filter=filter_, destination=destination)
sink_pb = self._gapic_api.update_sink(
path, sink_pb, unique_writer_identity=unique_writer_identity
)
# NOTE: LogSink message type does not have an ``Any`` field
# so ``MessageToDict`` can safely be used.
return MessageToDict(sink_pb) | def function[sink_update, parameter[self, project, sink_name, filter_, destination, unique_writer_identity]]:
constant[API call: update a sink resource.
:type project: str
:param project: ID of the project containing the sink.
:type sink_name: str
:param sink_name: the name of the sink
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries exported by the sink.
:type destination: str
:param destination: destination URI for the entries exported by
the sink.
:type unique_writer_identity: bool
:param unique_writer_identity: (Optional) determines the kind of
IAM identity returned as
writer_identity in the new sink.
:rtype: dict
:returns: The sink resource returned from the API (converted from a
protobuf to a dictionary).
]
variable[path] assign[=] binary_operation[constant[projects/%s/sinks/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2347f10>, <ast.Name object at 0x7da1b23464d0>]]]
variable[sink_pb] assign[=] call[name[LogSink], parameter[]]
variable[sink_pb] assign[=] call[name[self]._gapic_api.update_sink, parameter[name[path], name[sink_pb]]]
return[call[name[MessageToDict], parameter[name[sink_pb]]]] | keyword[def] identifier[sink_update] (
identifier[self] , identifier[project] , identifier[sink_name] , identifier[filter_] , identifier[destination] , identifier[unique_writer_identity] = keyword[False]
):
literal[string]
identifier[path] = literal[string] %( identifier[project] , identifier[sink_name] )
identifier[sink_pb] = identifier[LogSink] ( identifier[name] = identifier[path] , identifier[filter] = identifier[filter_] , identifier[destination] = identifier[destination] )
identifier[sink_pb] = identifier[self] . identifier[_gapic_api] . identifier[update_sink] (
identifier[path] , identifier[sink_pb] , identifier[unique_writer_identity] = identifier[unique_writer_identity]
)
keyword[return] identifier[MessageToDict] ( identifier[sink_pb] ) | def sink_update(self, project, sink_name, filter_, destination, unique_writer_identity=False):
"""API call: update a sink resource.
:type project: str
:param project: ID of the project containing the sink.
:type sink_name: str
:param sink_name: the name of the sink
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries exported by the sink.
:type destination: str
:param destination: destination URI for the entries exported by
the sink.
:type unique_writer_identity: bool
:param unique_writer_identity: (Optional) determines the kind of
IAM identity returned as
writer_identity in the new sink.
:rtype: dict
:returns: The sink resource returned from the API (converted from a
protobuf to a dictionary).
"""
path = 'projects/%s/sinks/%s' % (project, sink_name)
sink_pb = LogSink(name=path, filter=filter_, destination=destination)
sink_pb = self._gapic_api.update_sink(path, sink_pb, unique_writer_identity=unique_writer_identity)
# NOTE: LogSink message type does not have an ``Any`` field
# so ``MessageToDict`` can safely be used.
return MessageToDict(sink_pb) |
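A minimal call sketch for sink_update; `client` is an assumed instance of the class this method belongs to, and the project, sink, filter, and bucket names are placeholders.

# Placeholder identifiers; only the keyword names mirror the method above.
resource = client.sink_update(
    project="my-project",
    sink_name="error-sink",
    filter_="severity>=ERROR",
    destination="storage.googleapis.com/my-log-bucket",
    unique_writer_identity=True,
)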
def get_allposts(self):
''' Return all posts in the blog, sorted by date
'''
result = self.client.posts(self.blog, offset = 0, limit = 1)
try:
total_posts = result['total_posts']
except:
raise phasetumblr_errors.TumblrBlogException(result['meta']['msg'])
delta = (total_posts // 10) + 1  # floor division keeps range() happy on Python 3
all_posts = []
posts_ids = []
for j in range(delta):
start = j * 10
end = (j + 1) * 10
posts = self.client.posts(self.blog, offset = start, limit = end)['posts']
if not len(posts):
break
for i in posts:
if i['id'] in posts_ids:
continue
description = split_body(i['body'])
body = split_body(i['body'], 1)
post = {}
post['title'] = i['title']
post['link'] = i['post_url']
post['date'] = datetime.strptime(i['date'], '%Y-%m-%d %H:%M:%S %Z')
post['tags'] = i['tags']
post['id'] = i['id']
post['body'] = body
post['description'] = description
all_posts.append(post)
posts_ids.append(i['id'])
newlist = sorted(all_posts, key=lambda k: k['date'])
return newlist | def function[get_allposts, parameter[self]]:
constant[ Return all posts in the blog, sorted by date
]
variable[result] assign[=] call[name[self].client.posts, parameter[name[self].blog]]
<ast.Try object at 0x7da1b15f7040>
variable[delta] assign[=] binary_operation[binary_operation[name[total_posts] / constant[10]] + constant[1]]
variable[all_posts] assign[=] list[[]]
variable[posts_ids] assign[=] list[[]]
for taget[name[j]] in starred[call[name[range], parameter[name[delta]]]] begin[:]
variable[start] assign[=] binary_operation[name[j] * constant[10]]
variable[end] assign[=] binary_operation[binary_operation[name[j] + constant[1]] * constant[10]]
variable[posts] assign[=] call[call[name[self].client.posts, parameter[name[self].blog]]][constant[posts]]
if <ast.UnaryOp object at 0x7da1b15f5720> begin[:]
break
for taget[name[i]] in starred[name[posts]] begin[:]
if compare[call[name[i]][constant[id]] in name[posts_ids]] begin[:]
continue
variable[description] assign[=] call[name[split_body], parameter[call[name[i]][constant[body]]]]
variable[body] assign[=] call[name[split_body], parameter[call[name[i]][constant[body]], constant[1]]]
variable[post] assign[=] dictionary[[], []]
call[name[post]][constant[title]] assign[=] call[name[i]][constant[title]]
call[name[post]][constant[link]] assign[=] call[name[i]][constant[post_url]]
call[name[post]][constant[date]] assign[=] call[name[datetime].strptime, parameter[call[name[i]][constant[date]], constant[%Y-%m-%d %H:%M:%S %Z]]]
call[name[post]][constant[tags]] assign[=] call[name[i]][constant[tags]]
call[name[post]][constant[id]] assign[=] call[name[i]][constant[id]]
call[name[post]][constant[body]] assign[=] name[body]
call[name[post]][constant[description]] assign[=] name[description]
call[name[all_posts].append, parameter[name[post]]]
call[name[posts_ids].append, parameter[call[name[i]][constant[id]]]]
variable[newlist] assign[=] call[name[sorted], parameter[name[all_posts]]]
return[name[newlist]] | keyword[def] identifier[get_allposts] ( identifier[self] ):
literal[string]
identifier[result] = identifier[self] . identifier[client] . identifier[posts] ( identifier[self] . identifier[blog] , identifier[offset] = literal[int] , identifier[limit] = literal[int] )
keyword[try] :
identifier[total_posts] = identifier[result] [ literal[string] ]
keyword[except] :
keyword[raise] identifier[phasetumblr_errors] . identifier[TumblrBlogException] ( identifier[result] [ literal[string] ][ literal[string] ])
identifier[delta] =( identifier[total_posts] / literal[int] )+ literal[int]
identifier[all_posts] =[]
identifier[posts_ids] =[]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[delta] ):
identifier[start] = identifier[j] * literal[int]
identifier[end] =( identifier[j] + literal[int] )* literal[int]
identifier[posts] = identifier[self] . identifier[client] . identifier[posts] ( identifier[self] . identifier[blog] , identifier[offset] = identifier[start] , identifier[limit] = identifier[end] )[ literal[string] ]
keyword[if] keyword[not] identifier[len] ( identifier[posts] ):
keyword[break]
keyword[for] identifier[i] keyword[in] identifier[posts] :
keyword[if] identifier[i] [ literal[string] ] keyword[in] identifier[posts_ids] :
keyword[continue]
identifier[description] = identifier[split_body] ( identifier[i] [ literal[string] ])
identifier[body] = identifier[split_body] ( identifier[i] [ literal[string] ], literal[int] )
identifier[post] ={}
identifier[post] [ literal[string] ]= identifier[i] [ literal[string] ]
identifier[post] [ literal[string] ]= identifier[i] [ literal[string] ]
identifier[post] [ literal[string] ]= identifier[datetime] . identifier[strptime] ( identifier[i] [ literal[string] ], literal[string] )
identifier[post] [ literal[string] ]= identifier[i] [ literal[string] ]
identifier[post] [ literal[string] ]= identifier[i] [ literal[string] ]
identifier[post] [ literal[string] ]= identifier[body]
identifier[post] [ literal[string] ]= identifier[description]
identifier[all_posts] . identifier[append] ( identifier[post] )
identifier[posts_ids] . identifier[append] ( identifier[i] [ literal[string] ])
identifier[newlist] = identifier[sorted] ( identifier[all_posts] , identifier[key] = keyword[lambda] identifier[k] : identifier[k] [ literal[string] ])
keyword[return] identifier[newlist] | def get_allposts(self):
""" Return all posts in blog sorted by date
"""
result = self.client.posts(self.blog, offset=0, limit=1)
try:
total_posts = result['total_posts'] # depends on [control=['try'], data=[]]
except:
raise phasetumblr_errors.TumblrBlogException(result['meta']['msg']) # depends on [control=['except'], data=[]]
delta = total_posts // 10 + 1
all_posts = []
posts_ids = []
for j in range(delta):
start = j * 10
end = (j + 1) * 10
posts = self.client.posts(self.blog, offset=start, limit=end)['posts']
if not len(posts):
break # depends on [control=['if'], data=[]]
for i in posts:
if i['id'] in posts_ids:
continue # depends on [control=['if'], data=[]]
description = split_body(i['body'])
body = split_body(i['body'], 1)
post = {}
post['title'] = i['title']
post['link'] = i['post_url']
post['date'] = datetime.strptime(i['date'], '%Y-%m-%d %H:%M:%S %Z')
post['tags'] = i['tags']
post['id'] = i['id']
post['body'] = body
post['description'] = description
all_posts.append(post)
posts_ids.append(i['id']) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['j']]
newlist = sorted(all_posts, key=lambda k: k['date'])
return newlist |
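A worked example of the paging arithmetic in this row (no network calls): each iteration advances the offset by ten posts until the total is covered.

# With 57 posts: delta = 57 // 10 + 1 = 6 fetch rounds.
total_posts = 57
delta = (total_posts // 10) + 1           # 6
offsets = [j * 10 for j in range(delta)]  # [0, 10, 20, 30, 40, 50]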
def make_json_response(rv):
""" Make JsonResponse
:param rv: Response: the object to encode, or tuple (response, status, headers)
:type rv: tuple|*
:rtype: JsonResponse
"""
# Tuple of (response, status, headers)
rv, status, headers = normalize_response_value(rv)
# JsonResponse
if isinstance(rv, JsonResponse):
return rv
# Data
return JsonResponse(rv, status, headers) | def function[make_json_response, parameter[rv]]:
constant[ Make JsonResponse
:param rv: Response: the object to encode, or tuple (response, status, headers)
:type rv: tuple|*
:rtype: JsonResponse
]
<ast.Tuple object at 0x7da18eb55690> assign[=] call[name[normalize_response_value], parameter[name[rv]]]
if call[name[isinstance], parameter[name[rv], name[JsonResponse]]] begin[:]
return[name[rv]]
return[call[name[JsonResponse], parameter[name[rv], name[status], name[headers]]]] | keyword[def] identifier[make_json_response] ( identifier[rv] ):
literal[string]
identifier[rv] , identifier[status] , identifier[headers] = identifier[normalize_response_value] ( identifier[rv] )
keyword[if] identifier[isinstance] ( identifier[rv] , identifier[JsonResponse] ):
keyword[return] identifier[rv]
keyword[return] identifier[JsonResponse] ( identifier[rv] , identifier[status] , identifier[headers] ) | def make_json_response(rv):
""" Make JsonResponse
:param rv: Response: the object to encode, or tuple (response, status, headers)
:type rv: tuple|*
:rtype: JsonResponse
"""
# Tuple of (response, status, headers)
(rv, status, headers) = normalize_response_value(rv)
# JsonResponse
if isinstance(rv, JsonResponse):
return rv # depends on [control=['if'], data=[]]
# Data
return JsonResponse(rv, status, headers) |
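The three return-value shapes the helper above accepts, as a sketch; it assumes normalize_response_value (defined elsewhere) fills in status 200 and empty headers when they are omitted.

# Assumed defaults (status=200, headers={}) come from normalize_response_value().
make_json_response({"ok": True})                      # bare data
make_json_response(({"error": "nope"}, 400))          # (data, status)
make_json_response(({"id": 1}, 201, {"X-Id": "1"}))   # (data, status, headers)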
def handle(self, connection_id, message_content):
"""
The simplest authorization type will be Trust. If Trust authorization
is enabled, the validator will trust the connection and approve any
roles requested that are available on that endpoint. If the requester
wishes to gain access to every role it has permission to access, it can
request access to the role ALL, and the validator will respond with all
available roles. If the permission verifier deems the connection to not
have access to a role, the connection has not sent a ConnectionRequest
or the connection has already received an AuthorizationTrustResponse,
an AuthorizationViolation will be returned and the connection will be
closed.
"""
if self._network.get_connection_status(connection_id) != \
ConnectionStatus.CONNECTION_REQUEST:
LOGGER.debug("Connection's previous message was not a"
" ConnectionRequest, Remove connection to %s",
connection_id)
violation = AuthorizationViolation(
violation=RoleType.Value("NETWORK"))
return HandlerResult(
HandlerStatus.RETURN_AND_CLOSE,
message_out=violation,
message_type=validator_pb2.Message
.AUTHORIZATION_VIOLATION)
request = AuthorizationTrustRequest()
request.ParseFromString(message_content)
# Check that the connection's public key is allowed by the network role
roles = self._network.roles
for role in request.roles:
if role == RoleType.Value("NETWORK") or role == \
RoleType.Value("ALL"):
permitted = False
if "network" in roles:
permitted = self._permission_verifier.check_network_role(
request.public_key)
if not permitted:
violation = AuthorizationViolation(
violation=RoleType.Value("NETWORK"))
return HandlerResult(
HandlerStatus.RETURN_AND_CLOSE,
message_out=violation,
message_type=validator_pb2.Message
.AUTHORIZATION_VIOLATION)
self._network.update_connection_public_key(connection_id,
request.public_key)
if RoleType.Value("NETWORK") in request.roles:
# Need to send ConnectionRequest to authorize ourself with the
# connection if they initialized the connection
try:
is_outbound_connection = self._network.is_outbound_connection(
connection_id)
except KeyError:
# Connection has gone away, drop message
return HandlerResult(HandlerStatus.DROP)
if not is_outbound_connection:
self._network.send_connect_request(connection_id)
else:
# If this is an outbound connection, authorization is complete
# for both connections and peering can begin.
self._gossip.connect_success(connection_id)
auth_trust_response = AuthorizationTrustResponse(
roles=[RoleType.Value("NETWORK")])
LOGGER.debug("Connection: %s is approved", connection_id)
self._network.update_connection_status(
connection_id,
ConnectionStatus.CONNECTED)
return HandlerResult(
HandlerStatus.RETURN,
message_out=auth_trust_response,
message_type=validator_pb2.Message.AUTHORIZATION_TRUST_RESPONSE) | def function[handle, parameter[self, connection_id, message_content]]:
constant[
The simplest authorization type will be Trust. If Trust authorization
is enabled, the validator will trust the connection and approve any
roles requested that are available on that endpoint. If the requester
wishes to gain access to every role it has permission to access, it can
request access to the role ALL, and the validator will respond with all
available roles. If the permission verifier deems the connection to not
have access to a role, the connection has not sent a ConnectionRequest
or the connection has already received an AuthorizationTrustResponse,
an AuthorizationViolation will be returned and the connection will be
closed.
]
if compare[call[name[self]._network.get_connection_status, parameter[name[connection_id]]] not_equal[!=] name[ConnectionStatus].CONNECTION_REQUEST] begin[:]
call[name[LOGGER].debug, parameter[constant[Connection's previous message was not a ConnectionRequest, Remove connection to %s], name[connection_id]]]
variable[violation] assign[=] call[name[AuthorizationViolation], parameter[]]
return[call[name[HandlerResult], parameter[name[HandlerStatus].RETURN_AND_CLOSE]]]
variable[request] assign[=] call[name[AuthorizationTrustRequest], parameter[]]
call[name[request].ParseFromString, parameter[name[message_content]]]
variable[roles] assign[=] name[self]._network.roles
for taget[name[role]] in starred[name[request].roles] begin[:]
if <ast.BoolOp object at 0x7da1b26ade10> begin[:]
variable[permitted] assign[=] constant[False]
if compare[constant[network] in name[roles]] begin[:]
variable[permitted] assign[=] call[name[self]._permission_verifier.check_network_role, parameter[name[request].public_key]]
if <ast.UnaryOp object at 0x7da1b26aeb60> begin[:]
variable[violation] assign[=] call[name[AuthorizationViolation], parameter[]]
return[call[name[HandlerResult], parameter[name[HandlerStatus].RETURN_AND_CLOSE]]]
call[name[self]._network.update_connection_public_key, parameter[name[connection_id], name[request].public_key]]
if compare[call[name[RoleType].Value, parameter[constant[NETWORK]]] in name[request].roles] begin[:]
<ast.Try object at 0x7da18f00c070>
if <ast.UnaryOp object at 0x7da18f00e560> begin[:]
call[name[self]._network.send_connect_request, parameter[name[connection_id]]]
variable[auth_trust_response] assign[=] call[name[AuthorizationTrustResponse], parameter[]]
call[name[LOGGER].debug, parameter[constant[Connection: %s is approved], name[connection_id]]]
call[name[self]._network.update_connection_status, parameter[name[connection_id], name[ConnectionStatus].CONNECTED]]
return[call[name[HandlerResult], parameter[name[HandlerStatus].RETURN]]] | keyword[def] identifier[handle] ( identifier[self] , identifier[connection_id] , identifier[message_content] ):
literal[string]
keyword[if] identifier[self] . identifier[_network] . identifier[get_connection_status] ( identifier[connection_id] )!= identifier[ConnectionStatus] . identifier[CONNECTION_REQUEST] :
identifier[LOGGER] . identifier[debug] ( literal[string]
literal[string] ,
identifier[connection_id] )
identifier[violation] = identifier[AuthorizationViolation] (
identifier[violation] = identifier[RoleType] . identifier[Value] ( literal[string] ))
keyword[return] identifier[HandlerResult] (
identifier[HandlerStatus] . identifier[RETURN_AND_CLOSE] ,
identifier[message_out] = identifier[violation] ,
identifier[message_type] = identifier[validator_pb2] . identifier[Message]
. identifier[AUTHORIZATION_VIOLATION] )
identifier[request] = identifier[AuthorizationTrustRequest] ()
identifier[request] . identifier[ParseFromString] ( identifier[message_content] )
identifier[roles] = identifier[self] . identifier[_network] . identifier[roles]
keyword[for] identifier[role] keyword[in] identifier[request] . identifier[roles] :
keyword[if] identifier[role] == identifier[RoleType] . identifier[Value] ( literal[string] ) keyword[or] identifier[role] == identifier[RoleType] . identifier[Value] ( literal[string] ):
identifier[permitted] = keyword[False]
keyword[if] literal[string] keyword[in] identifier[roles] :
identifier[permitted] = identifier[self] . identifier[_permission_verifier] . identifier[check_network_role] (
identifier[request] . identifier[public_key] )
keyword[if] keyword[not] identifier[permitted] :
identifier[violation] = identifier[AuthorizationViolation] (
identifier[violation] = identifier[RoleType] . identifier[Value] ( literal[string] ))
keyword[return] identifier[HandlerResult] (
identifier[HandlerStatus] . identifier[RETURN_AND_CLOSE] ,
identifier[message_out] = identifier[violation] ,
identifier[message_type] = identifier[validator_pb2] . identifier[Message]
. identifier[AUTHORIZATION_VIOLATION] )
identifier[self] . identifier[_network] . identifier[update_connection_public_key] ( identifier[connection_id] ,
identifier[request] . identifier[public_key] )
keyword[if] identifier[RoleType] . identifier[Value] ( literal[string] ) keyword[in] identifier[request] . identifier[roles] :
keyword[try] :
identifier[is_outbound_connection] = identifier[self] . identifier[_network] . identifier[is_outbound_connection] (
identifier[connection_id] )
keyword[except] identifier[KeyError] :
keyword[return] identifier[HandlerResult] ( identifier[HandlerStatus] . identifier[DROP] )
keyword[if] keyword[not] identifier[is_outbound_connection] :
identifier[self] . identifier[_network] . identifier[send_connect_request] ( identifier[connection_id] )
keyword[else] :
identifier[self] . identifier[_gossip] . identifier[connect_success] ( identifier[connection_id] )
identifier[auth_trust_response] = identifier[AuthorizationTrustResponse] (
identifier[roles] =[ identifier[RoleType] . identifier[Value] ( literal[string] )])
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[connection_id] )
identifier[self] . identifier[_network] . identifier[update_connection_status] (
identifier[connection_id] ,
identifier[ConnectionStatus] . identifier[CONNECTED] )
keyword[return] identifier[HandlerResult] (
identifier[HandlerStatus] . identifier[RETURN] ,
identifier[message_out] = identifier[auth_trust_response] ,
identifier[message_type] = identifier[validator_pb2] . identifier[Message] . identifier[AUTHORIZATION_TRUST_RESPONSE] ) | def handle(self, connection_id, message_content):
"""
The simplest authorization type will be Trust. If Trust authorization
is enabled, the validator will trust the connection and approve any
roles requested that are available on that endpoint. If the requester
wishes to gain access to every role it has permission to access, it can
request access to the role ALL, and the validator will respond with all
available roles. If the permission verifier deems the connection to not
have access to a role, the connection has not sent a ConnectionRequest
or the connection has already received an AuthorizationTrustResponse,
an AuthorizationViolation will be returned and the connection will be
closed.
"""
if self._network.get_connection_status(connection_id) != ConnectionStatus.CONNECTION_REQUEST:
LOGGER.debug("Connection's previous message was not a ConnectionRequest, Remove connection to %s", connection_id)
violation = AuthorizationViolation(violation=RoleType.Value('NETWORK'))
return HandlerResult(HandlerStatus.RETURN_AND_CLOSE, message_out=violation, message_type=validator_pb2.Message.AUTHORIZATION_VIOLATION) # depends on [control=['if'], data=[]]
request = AuthorizationTrustRequest()
request.ParseFromString(message_content)
# Check that the connection's public key is allowed by the network role
roles = self._network.roles
for role in request.roles:
if role == RoleType.Value('NETWORK') or role == RoleType.Value('ALL'):
permitted = False
if 'network' in roles:
permitted = self._permission_verifier.check_network_role(request.public_key) # depends on [control=['if'], data=[]]
if not permitted:
violation = AuthorizationViolation(violation=RoleType.Value('NETWORK'))
return HandlerResult(HandlerStatus.RETURN_AND_CLOSE, message_out=violation, message_type=validator_pb2.Message.AUTHORIZATION_VIOLATION) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['role']]
self._network.update_connection_public_key(connection_id, request.public_key)
if RoleType.Value('NETWORK') in request.roles:
# Need to send ConnectionRequest to authorize ourself with the
# connection if they initialized the connection
try:
is_outbound_connection = self._network.is_outbound_connection(connection_id) # depends on [control=['try'], data=[]]
except KeyError:
# Connection has gone away, drop message
return HandlerResult(HandlerStatus.DROP) # depends on [control=['except'], data=[]]
if not is_outbound_connection:
self._network.send_connect_request(connection_id) # depends on [control=['if'], data=[]]
else:
# If this is an outbound connection, authorization is complete
# for both connections and peering can begin.
self._gossip.connect_success(connection_id) # depends on [control=['if'], data=[]]
auth_trust_response = AuthorizationTrustResponse(roles=[RoleType.Value('NETWORK')])
LOGGER.debug('Connection: %s is approved', connection_id)
self._network.update_connection_status(connection_id, ConnectionStatus.CONNECTED)
return HandlerResult(HandlerStatus.RETURN, message_out=auth_trust_response, message_type=validator_pb2.Message.AUTHORIZATION_TRUST_RESPONSE) |
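A comment-only summary of the branching in the handler above, since the control flow is spread over several guard clauses.

# Decision sketch for handle(); each line mirrors one guard above.
#   previous message was not ConnectionRequest -> AUTHORIZATION_VIOLATION, close
#   NETWORK/ALL requested but not permitted    -> AUTHORIZATION_VIOLATION, close
#   otherwise -> store public key; if NETWORK was requested, either send our
#                own ConnectionRequest (inbound peer) or report connect_success
#                (outbound peer); reply AUTHORIZATION_TRUST_RESPONSE, CONNECTED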
def _execActions(self, type, msg):
""" Execute Registered Actions """
for action in self.ACTIONS:
action(type, msg) | def function[_execActions, parameter[self, type, msg]]:
constant[ Execute Registered Actions ]
for taget[name[action]] in starred[name[self].ACTIONS] begin[:]
call[name[action], parameter[name[type], name[msg]]] | keyword[def] identifier[_execActions] ( identifier[self] , identifier[type] , identifier[msg] ):
literal[string]
keyword[for] identifier[action] keyword[in] identifier[self] . identifier[ACTIONS] :
identifier[action] ( identifier[type] , identifier[msg] ) | def _execActions(self, type, msg):
""" Execute Registered Actions """
for action in self.ACTIONS:
action(type, msg) # depends on [control=['for'], data=['action']] |
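The ACTIONS list is a plain observer hook; below is a sketch of wiring a callback into it, assuming ACTIONS is a list attribute on the owning object `monitor` (the source only shows the dispatch side).

# Hypothetical registration; only _execActions() appears in the source.
def log_action(type, msg):
    print("[%s] %s" % (type, msg))

monitor.ACTIONS.append(log_action)
monitor._execActions("warning", "disk nearly full")   # prints via log_action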
def RANSAC(model_func, eval_func, data, num_points, num_iter, threshold, recalculate=False):
"""Apply RANSAC.
This RANSAC implementation will choose the best model based on the number of points in the consensus set. At evaluation time the model is created using num_points points. Then it will be recalculated using the points in the consensus set.
Parameters
------------
model_func: Takes a data parameter of size DxK where K is the number of points needed to construct the model and returns the model (Mx1 vector)
eval_func: Takes a model parameter (Lx1) and one or more data points (DxC, C>=1) and calculates the score of the point(s) relative to the selected model
data : array (DxN) where D is dimensionality and N number of samples
"""
M = None
max_consensus = 0
all_idx = list(range(data.shape[1]))
final_consensus = []
for k in range(num_iter):
np.random.shuffle(all_idx)
model_set = all_idx[:num_points]
x = data[:, model_set]
m = model_func(x)
model_error = eval_func(m, data)
assert model_error.ndim == 1
assert model_error.size == data.shape[1]
consensus_idx = np.flatnonzero(model_error < threshold)
if len(consensus_idx) > max_consensus:
M = m
max_consensus = len(consensus_idx)
final_consensus = consensus_idx
# Recalculate using current consensus set?
if recalculate and len(final_consensus) > 0:
final_consensus_set = data[:, final_consensus]
M = model_func(final_consensus_set)
return (M, final_consensus) | def function[RANSAC, parameter[model_func, eval_func, data, num_points, num_iter, threshold, recalculate]]:
constant[Apply RANSAC.
This RANSAC implementation will choose the best model based on the number of points in the consensus set. At evaluation time the model is created using num_points points. Then it will be recalculated using the points in the consensus set.
Parameters
------------
model_func: Takes a data parameter of size DxK where K is the number of points needed to construct the model and returns the model (Mx1 vector)
eval_func: Takes a model parameter (Lx1) and one or more data points (DxC, C>=1) and calculates the score of the point(s) relative to the selected model
data : array (DxN) where D is dimensionality and N number of samples
]
variable[M] assign[=] constant[None]
variable[max_consensus] assign[=] constant[0]
variable[all_idx] assign[=] call[name[list], parameter[call[name[range], parameter[call[name[data].shape][constant[1]]]]]]
variable[final_consensus] assign[=] list[[]]
for taget[name[k]] in starred[call[name[range], parameter[name[num_iter]]]] begin[:]
call[name[np].random.shuffle, parameter[name[all_idx]]]
variable[model_set] assign[=] call[name[all_idx]][<ast.Slice object at 0x7da18bc728c0>]
variable[x] assign[=] call[name[data]][tuple[[<ast.Slice object at 0x7da18bc726e0>, <ast.Name object at 0x7da18bc72cb0>]]]
variable[m] assign[=] call[name[model_func], parameter[name[x]]]
variable[model_error] assign[=] call[name[eval_func], parameter[name[m], name[data]]]
assert[compare[name[model_error].ndim equal[==] constant[1]]]
assert[compare[name[model_error].size equal[==] call[name[data].shape][constant[1]]]]
variable[consensus_idx] assign[=] call[name[np].flatnonzero, parameter[compare[name[model_error] less[<] name[threshold]]]]
if compare[call[name[len], parameter[name[consensus_idx]]] greater[>] name[max_consensus]] begin[:]
variable[M] assign[=] name[m]
variable[max_consensus] assign[=] call[name[len], parameter[name[consensus_idx]]]
variable[final_consensus] assign[=] name[consensus_idx]
if <ast.BoolOp object at 0x7da18bc727d0> begin[:]
variable[final_consensus_set] assign[=] call[name[data]][tuple[[<ast.Slice object at 0x7da18bc73280>, <ast.Name object at 0x7da18bc73040>]]]
variable[M] assign[=] call[name[model_func], parameter[name[final_consensus_set]]]
return[tuple[[<ast.Name object at 0x7da18bc73e50>, <ast.Name object at 0x7da18bc73ee0>]]] | keyword[def] identifier[RANSAC] ( identifier[model_func] , identifier[eval_func] , identifier[data] , identifier[num_points] , identifier[num_iter] , identifier[threshold] , identifier[recalculate] = keyword[False] ):
literal[string]
identifier[M] = keyword[None]
identifier[max_consensus] = literal[int]
identifier[all_idx] = identifier[list] ( identifier[range] ( identifier[data] . identifier[shape] [ literal[int] ]))
identifier[final_consensus] =[]
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[num_iter] ):
identifier[np] . identifier[random] . identifier[shuffle] ( identifier[all_idx] )
identifier[model_set] = identifier[all_idx] [: identifier[num_points] ]
identifier[x] = identifier[data] [:, identifier[model_set] ]
identifier[m] = identifier[model_func] ( identifier[x] )
identifier[model_error] = identifier[eval_func] ( identifier[m] , identifier[data] )
keyword[assert] identifier[model_error] . identifier[ndim] == literal[int]
keyword[assert] identifier[model_error] . identifier[size] == identifier[data] . identifier[shape] [ literal[int] ]
identifier[consensus_idx] = identifier[np] . identifier[flatnonzero] ( identifier[model_error] < identifier[threshold] )
keyword[if] identifier[len] ( identifier[consensus_idx] )> identifier[max_consensus] :
identifier[M] = identifier[m]
identifier[max_consensus] = identifier[len] ( identifier[consensus_idx] )
identifier[final_consensus] = identifier[consensus_idx]
keyword[if] identifier[recalculate] keyword[and] identifier[len] ( identifier[final_consensus] )> literal[int] :
identifier[final_consensus_set] = identifier[data] [:, identifier[final_consensus] ]
identifier[M] = identifier[model_func] ( identifier[final_consensus_set] )
keyword[return] ( identifier[M] , identifier[final_consensus] ) | def RANSAC(model_func, eval_func, data, num_points, num_iter, threshold, recalculate=False):
"""Apply RANSAC.
This RANSAC implementation will choose the best model based on the number of points in the consensus set. At evaluation time the model is created using num_points points. Then it will be recalculated using the points in the consensus set.
Parameters
------------
model_func: Takes a data parameter of size DxK where K is the number of points needed to construct the model and returns the model (Mx1 vector)
eval_func: Takes a model parameter (Lx1) and one or more data points (DxC, C>=1) and calculates the score of the point(s) relative to the selected model
data : array (DxN) where D is dimensionality and N number of samples
"""
M = None
max_consensus = 0
all_idx = list(range(data.shape[1]))
final_consensus = []
for k in range(num_iter):
np.random.shuffle(all_idx)
model_set = all_idx[:num_points]
x = data[:, model_set]
m = model_func(x)
model_error = eval_func(m, data)
assert model_error.ndim == 1
assert model_error.size == data.shape[1]
consensus_idx = np.flatnonzero(model_error < threshold)
if len(consensus_idx) > max_consensus:
M = m
max_consensus = len(consensus_idx)
final_consensus = consensus_idx # depends on [control=['if'], data=['max_consensus']] # depends on [control=['for'], data=[]]
# Recalculate using current consensus set?
if recalculate and len(final_consensus) > 0:
final_consensus_set = data[:, final_consensus]
M = model_func(final_consensus_set) # depends on [control=['if'], data=[]]
return (M, final_consensus) |
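A self-contained sketch exercising the routine above on a 2-D line fit; the least-squares callbacks use np.polyfit, which is an illustrative choice rather than anything from the source.

import numpy as np

def line_model(points):
    # points: 2xK array of K sample points; fit y = a*x + b
    return np.polyfit(points[0], points[1], 1)

def line_error(model, points):
    # absolute vertical distance of every point from the candidate line
    a, b = model
    return np.abs(points[1] - (a * points[0] + b))

data = np.vstack([np.arange(50.0), 2 * np.arange(50.0) + 1])
data[1, ::7] += 25                       # inject a handful of outliers
M, consensus = RANSAC(line_model, line_error, data,
                      num_points=2, num_iter=100, threshold=1.0,
                      recalculate=True)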
def reflect(self, bind='__all__', app=None):
"""Reflects tables from the database.
.. versionchanged:: 0.12
Parameters were added
"""
self._execute_for_all_tables(app, bind, 'reflect', skip_tables=True) | def function[reflect, parameter[self, bind, app]]:
constant[Reflects tables from the database.
.. versionchanged:: 0.12
Parameters were added
]
call[name[self]._execute_for_all_tables, parameter[name[app], name[bind], constant[reflect]]] | keyword[def] identifier[reflect] ( identifier[self] , identifier[bind] = literal[string] , identifier[app] = keyword[None] ):
literal[string]
identifier[self] . identifier[_execute_for_all_tables] ( identifier[app] , identifier[bind] , literal[string] , identifier[skip_tables] = keyword[True] ) | def reflect(self, bind='__all__', app=None):
"""Reflects tables from the database.
.. versionchanged:: 0.12
Parameters were added
"""
self._execute_for_all_tables(app, bind, 'reflect', skip_tables=True) |
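Typical call patterns for the reflect wrapper above, assuming `db` is the extension instance and `app` a configured Flask application.

# `db` and `app` are assumed to exist; bind='__all__' is the default above.
db.reflect(app=app)              # reflect tables from every configured bind
db.reflect(bind=None, app=app)   # restrict to the default database only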
def backward_sampling_qmc(self, M):
"""QMC version of backward sampling.
Parameters
----------
M : int
number of trajectories
Note
----
Use this only on the history of a SQMC algorithm.
"""
self._check_h_orders()
u = qmc.sobol(M, self.T)
# the final particles have not been sorted
hT = hilbert.hilbert_sort(self.X[-1])
# searchsorted to avoid having to sort in place u according to u[:,T-1]
idx = np.searchsorted(np.cumsum(self.wgt[-1].W[hT]), u[:, -1])
paths = [self.X[-1][hT][idx], ]
for t in reversed(range(self.T - 1)):
idx = np.empty(M, 'int')
for m, xn in enumerate(paths[-1]):
lwm = self.wgt[t].lw + self.model.logpt(t + 1, self.X[t], xn)
# use ordered version here
cw = np.cumsum(rs.exp_and_normalise(lwm[self.h_orders[t]]))
idx[m] = np.searchsorted(cw, u[m, t])
paths.append(self.X[t][self.h_orders[t]][idx])
paths.reverse()
return paths | def function[backward_sampling_qmc, parameter[self, M]]:
constant[QMC version of backward sampling.
Parameters
----------
M : int
number of trajectories
Note
----
Use this only on the history of a SQMC algorithm.
]
call[name[self]._check_h_orders, parameter[]]
variable[u] assign[=] call[name[qmc].sobol, parameter[name[M], name[self].T]]
variable[hT] assign[=] call[name[hilbert].hilbert_sort, parameter[call[name[self].X][<ast.UnaryOp object at 0x7da1b15f0580>]]]
variable[idx] assign[=] call[name[np].searchsorted, parameter[call[name[np].cumsum, parameter[call[call[name[self].wgt][<ast.UnaryOp object at 0x7da1b15f25c0>].W][name[hT]]]], call[name[u]][tuple[[<ast.Slice object at 0x7da1b15f0fd0>, <ast.UnaryOp object at 0x7da1b15f0190>]]]]]
variable[paths] assign[=] list[[<ast.Subscript object at 0x7da1b15f15a0>]]
for taget[name[t]] in starred[call[name[reversed], parameter[call[name[range], parameter[binary_operation[name[self].T - constant[1]]]]]]] begin[:]
variable[idx] assign[=] call[name[np].empty, parameter[name[M], constant[int]]]
for taget[tuple[[<ast.Name object at 0x7da2047e9f90>, <ast.Name object at 0x7da2047eafb0>]]] in starred[call[name[enumerate], parameter[call[name[paths]][<ast.UnaryOp object at 0x7da2047ebca0>]]]] begin[:]
variable[lwm] assign[=] binary_operation[call[name[self].wgt][name[t]].lw + call[name[self].model.logpt, parameter[binary_operation[name[t] + constant[1]], call[name[self].X][name[t]], name[xn]]]]
variable[cw] assign[=] call[name[np].cumsum, parameter[call[name[rs].exp_and_normalise, parameter[call[name[lwm]][call[name[self].h_orders][name[t]]]]]]]
call[name[idx]][name[m]] assign[=] call[name[np].searchsorted, parameter[name[cw], call[name[u]][tuple[[<ast.Name object at 0x7da1b15f2320>, <ast.Name object at 0x7da1b15f0cd0>]]]]]
call[name[paths].append, parameter[call[call[call[name[self].X][name[t]]][call[name[self].h_orders][name[t]]]][name[idx]]]]
call[name[paths].reverse, parameter[]]
return[name[paths]] | keyword[def] identifier[backward_sampling_qmc] ( identifier[self] , identifier[M] ):
literal[string]
identifier[self] . identifier[_check_h_orders] ()
identifier[u] = identifier[qmc] . identifier[sobol] ( identifier[M] , identifier[self] . identifier[T] )
identifier[hT] = identifier[hilbert] . identifier[hilbert_sort] ( identifier[self] . identifier[X] [- literal[int] ])
identifier[idx] = identifier[np] . identifier[searchsorted] ( identifier[np] . identifier[cumsum] ( identifier[self] . identifier[wgt] [- literal[int] ]. identifier[W] [ identifier[hT] ]), identifier[u] [:,- literal[int] ])
identifier[paths] =[ identifier[self] . identifier[X] [- literal[int] ][ identifier[hT] ][ identifier[idx] ],]
keyword[for] identifier[t] keyword[in] identifier[reversed] ( identifier[range] ( identifier[self] . identifier[T] - literal[int] )):
identifier[idx] = identifier[np] . identifier[empty] ( identifier[M] , literal[string] )
keyword[for] identifier[m] , identifier[xn] keyword[in] identifier[enumerate] ( identifier[paths] [- literal[int] ]):
identifier[lwm] = identifier[self] . identifier[wgt] [ identifier[t] ]. identifier[lw] + identifier[self] . identifier[model] . identifier[logpt] ( identifier[t] + literal[int] , identifier[self] . identifier[X] [ identifier[t] ], identifier[xn] )
identifier[cw] = identifier[np] . identifier[cumsum] ( identifier[rs] . identifier[exp_and_normalise] ( identifier[lwm] [ identifier[self] . identifier[h_orders] [ identifier[t] ]]))
identifier[idx] [ identifier[m] ]= identifier[np] . identifier[searchsorted] ( identifier[cw] , identifier[u] [ identifier[m] , identifier[t] ])
identifier[paths] . identifier[append] ( identifier[self] . identifier[X] [ identifier[t] ][ identifier[self] . identifier[h_orders] [ identifier[t] ]][ identifier[idx] ])
identifier[paths] . identifier[reverse] ()
keyword[return] identifier[paths] | def backward_sampling_qmc(self, M):
"""QMC version of backward sampling.
Parameters
----------
M : int
number of trajectories
Note
----
Use this only on the history of a SQMC algorithm.
"""
self._check_h_orders()
u = qmc.sobol(M, self.T)
# the final particles have not been sorted
hT = hilbert.hilbert_sort(self.X[-1])
# searchsorted to avoid having to sort in place u according to u[:,T-1]
idx = np.searchsorted(np.cumsum(self.wgt[-1].W[hT]), u[:, -1])
paths = [self.X[-1][hT][idx]]
for t in reversed(range(self.T - 1)):
idx = np.empty(M, 'int')
for (m, xn) in enumerate(paths[-1]):
lwm = self.wgt[t].lw + self.model.logpt(t + 1, self.X[t], xn)
# use ordered version here
cw = np.cumsum(rs.exp_and_normalise(lwm[self.h_orders[t]]))
idx[m] = np.searchsorted(cw, u[m, t]) # depends on [control=['for'], data=[]]
paths.append(self.X[t][self.h_orders[t]][idx]) # depends on [control=['for'], data=['t']]
paths.reverse()
return paths |
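The inverse-CDF lookup used twice above, isolated into a tiny example: searchsorted on the cumulative weights turns a uniform draw into an index sampled with probability W[m].

import numpy as np

W = np.array([0.1, 0.2, 0.3, 0.4])    # normalised weights
u = 0.55                              # a uniform(0, 1) draw
m = np.searchsorted(np.cumsum(W), u)  # cumsum = [0.1, 0.3, 0.6, 1.0]
# m == 2, because 0.3 < u <= 0.6; index 2 is drawn with probability W[2] = 0.3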
def needs_quotes(s):
"""Checks whether a string is a dot language ID.
It will check whether the string is solely composed
by the characters allowed in an ID or not.
If the string is one of the reserved keywords it will
need quotes too but the user will need to add them
manually.
"""
# If the name is a reserved keyword it will need quotes but pydot
# can't tell when it's being used as a keyword or when it's simply
# a name. Hence the user needs to supply the quotes when an element
# would use a reserved keyword as name. This function will return
# false indicating that a keyword string, if provided as-is, won't
# need quotes.
if s in DOT_KEYWORDS:
return False
chars = [ord(c) for c in s if ord(c)>0x7f or ord(c)==0]
if chars and not ID_RE_DBL_QUOTED.match(s) and not ID_RE_HTML.match(s):
return True
for test_re in [ID_RE_ALPHA_NUMS, ID_RE_NUM, ID_RE_DBL_QUOTED, ID_RE_HTML, ID_RE_ALPHA_NUMS_WITH_PORTS]:
if test_re.match(s):
return False
m = ID_RE_WITH_PORT.match(s)
if m:
return needs_quotes(m.group(1)) or needs_quotes(m.group(2))
return True | def function[needs_quotes, parameter[s]]:
constant[Checks whether a string is a dot language ID.
It will check whether the string is solely composed
by the characters allowed in an ID or not.
If the string is one of the reserved keywords it will
need quotes too but the user will need to add them
manually.
]
if compare[name[s] in name[DOT_KEYWORDS]] begin[:]
return[constant[False]]
variable[chars] assign[=] <ast.ListComp object at 0x7da1b02e7850>
if <ast.BoolOp object at 0x7da1b02e45e0> begin[:]
return[constant[True]]
for taget[name[test_re]] in starred[list[[<ast.Name object at 0x7da1b02e6e00>, <ast.Name object at 0x7da1b02e50c0>, <ast.Name object at 0x7da1b02e7bb0>, <ast.Name object at 0x7da1b02e7070>, <ast.Name object at 0x7da1b02e7a60>]]] begin[:]
if call[name[test_re].match, parameter[name[s]]] begin[:]
return[constant[False]]
variable[m] assign[=] call[name[ID_RE_WITH_PORT].match, parameter[name[s]]]
if name[m] begin[:]
return[<ast.BoolOp object at 0x7da1b02e6710>]
return[constant[True]] | keyword[def] identifier[needs_quotes] ( identifier[s] ):
literal[string]
keyword[if] identifier[s] keyword[in] identifier[DOT_KEYWORDS] :
keyword[return] keyword[False]
identifier[chars] =[ identifier[ord] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[s] keyword[if] identifier[ord] ( identifier[c] )> literal[int] keyword[or] identifier[ord] ( identifier[c] )== literal[int] ]
keyword[if] identifier[chars] keyword[and] keyword[not] identifier[ID_RE_DBL_QUOTED] . identifier[match] ( identifier[s] ) keyword[and] keyword[not] identifier[ID_RE_HTML] . identifier[match] ( identifier[s] ):
keyword[return] keyword[True]
keyword[for] identifier[test_re] keyword[in] [ identifier[ID_RE_ALPHA_NUMS] , identifier[ID_RE_NUM] , identifier[ID_RE_DBL_QUOTED] , identifier[ID_RE_HTML] , identifier[ID_RE_ALPHA_NUMS_WITH_PORTS] ]:
keyword[if] identifier[test_re] . identifier[match] ( identifier[s] ):
keyword[return] keyword[False]
identifier[m] = identifier[ID_RE_WITH_PORT] . identifier[match] ( identifier[s] )
keyword[if] identifier[m] :
keyword[return] identifier[needs_quotes] ( identifier[m] . identifier[group] ( literal[int] )) keyword[or] identifier[needs_quotes] ( identifier[m] . identifier[group] ( literal[int] ))
keyword[return] keyword[True] | def needs_quotes(s):
"""Checks whether a string is a dot language ID.
It will check whether the string is solely composed
by the characters allowed in an ID or not.
If the string is one of the reserved keywords it will
need quotes too but the user will need to add them
manually.
"""
# If the name is a reserved keyword it will need quotes but pydot
# can't tell when it's being used as a keyword or when it's simply
# a name. Hence the user needs to supply the quotes when an element
# would use a reserved keyword as name. This function will return
# false indicating that a keyword string, if provided as-is, won't
# need quotes.
if s in DOT_KEYWORDS:
return False # depends on [control=['if'], data=[]]
chars = [ord(c) for c in s if ord(c) > 127 or ord(c) == 0]
if chars and (not ID_RE_DBL_QUOTED.match(s)) and (not ID_RE_HTML.match(s)):
return True # depends on [control=['if'], data=[]]
for test_re in [ID_RE_ALPHA_NUMS, ID_RE_NUM, ID_RE_DBL_QUOTED, ID_RE_HTML, ID_RE_ALPHA_NUMS_WITH_PORTS]:
if test_re.match(s):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['test_re']]
m = ID_RE_WITH_PORT.match(s)
if m:
return needs_quotes(m.group(1)) or needs_quotes(m.group(2)) # depends on [control=['if'], data=[]]
return True |
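Expected outcomes for a few representative IDs, assuming the module-level ID_RE_* regexes follow the dot grammar referenced above.

# Behaviour sketch; the ID_RE_* patterns themselves are defined elsewhere.
needs_quotes("node_1")     # False: plain alphanumeric ID
needs_quotes("graph")      # False by design: keyword, caller adds quotes
needs_quotes("my node")    # True: whitespace is not a legal ID character
needs_quotes("3.14")       # False: numerals are valid IDs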
def load(self):
"""
Load the definition of the rule, searching in the specified rule dirs first, then in the built-in definitions
:return: None
"""
file_name_valid = False
rule_type_valid = False
# Look for a locally-defined rule
for rule_dir in self.rule_dirs:
file_path = os.path.join(rule_dir, self.file_name) if rule_dir else self.file_name
if os.path.isfile(file_path):
self.file_path = file_path
file_name_valid = True
break
# Look for a built-in rule
if not file_name_valid:
for rule_type in self.rule_types:
if self.file_name.startswith(rule_type):
self.file_path = os.path.join(self.rules_data_path, self.file_name)
rule_type_valid = True
file_name_valid = True
break
if not rule_type_valid:
for rule_type in self.rule_types:
self.file_path = os.path.join(self.rules_data_path, rule_type, self.file_name)
if os.path.isfile(self.file_path):
file_name_valid = True
break
else:
if os.path.isfile(self.file_path):
file_name_valid = True
if not file_name_valid:
printError('Error: could not find %s' % self.file_name)
else:
try:
with open(self.file_path, 'rt') as f:
self.string_definition = f.read()
self.load_from_string_definition()
except Exception as e:
printException(e)
printError('Failed to load rule defined in %s' % self.file_path) | def function[load, parameter[self]]:
constant[
Load the definition of the rule, searching in the specified rule dirs first, then in the built-in definitions
:return: None
]
variable[file_name_valid] assign[=] constant[False]
variable[rule_type_valid] assign[=] constant[False]
for taget[name[rule_dir]] in starred[name[self].rule_dirs] begin[:]
variable[file_path] assign[=] <ast.IfExp object at 0x7da204960df0>
if call[name[os].path.isfile, parameter[name[file_path]]] begin[:]
name[self].file_path assign[=] name[file_path]
variable[file_name_valid] assign[=] constant[True]
break
if <ast.UnaryOp object at 0x7da204962f80> begin[:]
for taget[name[rule_type]] in starred[name[self].rule_types] begin[:]
if call[name[self].file_name.startswith, parameter[name[rule_type]]] begin[:]
name[self].file_path assign[=] call[name[os].path.join, parameter[name[self].rules_data_path, name[self].file_name]]
variable[rule_type_valid] assign[=] constant[True]
variable[file_name_valid] assign[=] constant[True]
break
if <ast.UnaryOp object at 0x7da204962b60> begin[:]
for taget[name[rule_type]] in starred[name[self].rule_types] begin[:]
name[self].file_path assign[=] call[name[os].path.join, parameter[name[self].rules_data_path, name[rule_type], name[self].file_name]]
if call[name[os].path.isfile, parameter[name[self].file_path]] begin[:]
variable[file_name_valid] assign[=] constant[True]
break
if <ast.UnaryOp object at 0x7da204963d30> begin[:]
call[name[printError], parameter[binary_operation[constant[Error: could not find %s] <ast.Mod object at 0x7da2590d6920> name[self].file_name]]] | keyword[def] identifier[load] ( identifier[self] ):
literal[string]
identifier[file_name_valid] = keyword[False]
identifier[rule_type_valid] = keyword[False]
keyword[for] identifier[rule_dir] keyword[in] identifier[self] . identifier[rule_dirs] :
identifier[file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[rule_dir] , identifier[self] . identifier[file_name] ) keyword[if] identifier[rule_dir] keyword[else] identifier[self] . identifier[file_name]
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[file_path] ):
identifier[self] . identifier[file_path] = identifier[file_path]
identifier[file_name_valid] = keyword[True]
keyword[break]
keyword[if] keyword[not] identifier[file_name_valid] :
keyword[for] identifier[rule_type] keyword[in] identifier[self] . identifier[rule_types] :
keyword[if] identifier[self] . identifier[file_name] . identifier[startswith] ( identifier[rule_type] ):
identifier[self] . identifier[file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[rules_data_path] , identifier[self] . identifier[file_name] )
identifier[rule_type_valid] = keyword[True]
identifier[file_name_valid] = keyword[True]
keyword[break]
keyword[if] keyword[not] identifier[rule_type_valid] :
keyword[for] identifier[rule_type] keyword[in] identifier[self] . identifier[rule_types] :
identifier[self] . identifier[file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[rules_data_path] , identifier[rule_type] , identifier[self] . identifier[file_name] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[self] . identifier[file_path] ):
identifier[file_name_valid] = keyword[True]
keyword[break]
keyword[else] :
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[self] . identifier[file_path] ):
identifier[file_name_valid] = keyword[True]
keyword[if] keyword[not] identifier[file_name_valid] :
identifier[printError] ( literal[string] % identifier[self] . identifier[file_name] )
keyword[else] :
keyword[try] :
keyword[with] identifier[open] ( identifier[self] . identifier[file_path] , literal[string] ) keyword[as] identifier[f] :
identifier[self] . identifier[string_definition] = identifier[f] . identifier[read] ()
identifier[self] . identifier[load_from_string_definition] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[printException] ( identifier[e] )
identifier[printError] ( literal[string] % identifier[self] . identifier[file_path] ) | def load(self):
"""
Load the definition of the rule, searching in the specified rule dirs first, then in the built-in definitions
:return: None
"""
file_name_valid = False
rule_type_valid = False
# Look for a locally-defined rule
for rule_dir in self.rule_dirs:
file_path = os.path.join(rule_dir, self.file_name) if rule_dir else self.file_name
if os.path.isfile(file_path):
self.file_path = file_path
file_name_valid = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rule_dir']]
# Look for a built-in rule
if not file_name_valid:
for rule_type in self.rule_types:
if self.file_name.startswith(rule_type):
self.file_path = os.path.join(self.rules_data_path, self.file_name)
rule_type_valid = True
file_name_valid = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rule_type']]
if not rule_type_valid:
for rule_type in self.rule_types:
self.file_path = os.path.join(self.rules_data_path, rule_type, self.file_name)
if os.path.isfile(self.file_path):
file_name_valid = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rule_type']] # depends on [control=['if'], data=[]]
elif os.path.isfile(self.file_path):
file_name_valid = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not file_name_valid:
printError('Error: could not find %s' % self.file_name) # depends on [control=['if'], data=[]]
else:
try:
with open(self.file_path, 'rt') as f:
self.string_definition = f.read()
self.load_from_string_definition() # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except Exception as e:
printException(e)
printError('Failed to load rule defined in %s' % file_path) # depends on [control=['except'], data=['e']] |
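The three-step lookup in load() above can be exercised in isolation; a simplified sketch under assumed inputs (hypothetical rule_dirs / rules_data_path / rule_types values; the original additionally re-checks os.path.isfile on the built-in path):

# Hedged sketch of load()'s search order; all inputs are illustrative.
import os

def find_rule_file(file_name, rule_dirs, rules_data_path, rule_types):
    # 1) locally-defined rules take precedence
    for rule_dir in rule_dirs:
        path = os.path.join(rule_dir, file_name) if rule_dir else file_name
        if os.path.isfile(path):
            return path
    # 2) built-in rules whose file name starts with a known rule type
    for rule_type in rule_types:
        if file_name.startswith(rule_type):
            return os.path.join(rules_data_path, file_name)
    # 3) built-in rules stored in a per-type subdirectory
    for rule_type in rule_types:
        path = os.path.join(rules_data_path, rule_type, file_name)
        if os.path.isfile(path):
            return path
    return None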
def _poll_update_interval(self):
""" update the polling interval to be used next iteration """
# Increase by 1 second every 3 polls
if old_div(self.poll_count, 3) > self.poll_interval_level:
self.poll_interval_level += 1
self.poll_interval_s += 1
self.logger.info(
"Increased polling interval to %d seconds", self.poll_interval_s
) | def function[_poll_update_interval, parameter[self]]:
constant[ update the polling interval to be used next iteration ]
if compare[call[name[old_div], parameter[name[self].poll_count, constant[3]]] greater[>] name[self].poll_interval_level] begin[:]
<ast.AugAssign object at 0x7da1b16095d0>
<ast.AugAssign object at 0x7da1b1663e20>
call[name[self].logger.info, parameter[constant[Increased polling interval to %d seconds], name[self].poll_interval_s]] | keyword[def] identifier[_poll_update_interval] ( identifier[self] ):
literal[string]
keyword[if] identifier[old_div] ( identifier[self] . identifier[poll_count] , literal[int] )> identifier[self] . identifier[poll_interval_level] :
identifier[self] . identifier[poll_interval_level] += literal[int]
identifier[self] . identifier[poll_interval_s] += literal[int]
identifier[self] . identifier[logger] . identifier[info] (
literal[string] , identifier[self] . identifier[poll_interval_s]
) | def _poll_update_interval(self):
""" update the polling interval to be used next iteration """
# Increase by 1 second every 3 polls
if old_div(self.poll_count, 3) > self.poll_interval_level:
self.poll_interval_level += 1
self.poll_interval_s += 1
self.logger.info('Increased polling interval to %d seconds', self.poll_interval_s) # depends on [control=['if'], data=[]] |
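Traced in isolation, the interval above grows by one second every third poll; a small sketch (old_div is floor division here, as in the past/future compatibility helpers, and the 1 s starting interval is an assumption):

poll_interval_s, poll_interval_level = 1, 0  # assumed starting state
for poll_count in range(1, 10):
    if poll_count // 3 > poll_interval_level:
        poll_interval_level += 1
        poll_interval_s += 1
print(poll_interval_s)  # 4 -- raised after polls 3, 6 and 9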
def delete_cached_files(self, prefixes=[], suffixes=[]):
"""
Deletes any cached files matching the prefixes or suffixes given
"""
for filename in listdir(self.cache_directory_path):
delete = (
any([filename.endswith(ext) for ext in suffixes]) or
any([filename.startswith(pre) for pre in prefixes]))
if delete:
path = join(self.cache_directory_path, filename)
logger.info("Deleting %s", path)
remove(path) | def function[delete_cached_files, parameter[self, prefixes, suffixes]]:
constant[
Deletes any cached files matching the prefixes or suffixes given
]
for taget[name[filename]] in starred[call[name[listdir], parameter[name[self].cache_directory_path]]] begin[:]
variable[delete] assign[=] <ast.BoolOp object at 0x7da1b0744730>
if name[delete] begin[:]
variable[path] assign[=] call[name[join], parameter[name[self].cache_directory_path, name[filename]]]
call[name[logger].info, parameter[constant[Deleting %s], name[path]]]
call[name[remove], parameter[name[path]]] | keyword[def] identifier[delete_cached_files] ( identifier[self] , identifier[prefixes] =[], identifier[suffixes] =[]):
literal[string]
keyword[for] identifier[filename] keyword[in] identifier[listdir] ( identifier[self] . identifier[cache_directory_path] ):
identifier[delete] =(
identifier[any] ([ identifier[filename] . identifier[endswith] ( identifier[ext] ) keyword[for] identifier[ext] keyword[in] identifier[suffixes] ]) keyword[or]
identifier[any] ([ identifier[filename] . identifier[startswith] ( identifier[pre] ) keyword[for] identifier[pre] keyword[in] identifier[prefixes] ]))
keyword[if] identifier[delete] :
identifier[path] = identifier[join] ( identifier[self] . identifier[cache_directory_path] , identifier[filename] )
identifier[logger] . identifier[info] ( literal[string] , identifier[path] )
identifier[remove] ( identifier[path] ) | def delete_cached_files(self, prefixes=[], suffixes=[]):
"""
Deletes any cached files matching the prefixes or suffixes given
"""
for filename in listdir(self.cache_directory_path):
delete = any([filename.endswith(ext) for ext in suffixes]) or any([filename.startswith(pre) for pre in prefixes])
if delete:
path = join(self.cache_directory_path, filename)
logger.info('Deleting %s', path)
remove(path) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filename']] |
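The prefix/suffix match can be tested without the owning object; a standalone sketch over a plain directory path (helper name is hypothetical):

import os

def matching_cache_files(directory, prefixes=(), suffixes=()):
    # yields the paths that delete_cached_files() would remove
    for filename in os.listdir(directory):
        if filename.startswith(tuple(prefixes)) or filename.endswith(tuple(suffixes)):
            yield os.path.join(directory, filename)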
def remove_diacritics(self_or_cls, identifier):
"""
Remove diacritics and accents from the input leaving other
unicode characters alone."""
chars = ''
for c in identifier:
replacement = unicodedata.normalize('NFKD', c).encode('ASCII', 'ignore')
        if replacement:  # 'replacement' is bytes; comparing it to the str '' never matches on Python 3
chars += bytes_to_unicode(replacement)
else:
chars += c
return chars | def function[remove_diacritics, parameter[self_or_cls, identifier]]:
constant[
Remove diacritics and accents from the input leaving other
unicode characters alone.]
variable[chars] assign[=] constant[]
for taget[name[c]] in starred[name[identifier]] begin[:]
variable[replacement] assign[=] call[call[name[unicodedata].normalize, parameter[constant[NFKD], name[c]]].encode, parameter[constant[ASCII], constant[ignore]]]
if compare[name[replacement] not_equal[!=] constant[]] begin[:]
<ast.AugAssign object at 0x7da18dc07a30>
return[name[chars]] | keyword[def] identifier[remove_diacritics] ( identifier[self_or_cls] , identifier[identifier] ):
literal[string]
identifier[chars] = literal[string]
keyword[for] identifier[c] keyword[in] identifier[identifier] :
identifier[replacement] = identifier[unicodedata] . identifier[normalize] ( literal[string] , identifier[c] ). identifier[encode] ( literal[string] , literal[string] )
keyword[if] identifier[replacement] != literal[string] :
identifier[chars] += identifier[bytes_to_unicode] ( identifier[replacement] )
keyword[else] :
identifier[chars] += identifier[c]
keyword[return] identifier[chars] | def remove_diacritics(self_or_cls, identifier):
"""
Remove diacritics and accents from the input leaving other
unicode characters alone."""
chars = ''
for c in identifier:
replacement = unicodedata.normalize('NFKD', c).encode('ASCII', 'ignore')
        if replacement:  # bytes truthiness; an empty-str compare never matches on Python 3
chars += bytes_to_unicode(replacement) # depends on [control=['if'], data=['replacement']]
else:
chars += c # depends on [control=['for'], data=['c']]
return chars |
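Usage sketch; note that normalize(...).encode(...) returns bytes, which is why a truthiness test (rather than comparing against the str '') is the portable emptiness check:

import unicodedata

def strip_accents(text):
    out = ''
    for c in text:
        repl = unicodedata.normalize('NFKD', c).encode('ASCII', 'ignore')
        out += repl.decode('ascii') if repl else c
    return out

print(strip_accents('café déjà vu'))  # cafe deja vu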
def post_attention(self, token, x):
"""Called after self-attention. The memory can be updated here.
Args:
token: Data returned by pre_attention, which can be used to carry over
state related to the current memory operation.
x: a Tensor of data after self-attention and feed-forward
Returns:
a (possibly modified) version of the input x
"""
with tf.variable_scope(self.name + "/post_attention", reuse=tf.AUTO_REUSE):
depth = common_layers.shape_list(x)[-1]
actual_batch_size = common_layers.shape_list(x)[0]
memory_output = tf.gather(token["retrieved_mem"],
tf.range(actual_batch_size))
output = tf.add(tf.layers.dense(x, depth, use_bias=False),
tf.layers.dense(memory_output, depth))
with tf.control_dependencies([output]):
with tf.control_dependencies([
self.write(token["x"], token["access_logits"])]):
return tf.identity(output) | def function[post_attention, parameter[self, token, x]]:
constant[Called after self-attention. The memory can be updated here.
Args:
token: Data returned by pre_attention, which can be used to carry over
state related to the current memory operation.
x: a Tensor of data after self-attention and feed-forward
Returns:
a (possibly modified) version of the input x
]
with call[name[tf].variable_scope, parameter[binary_operation[name[self].name + constant[/post_attention]]]] begin[:]
variable[depth] assign[=] call[call[name[common_layers].shape_list, parameter[name[x]]]][<ast.UnaryOp object at 0x7da20c6e6590>]
variable[actual_batch_size] assign[=] call[call[name[common_layers].shape_list, parameter[name[x]]]][constant[0]]
variable[memory_output] assign[=] call[name[tf].gather, parameter[call[name[token]][constant[retrieved_mem]], call[name[tf].range, parameter[name[actual_batch_size]]]]]
variable[output] assign[=] call[name[tf].add, parameter[call[name[tf].layers.dense, parameter[name[x], name[depth]]], call[name[tf].layers.dense, parameter[name[memory_output], name[depth]]]]]
with call[name[tf].control_dependencies, parameter[list[[<ast.Name object at 0x7da20c6e6fb0>]]]] begin[:]
with call[name[tf].control_dependencies, parameter[list[[<ast.Call object at 0x7da20c6e77f0>]]]] begin[:]
return[call[name[tf].identity, parameter[name[output]]]] | keyword[def] identifier[post_attention] ( identifier[self] , identifier[token] , identifier[x] ):
literal[string]
keyword[with] identifier[tf] . identifier[variable_scope] ( identifier[self] . identifier[name] + literal[string] , identifier[reuse] = identifier[tf] . identifier[AUTO_REUSE] ):
identifier[depth] = identifier[common_layers] . identifier[shape_list] ( identifier[x] )[- literal[int] ]
identifier[actual_batch_size] = identifier[common_layers] . identifier[shape_list] ( identifier[x] )[ literal[int] ]
identifier[memory_output] = identifier[tf] . identifier[gather] ( identifier[token] [ literal[string] ],
identifier[tf] . identifier[range] ( identifier[actual_batch_size] ))
identifier[output] = identifier[tf] . identifier[add] ( identifier[tf] . identifier[layers] . identifier[dense] ( identifier[x] , identifier[depth] , identifier[use_bias] = keyword[False] ),
identifier[tf] . identifier[layers] . identifier[dense] ( identifier[memory_output] , identifier[depth] ))
keyword[with] identifier[tf] . identifier[control_dependencies] ([ identifier[output] ]):
keyword[with] identifier[tf] . identifier[control_dependencies] ([
identifier[self] . identifier[write] ( identifier[token] [ literal[string] ], identifier[token] [ literal[string] ])]):
keyword[return] identifier[tf] . identifier[identity] ( identifier[output] ) | def post_attention(self, token, x):
"""Called after self-attention. The memory can be updated here.
Args:
token: Data returned by pre_attention, which can be used to carry over
state related to the current memory operation.
x: a Tensor of data after self-attention and feed-forward
Returns:
a (possibly modified) version of the input x
"""
with tf.variable_scope(self.name + '/post_attention', reuse=tf.AUTO_REUSE):
depth = common_layers.shape_list(x)[-1]
actual_batch_size = common_layers.shape_list(x)[0]
memory_output = tf.gather(token['retrieved_mem'], tf.range(actual_batch_size))
output = tf.add(tf.layers.dense(x, depth, use_bias=False), tf.layers.dense(memory_output, depth))
with tf.control_dependencies([output]):
with tf.control_dependencies([self.write(token['x'], token['access_logits'])]):
return tf.identity(output) # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]] |
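A NumPy sketch of the combine step only, with random matrices standing in for the two tf.layers.dense projections (the first bias-free, the second biased); shapes are illustrative:

import numpy as np

batch, depth = 2, 8
x = np.random.randn(batch, depth)              # post-attention activations
memory_output = np.random.randn(batch, depth)  # rows gathered from retrieved_mem
w_x = np.random.randn(depth, depth)            # stands in for dense(x, depth, use_bias=False)
w_m, b_m = np.random.randn(depth, depth), np.zeros(depth)
output = x @ w_x + (memory_output @ w_m + b_m)
print(output.shape)  # (2, 8)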
def _expand_to_beam_size(data, beam_size, batch_size, state_info=None):
"""Tile all the states to have batch_size * beam_size on the batch axis.
Parameters
----------
data : A single NDArray/Symbol or nested container with NDArrays/Symbol
Each NDArray/Symbol should have shape (N, ...) when state_info is None,
or same as the layout in state_info when it's not None.
beam_size : int
Beam size
batch_size : int
Batch size
state_info : Nested structure of dictionary, default None.
Descriptors for states, usually from decoder's ``state_info()``.
When None, this method assumes that the batch axis is the first dimension.
Returns
-------
new_states : Object that contains NDArrays/Symbols
Each NDArray/Symbol should have shape batch_size * beam_size on the batch axis.
"""
assert not state_info or isinstance(state_info, (type(data), dict)), \
        'data and state_info don\'t match, ' \
'got: {} vs {}.'.format(type(state_info), type(data))
if isinstance(data, list):
if not state_info:
state_info = [None] * len(data)
return [_expand_to_beam_size(d, beam_size, batch_size, s)
for d, s in zip(data, state_info)]
elif isinstance(data, tuple):
if not state_info:
state_info = [None] * len(data)
state_info = tuple(state_info)
return tuple(_expand_to_beam_size(d, beam_size, batch_size, s)
for d, s in zip(data, state_info))
elif isinstance(data, dict):
if not state_info:
state_info = {k: None for k in data.keys()}
return {k: _expand_to_beam_size(v, beam_size, batch_size, state_info[k])
for k, v in data.items()}
elif isinstance(data, mx.nd.NDArray):
if not state_info:
batch_axis = 0
else:
batch_axis = state_info['__layout__'].find('N')
if data.shape[batch_axis] != batch_size:
raise ValueError('The batch dimension of all the inner elements in states must be '
'{}, Found shape={}'.format(batch_size, data.shape))
new_shape = list(data.shape)
new_shape[batch_axis] = batch_size * beam_size
new_shape = tuple(new_shape)
return data.expand_dims(batch_axis+1)\
.broadcast_axes(axis=batch_axis+1, size=beam_size)\
.reshape(new_shape)
elif isinstance(data, mx.sym.Symbol):
if not state_info:
batch_axis = 0
else:
batch_axis = state_info['__layout__'].find('N')
new_shape = (0, ) * batch_axis + (-3, -2)
return data.expand_dims(batch_axis+1)\
.broadcast_axes(axis=batch_axis+1, size=beam_size)\
.reshape(new_shape)
else:
raise NotImplementedError | def function[_expand_to_beam_size, parameter[data, beam_size, batch_size, state_info]]:
constant[Tile all the states to have batch_size * beam_size on the batch axis.
Parameters
----------
data : A single NDArray/Symbol or nested container with NDArrays/Symbol
Each NDArray/Symbol should have shape (N, ...) when state_info is None,
or same as the layout in state_info when it's not None.
beam_size : int
Beam size
batch_size : int
Batch size
state_info : Nested structure of dictionary, default None.
Descriptors for states, usually from decoder's ``state_info()``.
When None, this method assumes that the batch axis is the first dimension.
Returns
-------
new_states : Object that contains NDArrays/Symbols
Each NDArray/Symbol should have shape batch_size * beam_size on the batch axis.
]
assert[<ast.BoolOp object at 0x7da18ede65c0>]
if call[name[isinstance], parameter[name[data], name[list]]] begin[:]
if <ast.UnaryOp object at 0x7da18ede68f0> begin[:]
variable[state_info] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18ede49a0>]] * call[name[len], parameter[name[data]]]]
return[<ast.ListComp object at 0x7da18ede5ea0>] | keyword[def] identifier[_expand_to_beam_size] ( identifier[data] , identifier[beam_size] , identifier[batch_size] , identifier[state_info] = keyword[None] ):
literal[string]
keyword[assert] keyword[not] identifier[state_info] keyword[or] identifier[isinstance] ( identifier[state_info] ,( identifier[type] ( identifier[data] ), identifier[dict] )), literal[string] literal[string] . identifier[format] ( identifier[type] ( identifier[state_info] ), identifier[type] ( identifier[data] ))
keyword[if] identifier[isinstance] ( identifier[data] , identifier[list] ):
keyword[if] keyword[not] identifier[state_info] :
identifier[state_info] =[ keyword[None] ]* identifier[len] ( identifier[data] )
keyword[return] [ identifier[_expand_to_beam_size] ( identifier[d] , identifier[beam_size] , identifier[batch_size] , identifier[s] )
keyword[for] identifier[d] , identifier[s] keyword[in] identifier[zip] ( identifier[data] , identifier[state_info] )]
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[tuple] ):
keyword[if] keyword[not] identifier[state_info] :
identifier[state_info] =[ keyword[None] ]* identifier[len] ( identifier[data] )
identifier[state_info] = identifier[tuple] ( identifier[state_info] )
keyword[return] identifier[tuple] ( identifier[_expand_to_beam_size] ( identifier[d] , identifier[beam_size] , identifier[batch_size] , identifier[s] )
keyword[for] identifier[d] , identifier[s] keyword[in] identifier[zip] ( identifier[data] , identifier[state_info] ))
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[dict] ):
keyword[if] keyword[not] identifier[state_info] :
identifier[state_info] ={ identifier[k] : keyword[None] keyword[for] identifier[k] keyword[in] identifier[data] . identifier[keys] ()}
keyword[return] { identifier[k] : identifier[_expand_to_beam_size] ( identifier[v] , identifier[beam_size] , identifier[batch_size] , identifier[state_info] [ identifier[k] ])
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[data] . identifier[items] ()}
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[mx] . identifier[nd] . identifier[NDArray] ):
keyword[if] keyword[not] identifier[state_info] :
identifier[batch_axis] = literal[int]
keyword[else] :
identifier[batch_axis] = identifier[state_info] [ literal[string] ]. identifier[find] ( literal[string] )
keyword[if] identifier[data] . identifier[shape] [ identifier[batch_axis] ]!= identifier[batch_size] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[batch_size] , identifier[data] . identifier[shape] ))
identifier[new_shape] = identifier[list] ( identifier[data] . identifier[shape] )
identifier[new_shape] [ identifier[batch_axis] ]= identifier[batch_size] * identifier[beam_size]
identifier[new_shape] = identifier[tuple] ( identifier[new_shape] )
keyword[return] identifier[data] . identifier[expand_dims] ( identifier[batch_axis] + literal[int] ). identifier[broadcast_axes] ( identifier[axis] = identifier[batch_axis] + literal[int] , identifier[size] = identifier[beam_size] ). identifier[reshape] ( identifier[new_shape] )
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[mx] . identifier[sym] . identifier[Symbol] ):
keyword[if] keyword[not] identifier[state_info] :
identifier[batch_axis] = literal[int]
keyword[else] :
identifier[batch_axis] = identifier[state_info] [ literal[string] ]. identifier[find] ( literal[string] )
identifier[new_shape] =( literal[int] ,)* identifier[batch_axis] +(- literal[int] ,- literal[int] )
keyword[return] identifier[data] . identifier[expand_dims] ( identifier[batch_axis] + literal[int] ). identifier[broadcast_axes] ( identifier[axis] = identifier[batch_axis] + literal[int] , identifier[size] = identifier[beam_size] ). identifier[reshape] ( identifier[new_shape] )
keyword[else] :
keyword[raise] identifier[NotImplementedError] | def _expand_to_beam_size(data, beam_size, batch_size, state_info=None):
"""Tile all the states to have batch_size * beam_size on the batch axis.
Parameters
----------
data : A single NDArray/Symbol or nested container with NDArrays/Symbol
Each NDArray/Symbol should have shape (N, ...) when state_info is None,
or same as the layout in state_info when it's not None.
beam_size : int
Beam size
batch_size : int
Batch size
state_info : Nested structure of dictionary, default None.
Descriptors for states, usually from decoder's ``state_info()``.
When None, this method assumes that the batch axis is the first dimension.
Returns
-------
new_states : Object that contains NDArrays/Symbols
Each NDArray/Symbol should have shape batch_size * beam_size on the batch axis.
"""
    assert not state_info or isinstance(state_info, (type(data), dict)), "data and state_info don't match, got: {} vs {}.".format(type(state_info), type(data))
if isinstance(data, list):
if not state_info:
state_info = [None] * len(data) # depends on [control=['if'], data=[]]
return [_expand_to_beam_size(d, beam_size, batch_size, s) for (d, s) in zip(data, state_info)] # depends on [control=['if'], data=[]]
elif isinstance(data, tuple):
if not state_info:
state_info = [None] * len(data)
state_info = tuple(state_info) # depends on [control=['if'], data=[]]
return tuple((_expand_to_beam_size(d, beam_size, batch_size, s) for (d, s) in zip(data, state_info))) # depends on [control=['if'], data=[]]
elif isinstance(data, dict):
if not state_info:
state_info = {k: None for k in data.keys()} # depends on [control=['if'], data=[]]
return {k: _expand_to_beam_size(v, beam_size, batch_size, state_info[k]) for (k, v) in data.items()} # depends on [control=['if'], data=[]]
elif isinstance(data, mx.nd.NDArray):
if not state_info:
batch_axis = 0 # depends on [control=['if'], data=[]]
else:
batch_axis = state_info['__layout__'].find('N')
if data.shape[batch_axis] != batch_size:
raise ValueError('The batch dimension of all the inner elements in states must be {}, Found shape={}'.format(batch_size, data.shape)) # depends on [control=['if'], data=['batch_size']]
new_shape = list(data.shape)
new_shape[batch_axis] = batch_size * beam_size
new_shape = tuple(new_shape)
return data.expand_dims(batch_axis + 1).broadcast_axes(axis=batch_axis + 1, size=beam_size).reshape(new_shape) # depends on [control=['if'], data=[]]
elif isinstance(data, mx.sym.Symbol):
if not state_info:
batch_axis = 0 # depends on [control=['if'], data=[]]
else:
batch_axis = state_info['__layout__'].find('N')
new_shape = (0,) * batch_axis + (-3, -2)
return data.expand_dims(batch_axis + 1).broadcast_axes(axis=batch_axis + 1, size=beam_size).reshape(new_shape) # depends on [control=['if'], data=[]]
else:
raise NotImplementedError |
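Usage sketch (requires MXNet): a (batch, hidden) NDArray gains a beam axis, is broadcast, and is flattened back to (batch * beam, hidden):

import mxnet as mx

state = mx.nd.arange(6).reshape((2, 3))                         # batch_size = 2
tiled = _expand_to_beam_size(state, beam_size=4, batch_size=2)
print(tiled.shape)                                              # (8, 3)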
def add_route(self, handler, uri, methods=frozenset({'GET'}), host=None,
strict_slashes=False):
'''A helper method to register class instance or
functions as a handler to the application url
routes.
:param handler: function or class instance
:param uri: path of the URL
    :param methods: list or tuple of methods allowed; these are overridden
                    if using an HTTPMethodView
:param host:
:return: function or class instance
'''
stream = False
# Handle HTTPMethodView differently
if hasattr(handler, 'view_class'):
http_methods = (
'GET', 'POST', 'PUT', 'HEAD', 'OPTIONS', 'PATCH', 'DELETE')
methods = set()
for method in http_methods:
_handler = getattr(handler.view_class, method.lower(), None)
if _handler:
methods.add(method)
if hasattr(_handler, 'is_stream'):
stream = True
# handle composition view differently
if isinstance(handler, self.composition_view_class):
methods = handler.handlers.keys()
for _handler in handler.handlers.values():
if hasattr(_handler, 'is_stream'):
stream = True
break
self.route(uri=uri, methods=methods, host=host,
strict_slashes=strict_slashes, stream=stream)(handler)
return handler | def function[add_route, parameter[self, handler, uri, methods, host, strict_slashes]]:
constant[A helper method to register class instance or
functions as a handler to the application url
routes.
:param handler: function or class instance
:param uri: path of the URL
    :param methods: list or tuple of methods allowed; these are overridden
                    if using an HTTPMethodView
:param host:
:return: function or class instance
]
variable[stream] assign[=] constant[False]
if call[name[hasattr], parameter[name[handler], constant[view_class]]] begin[:]
variable[http_methods] assign[=] tuple[[<ast.Constant object at 0x7da1b28bebc0>, <ast.Constant object at 0x7da1b28bd330>, <ast.Constant object at 0x7da1b28bc610>, <ast.Constant object at 0x7da1b28be9e0>, <ast.Constant object at 0x7da1b28bc490>, <ast.Constant object at 0x7da1b28bfeb0>, <ast.Constant object at 0x7da1b28be500>]]
variable[methods] assign[=] call[name[set], parameter[]]
for taget[name[method]] in starred[name[http_methods]] begin[:]
variable[_handler] assign[=] call[name[getattr], parameter[name[handler].view_class, call[name[method].lower, parameter[]], constant[None]]]
if name[_handler] begin[:]
call[name[methods].add, parameter[name[method]]]
if call[name[hasattr], parameter[name[_handler], constant[is_stream]]] begin[:]
variable[stream] assign[=] constant[True]
if call[name[isinstance], parameter[name[handler], name[self].composition_view_class]] begin[:]
variable[methods] assign[=] call[name[handler].handlers.keys, parameter[]]
for taget[name[_handler]] in starred[call[name[handler].handlers.values, parameter[]]] begin[:]
if call[name[hasattr], parameter[name[_handler], constant[is_stream]]] begin[:]
variable[stream] assign[=] constant[True]
break
call[call[name[self].route, parameter[]], parameter[name[handler]]]
return[name[handler]] | keyword[def] identifier[add_route] ( identifier[self] , identifier[handler] , identifier[uri] , identifier[methods] = identifier[frozenset] ({ literal[string] }), identifier[host] = keyword[None] ,
identifier[strict_slashes] = keyword[False] ):
literal[string]
identifier[stream] = keyword[False]
keyword[if] identifier[hasattr] ( identifier[handler] , literal[string] ):
identifier[http_methods] =(
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] )
identifier[methods] = identifier[set] ()
keyword[for] identifier[method] keyword[in] identifier[http_methods] :
identifier[_handler] = identifier[getattr] ( identifier[handler] . identifier[view_class] , identifier[method] . identifier[lower] (), keyword[None] )
keyword[if] identifier[_handler] :
identifier[methods] . identifier[add] ( identifier[method] )
keyword[if] identifier[hasattr] ( identifier[_handler] , literal[string] ):
identifier[stream] = keyword[True]
keyword[if] identifier[isinstance] ( identifier[handler] , identifier[self] . identifier[composition_view_class] ):
identifier[methods] = identifier[handler] . identifier[handlers] . identifier[keys] ()
keyword[for] identifier[_handler] keyword[in] identifier[handler] . identifier[handlers] . identifier[values] ():
keyword[if] identifier[hasattr] ( identifier[_handler] , literal[string] ):
identifier[stream] = keyword[True]
keyword[break]
identifier[self] . identifier[route] ( identifier[uri] = identifier[uri] , identifier[methods] = identifier[methods] , identifier[host] = identifier[host] ,
identifier[strict_slashes] = identifier[strict_slashes] , identifier[stream] = identifier[stream] )( identifier[handler] )
keyword[return] identifier[handler] | def add_route(self, handler, uri, methods=frozenset({'GET'}), host=None, strict_slashes=False):
"""A helper method to register class instance or
functions as a handler to the application url
routes.
:param handler: function or class instance
:param uri: path of the URL
    :param methods: list or tuple of methods allowed; these are overridden
                    if using an HTTPMethodView
:param host:
:return: function or class instance
"""
stream = False
# Handle HTTPMethodView differently
if hasattr(handler, 'view_class'):
http_methods = ('GET', 'POST', 'PUT', 'HEAD', 'OPTIONS', 'PATCH', 'DELETE')
methods = set()
for method in http_methods:
_handler = getattr(handler.view_class, method.lower(), None)
if _handler:
methods.add(method)
if hasattr(_handler, 'is_stream'):
stream = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['method']] # depends on [control=['if'], data=[]]
# handle composition view differently
if isinstance(handler, self.composition_view_class):
methods = handler.handlers.keys()
for _handler in handler.handlers.values():
if hasattr(_handler, 'is_stream'):
stream = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['_handler']] # depends on [control=['if'], data=[]]
self.route(uri=uri, methods=methods, host=host, strict_slashes=strict_slashes, stream=stream)(handler)
return handler |
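Hypothetical usage mirroring Sanic's public API (exact constructor signature varies by version): a plain coroutine with explicit methods, and an HTTPMethodView subclass whose methods are inferred via its view_class attribute:

from sanic import Sanic, response
from sanic.views import HTTPMethodView

app = Sanic("example")

async def plain(request):
    return response.text("ok")

class Item(HTTPMethodView):
    async def get(self, request):
        return response.text("item")

app.add_route(plain, "/plain", methods=["GET", "POST"])
app.add_route(Item.as_view(), "/item")  # GET inferred from the view class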
def properties_from_mapping(self, bt_addr):
"""Retrieve properties (namespace, instance) for the specified bt address."""
for addr, properties in self.eddystone_mappings:
if addr == bt_addr:
return properties
return None | def function[properties_from_mapping, parameter[self, bt_addr]]:
constant[Retrieve properties (namespace, instance) for the specified bt address.]
for taget[tuple[[<ast.Name object at 0x7da20e957820>, <ast.Name object at 0x7da20e957100>]]] in starred[name[self].eddystone_mappings] begin[:]
if compare[name[addr] equal[==] name[bt_addr]] begin[:]
return[name[properties]]
return[constant[None]] | keyword[def] identifier[properties_from_mapping] ( identifier[self] , identifier[bt_addr] ):
literal[string]
keyword[for] identifier[addr] , identifier[properties] keyword[in] identifier[self] . identifier[eddystone_mappings] :
keyword[if] identifier[addr] == identifier[bt_addr] :
keyword[return] identifier[properties]
keyword[return] keyword[None] | def properties_from_mapping(self, bt_addr):
"""Retrieve properties (namespace, instance) for the specified bt address."""
for (addr, properties) in self.eddystone_mappings:
if addr == bt_addr:
return properties # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return None |
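The lookup assumes eddystone_mappings holds (address, properties) pairs; a self-contained sketch with made-up values:

mappings = [("aa:bb:cc:dd:ee:ff", {"namespace": "0x1234", "instance": "0x01"})]

def properties_for(bt_addr, eddystone_mappings):
    for addr, properties in eddystone_mappings:
        if addr == bt_addr:
            return properties
    return None

print(properties_for("aa:bb:cc:dd:ee:ff", mappings))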
def parse_startup_message(self):
"""results in an OmapiStartupMessage
>>> d = b"\\0\\0\\0\\x64\\0\\0\\0\\x18"
>>> next(InBuffer(d).parse_startup_message()).validate()
"""
return parse_map(lambda args: OmapiStartupMessage(*args), parse_chain(self.parse_net32int, lambda _: self.parse_net32int())) | def function[parse_startup_message, parameter[self]]:
constant[results in an OmapiStartupMessage
>>> d = b"\0\0\0\x64\0\0\0\x18"
>>> next(InBuffer(d).parse_startup_message()).validate()
]
return[call[name[parse_map], parameter[<ast.Lambda object at 0x7da20c9900a0>, call[name[parse_chain], parameter[name[self].parse_net32int, <ast.Lambda object at 0x7da20c992aa0>]]]]] | keyword[def] identifier[parse_startup_message] ( identifier[self] ):
literal[string]
keyword[return] identifier[parse_map] ( keyword[lambda] identifier[args] : identifier[OmapiStartupMessage] (* identifier[args] ), identifier[parse_chain] ( identifier[self] . identifier[parse_net32int] , keyword[lambda] identifier[_] : identifier[self] . identifier[parse_net32int] ())) | def parse_startup_message(self):
"""results in an OmapiStartupMessage
>>> d = b"\\0\\0\\0\\x64\\0\\0\\0\\x18"
>>> next(InBuffer(d).parse_startup_message()).validate()
"""
return parse_map(lambda args: OmapiStartupMessage(*args), parse_chain(self.parse_net32int, lambda _: self.parse_net32int())) |
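For reference, the doctest bytes decode as two big-endian 32-bit integers (protocol version 100, header size 24); an equivalent struct sketch:

import struct

version, header_size = struct.unpack("!II", b"\x00\x00\x00\x64\x00\x00\x00\x18")
print(version, header_size)  # 100 24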
def file_length(in_file):
    '''Return the number of lines in a file.'''
fid = open(in_file)
data = fid.readlines()
fid.close()
return len(data) | def function[file_length, parameter[in_file]]:
    constant[Return the number of lines in a file.]
variable[fid] assign[=] call[name[open], parameter[name[in_file]]]
variable[data] assign[=] call[name[fid].readlines, parameter[]]
call[name[fid].close, parameter[]]
return[call[name[len], parameter[name[data]]]] | keyword[def] identifier[file_length] ( identifier[in_file] ):
literal[string]
identifier[fid] = identifier[open] ( identifier[in_file] )
identifier[data] = identifier[fid] . identifier[readlines] ()
identifier[fid] . identifier[close] ()
keyword[return] identifier[len] ( identifier[data] ) | def file_length(in_file):
"""Function to return the length of a file."""
fid = open(in_file)
data = fid.readlines()
fid.close()
return len(data) |
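An equivalent but lazier form streams the file with a context manager and sum(), so the whole file never sits in memory; a small sketch with a temporary file:

import tempfile

def count_lines(in_file):
    with open(in_file) as fid:
        return sum(1 for _ in fid)

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write('a\nb\nc\n')
print(count_lines(tmp.name))  # 3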
def parse_contents_summary(raw_json):
"""Parse a Confluence summary JSON list.
The method parses a JSON stream and returns an iterator
    of dictionaries. Each dictionary is a content summary.
:param raw_json: JSON string to parse
:returns: a generator of parsed content summaries.
"""
summary = json.loads(raw_json)
contents = summary['results']
for c in contents:
yield c | def function[parse_contents_summary, parameter[raw_json]]:
constant[Parse a Confluence summary JSON list.
The method parses a JSON stream and returns an iterator
    of dictionaries. Each dictionary is a content summary.
:param raw_json: JSON string to parse
:returns: a generator of parsed content summaries.
]
variable[summary] assign[=] call[name[json].loads, parameter[name[raw_json]]]
variable[contents] assign[=] call[name[summary]][constant[results]]
for taget[name[c]] in starred[name[contents]] begin[:]
<ast.Yield object at 0x7da1b0353850> | keyword[def] identifier[parse_contents_summary] ( identifier[raw_json] ):
literal[string]
identifier[summary] = identifier[json] . identifier[loads] ( identifier[raw_json] )
identifier[contents] = identifier[summary] [ literal[string] ]
keyword[for] identifier[c] keyword[in] identifier[contents] :
keyword[yield] identifier[c] | def parse_contents_summary(raw_json):
"""Parse a Confluence summary JSON list.
The method parses a JSON stream and returns an iterator
    of dictionaries. Each dictionary is a content summary.
:param raw_json: JSON string to parse
:returns: a generator of parsed content summaries.
"""
summary = json.loads(raw_json)
contents = summary['results']
for c in contents:
yield c # depends on [control=['for'], data=['c']] |
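Usage sketch with a minimal Confluence-style payload; the only assumption is the top-level "results" list the function reads:

raw = '{"results": [{"id": "1", "title": "Home"}, {"id": "2", "title": "FAQ"}]}'
for content in parse_contents_summary(raw):
    print(content["title"])  # Home, FAQ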
def check(self, dsm, **kwargs):
"""
Check if matrix and its mediation matrix are compliant.
    It means that the number of dependencies for each (line, column) is either
0 if the mediation matrix (line, column) is 0, or >0 if the mediation
matrix (line, column) is 1.
Args:
dsm (:class:`DesignStructureMatrix`): the DSM to check.
Returns:
bool: True if compliant, else False
"""
# generate complete_mediation_matrix according to each category
med_matrix = CompleteMediation.generate_mediation_matrix(dsm)
return CompleteMediation.matrices_compliance(dsm, med_matrix) | def function[check, parameter[self, dsm]]:
constant[
Check if matrix and its mediation matrix are compliant.
    It means that the number of dependencies for each (line, column) is either
0 if the mediation matrix (line, column) is 0, or >0 if the mediation
matrix (line, column) is 1.
Args:
dsm (:class:`DesignStructureMatrix`): the DSM to check.
Returns:
bool: True if compliant, else False
]
variable[med_matrix] assign[=] call[name[CompleteMediation].generate_mediation_matrix, parameter[name[dsm]]]
return[call[name[CompleteMediation].matrices_compliance, parameter[name[dsm], name[med_matrix]]]] | keyword[def] identifier[check] ( identifier[self] , identifier[dsm] ,** identifier[kwargs] ):
literal[string]
identifier[med_matrix] = identifier[CompleteMediation] . identifier[generate_mediation_matrix] ( identifier[dsm] )
keyword[return] identifier[CompleteMediation] . identifier[matrices_compliance] ( identifier[dsm] , identifier[med_matrix] ) | def check(self, dsm, **kwargs):
"""
Check if matrix and its mediation matrix are compliant.
    It means that the number of dependencies for each (line, column) is either
0 if the mediation matrix (line, column) is 0, or >0 if the mediation
matrix (line, column) is 1.
Args:
dsm (:class:`DesignStructureMatrix`): the DSM to check.
Returns:
bool: True if compliant, else False
"""
# generate complete_mediation_matrix according to each category
med_matrix = CompleteMediation.generate_mediation_matrix(dsm)
return CompleteMediation.matrices_compliance(dsm, med_matrix) |
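The compliance rule is easy to state over plain nested lists; a conceptual sketch (the real implementation lives on CompleteMediation.matrices_compliance):

def matrices_compliant(dep_matrix, med_matrix):
    # zero dependencies where mediation forbids them, at least one where it allows them
    return all(
        dep == 0 if med == 0 else dep > 0
        for dep_row, med_row in zip(dep_matrix, med_matrix)
        for dep, med in zip(dep_row, med_row)
    )

print(matrices_compliant([[0, 2], [0, 1]], [[0, 1], [0, 1]]))  # True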
def parse(self, data):
"""
populate instance with values and sub-sections
:param data: UTF-8 encoded manifest
:type data: bytes
"""
data = data.decode('utf-8')
self.linesep = detect_linesep(data)
# the first section is the main one for the manifest. It's
# also where we will check for our newline separator
sections = parse_sections(data)
self.load(next(sections))
# and all following sections are considered sub-sections
for section in sections:
next_section = ManifestSection(None)
next_section.load(section)
self.sub_sections[next_section.primary()] = next_section | def function[parse, parameter[self, data]]:
constant[
populate instance with values and sub-sections
:param data: UTF-8 encoded manifest
:type data: bytes
]
variable[data] assign[=] call[name[data].decode, parameter[constant[utf-8]]]
name[self].linesep assign[=] call[name[detect_linesep], parameter[name[data]]]
variable[sections] assign[=] call[name[parse_sections], parameter[name[data]]]
call[name[self].load, parameter[call[name[next], parameter[name[sections]]]]]
for taget[name[section]] in starred[name[sections]] begin[:]
variable[next_section] assign[=] call[name[ManifestSection], parameter[constant[None]]]
call[name[next_section].load, parameter[name[section]]]
call[name[self].sub_sections][call[name[next_section].primary, parameter[]]] assign[=] name[next_section] | keyword[def] identifier[parse] ( identifier[self] , identifier[data] ):
literal[string]
identifier[data] = identifier[data] . identifier[decode] ( literal[string] )
identifier[self] . identifier[linesep] = identifier[detect_linesep] ( identifier[data] )
identifier[sections] = identifier[parse_sections] ( identifier[data] )
identifier[self] . identifier[load] ( identifier[next] ( identifier[sections] ))
keyword[for] identifier[section] keyword[in] identifier[sections] :
identifier[next_section] = identifier[ManifestSection] ( keyword[None] )
identifier[next_section] . identifier[load] ( identifier[section] )
identifier[self] . identifier[sub_sections] [ identifier[next_section] . identifier[primary] ()]= identifier[next_section] | def parse(self, data):
"""
populate instance with values and sub-sections
:param data: UTF-8 encoded manifest
:type data: bytes
"""
data = data.decode('utf-8')
self.linesep = detect_linesep(data)
# the first section is the main one for the manifest. It's
# also where we will check for our newline separator
sections = parse_sections(data)
self.load(next(sections))
# and all following sections are considered sub-sections
for section in sections:
next_section = ManifestSection(None)
next_section.load(section)
self.sub_sections[next_section.primary()] = next_section # depends on [control=['for'], data=['section']] |
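Hypothetical usage, assuming the surrounding module's Manifest class plus its detect_linesep / parse_sections helpers; the sub-section key is whatever primary() returns (presumably the Name attribute):

data = (b"Manifest-Version: 1.0\r\n"
        b"\r\n"
        b"Name: com/example/Foo.class\r\n"
        b"SHA-256-Digest: base64digest=\r\n")
m = Manifest()
m.parse(data)
print(repr(m.linesep), list(m.sub_sections))  # expected: '\r\n' ['com/example/Foo.class']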
def select(self, fcn, *args, **kwds):
""" Return arrays from an hdf5 file that satisfy the given function
Parameters
----------
fcn : a function
        A function that accepts the same number of arguments as keys given
and returns a boolean array of the same length.
args : strings
A variable number of strings that are keys into the hdf5. These must
refer to arrays of equal length.
chunksize : {1e6, int}, optional
Number of elements to read and process at a time.
return_indices : bool, optional
If True, also return the indices of elements passing the function.
Returns
-------
values : np.ndarrays
A variable number of arrays depending on the number of keys into
the hdf5 file that are given. If return_indices is True, the first
element is an array of indices of elements passing the function.
>>> f = HFile(filename)
>>> snr = f.select(lambda snr: snr > 6, 'H1/snr')
"""
# get references to each array
refs = {}
data = {}
for arg in args:
refs[arg] = self[arg]
data[arg] = []
return_indices = kwds.get('return_indices', False)
indices = np.array([], dtype=np.uint64)
    # To conserve memory, read the array in chunks
chunksize = kwds.get('chunksize', int(1e6))
    size = len(refs[arg])  # all keys refer to equal-length arrays, so any one gives the size
i = 0
while i < size:
r = i + chunksize if i + chunksize < size else size
        # Read each chunk's worth of data and find where it passes the function
partial = [refs[arg][i:r] for arg in args]
keep = fcn(*partial)
if return_indices:
indices = np.concatenate([indices, np.flatnonzero(keep) + i])
        # Store only the results that pass the function
for arg, part in zip(args, partial):
data[arg].append(part[keep])
i += chunksize
# Combine the partial results into full arrays
if len(args) == 1:
res = np.concatenate(data[args[0]])
if return_indices:
return indices, res
else:
return res
else:
res = tuple(np.concatenate(data[arg]) for arg in args)
if return_indices:
return (indices,) + res
else:
return res | def function[select, parameter[self, fcn]]:
constant[ Return arrays from an hdf5 file that satisfy the given function
Parameters
----------
fcn : a function
        A function that accepts the same number of arguments as keys given
and returns a boolean array of the same length.
args : strings
A variable number of strings that are keys into the hdf5. These must
refer to arrays of equal length.
chunksize : {1e6, int}, optional
Number of elements to read and process at a time.
return_indices : bool, optional
If True, also return the indices of elements passing the function.
Returns
-------
values : np.ndarrays
A variable number of arrays depending on the number of keys into
the hdf5 file that are given. If return_indices is True, the first
element is an array of indices of elements passing the function.
>>> f = HFile(filename)
>>> snr = f.select(lambda snr: snr > 6, 'H1/snr')
]
variable[refs] assign[=] dictionary[[], []]
variable[data] assign[=] dictionary[[], []]
for taget[name[arg]] in starred[name[args]] begin[:]
call[name[refs]][name[arg]] assign[=] call[name[self]][name[arg]]
call[name[data]][name[arg]] assign[=] list[[]]
variable[return_indices] assign[=] call[name[kwds].get, parameter[constant[return_indices], constant[False]]]
variable[indices] assign[=] call[name[np].array, parameter[list[[]]]]
variable[chunksize] assign[=] call[name[kwds].get, parameter[constant[chunksize], call[name[int], parameter[constant[1000000.0]]]]]
variable[size] assign[=] call[name[len], parameter[call[name[refs]][name[arg]]]]
variable[i] assign[=] constant[0]
while compare[name[i] less[<] name[size]] begin[:]
variable[r] assign[=] <ast.IfExp object at 0x7da20c992050>
variable[partial] assign[=] <ast.ListComp object at 0x7da20c990af0>
variable[keep] assign[=] call[name[fcn], parameter[<ast.Starred object at 0x7da20c9915a0>]]
if name[return_indices] begin[:]
variable[indices] assign[=] call[name[np].concatenate, parameter[list[[<ast.Name object at 0x7da18dc046a0>, <ast.BinOp object at 0x7da18dc07880>]]]]
for taget[tuple[[<ast.Name object at 0x7da18dc06230>, <ast.Name object at 0x7da18dc071c0>]]] in starred[call[name[zip], parameter[name[args], name[partial]]]] begin[:]
call[call[name[data]][name[arg]].append, parameter[call[name[part]][name[keep]]]]
<ast.AugAssign object at 0x7da18dc05e40>
if compare[call[name[len], parameter[name[args]]] equal[==] constant[1]] begin[:]
variable[res] assign[=] call[name[np].concatenate, parameter[call[name[data]][call[name[args]][constant[0]]]]]
if name[return_indices] begin[:]
return[tuple[[<ast.Name object at 0x7da18dc05210>, <ast.Name object at 0x7da18dc07820>]]] | keyword[def] identifier[select] ( identifier[self] , identifier[fcn] ,* identifier[args] ,** identifier[kwds] ):
literal[string]
identifier[refs] ={}
identifier[data] ={}
keyword[for] identifier[arg] keyword[in] identifier[args] :
identifier[refs] [ identifier[arg] ]= identifier[self] [ identifier[arg] ]
identifier[data] [ identifier[arg] ]=[]
identifier[return_indices] = identifier[kwds] . identifier[get] ( literal[string] , keyword[False] )
identifier[indices] = identifier[np] . identifier[array] ([], identifier[dtype] = identifier[np] . identifier[uint64] )
identifier[chunksize] = identifier[kwds] . identifier[get] ( literal[string] , identifier[int] ( literal[int] ))
identifier[size] = identifier[len] ( identifier[refs] [ identifier[arg] ])
identifier[i] = literal[int]
keyword[while] identifier[i] < identifier[size] :
identifier[r] = identifier[i] + identifier[chunksize] keyword[if] identifier[i] + identifier[chunksize] < identifier[size] keyword[else] identifier[size]
identifier[partial] =[ identifier[refs] [ identifier[arg] ][ identifier[i] : identifier[r] ] keyword[for] identifier[arg] keyword[in] identifier[args] ]
identifier[keep] = identifier[fcn] (* identifier[partial] )
keyword[if] identifier[return_indices] :
identifier[indices] = identifier[np] . identifier[concatenate] ([ identifier[indices] , identifier[np] . identifier[flatnonzero] ( identifier[keep] )+ identifier[i] ])
keyword[for] identifier[arg] , identifier[part] keyword[in] identifier[zip] ( identifier[args] , identifier[partial] ):
identifier[data] [ identifier[arg] ]. identifier[append] ( identifier[part] [ identifier[keep] ])
identifier[i] += identifier[chunksize]
keyword[if] identifier[len] ( identifier[args] )== literal[int] :
identifier[res] = identifier[np] . identifier[concatenate] ( identifier[data] [ identifier[args] [ literal[int] ]])
keyword[if] identifier[return_indices] :
keyword[return] identifier[indices] , identifier[res]
keyword[else] :
keyword[return] identifier[res]
keyword[else] :
identifier[res] = identifier[tuple] ( identifier[np] . identifier[concatenate] ( identifier[data] [ identifier[arg] ]) keyword[for] identifier[arg] keyword[in] identifier[args] )
keyword[if] identifier[return_indices] :
keyword[return] ( identifier[indices] ,)+ identifier[res]
keyword[else] :
keyword[return] identifier[res] | def select(self, fcn, *args, **kwds):
""" Return arrays from an hdf5 file that satisfy the given function
Parameters
----------
fcn : a function
        A function that accepts the same number of arguments as keys given
and returns a boolean array of the same length.
args : strings
A variable number of strings that are keys into the hdf5. These must
refer to arrays of equal length.
chunksize : {1e6, int}, optional
Number of elements to read and process at a time.
return_indices : bool, optional
If True, also return the indices of elements passing the function.
Returns
-------
values : np.ndarrays
A variable number of arrays depending on the number of keys into
the hdf5 file that are given. If return_indices is True, the first
element is an array of indices of elements passing the function.
>>> f = HFile(filename)
>>> snr = f.select(lambda snr: snr > 6, 'H1/snr')
"""
# get references to each array
refs = {}
data = {}
for arg in args:
refs[arg] = self[arg]
data[arg] = [] # depends on [control=['for'], data=['arg']]
return_indices = kwds.get('return_indices', False)
indices = np.array([], dtype=np.uint64)
    # To conserve memory, read the array in chunks
chunksize = kwds.get('chunksize', int(1000000.0))
size = len(refs[arg])
i = 0
while i < size:
r = i + chunksize if i + chunksize < size else size
        # Read each chunk's worth of data and find where it passes the function
partial = [refs[arg][i:r] for arg in args]
keep = fcn(*partial)
if return_indices:
indices = np.concatenate([indices, np.flatnonzero(keep) + i]) # depends on [control=['if'], data=[]]
        # Store only the results that pass the function
for (arg, part) in zip(args, partial):
data[arg].append(part[keep]) # depends on [control=['for'], data=[]]
i += chunksize # depends on [control=['while'], data=['i', 'size']]
# Combine the partial results into full arrays
if len(args) == 1:
res = np.concatenate(data[args[0]])
if return_indices:
return (indices, res) # depends on [control=['if'], data=[]]
else:
return res # depends on [control=['if'], data=[]]
else:
res = tuple((np.concatenate(data[arg]) for arg in args))
if return_indices:
return (indices,) + res # depends on [control=['if'], data=[]]
else:
return res |
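The chunking pattern is independent of HDF5; a standalone NumPy sketch of the same filter-per-chunk-then-concatenate idea for a single array:

import numpy as np

def select_chunked(fcn, array, chunksize=4):
    kept = []
    for i in range(0, len(array), chunksize):
        part = array[i:i + chunksize]
        kept.append(part[fcn(part)])  # boolean-mask each chunk, keep passing rows
    return np.concatenate(kept)

print(select_chunked(lambda x: x > 6, np.arange(10)))  # [7 8 9]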
def remote(self, name='origin'):
""":return: Remote with the specified name
:raise ValueError: if no remote with such a name exists"""
r = Remote(self, name)
if not r.exists():
raise ValueError("Remote named '%s' didn't exist" % name)
return r | def function[remote, parameter[self, name]]:
constant[:return: Remote with the specified name
:raise ValueError: if no remote with such a name exists]
variable[r] assign[=] call[name[Remote], parameter[name[self], name[name]]]
if <ast.UnaryOp object at 0x7da1b1d47130> begin[:]
<ast.Raise object at 0x7da1b1d9c850>
return[name[r]] | keyword[def] identifier[remote] ( identifier[self] , identifier[name] = literal[string] ):
literal[string]
identifier[r] = identifier[Remote] ( identifier[self] , identifier[name] )
keyword[if] keyword[not] identifier[r] . identifier[exists] ():
keyword[raise] identifier[ValueError] ( literal[string] % identifier[name] )
keyword[return] identifier[r] | def remote(self, name='origin'):
""":return: Remote with the specified name
:raise ValueError: if no remote with such a name exists"""
r = Remote(self, name)
if not r.exists():
raise ValueError("Remote named '%s' didn't exist" % name) # depends on [control=['if'], data=[]]
return r |
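Usage sketch with GitPython (the library this method belongs to); assumes the working directory is a repository with an 'origin' remote:

from git import Repo

repo = Repo(".")
origin = repo.remote("origin")  # raises ValueError if no such remote exists
print(origin.name, list(origin.urls))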
def relative_deviation(h1, h2): # 18 us @array, 42 us @list \w 100 bins
r"""
Calculate the deviation between two histograms.
    The relative deviation between two histograms :math:`H` and :math:`H'` of size :math:`M` is
defined as:
.. math::
d_{rd}(H, H') =
\frac{
\sqrt{\sum_{m=1}^M(H_m - H'_m)^2}
}{
\frac{1}{2}
\left(
\sqrt{\sum_{m=1}^M H_m^2} +
\sqrt{\sum_{m=1}^M {H'}_m^2}
\right)
}
*Attributes:*
    - semimetric (triangle inequality satisfied?)
*Attributes for normalized histograms:*
- :math:`d(H, H')\in[0, \sqrt{2}]`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-normalized histograms:*
- :math:`d(H, H')\in[0, 2]`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-equal histograms:*
- not applicable
Parameters
----------
h1 : sequence
The first histogram.
h2 : sequence
The second histogram, same bins as ``h1``.
Returns
-------
relative_deviation : float
Relative deviation between the two histograms.
"""
h1, h2 = __prepare_histogram(h1, h2)
numerator = math.sqrt(scipy.sum(scipy.square(h1 - h2)))
denominator = (math.sqrt(scipy.sum(scipy.square(h1))) + math.sqrt(scipy.sum(scipy.square(h2)))) / 2.
return numerator / denominator | def function[relative_deviation, parameter[h1, h2]]:
constant[
Calculate the deviation between two histograms.
    The relative deviation between two histograms :math:`H` and :math:`H'` of size :math:`M` is
defined as:
.. math::
d_{rd}(H, H') =
\frac{
\sqrt{\sum_{m=1}^M(H_m - H'_m)^2}
}{
\frac{1}{2}
\left(
\sqrt{\sum_{m=1}^M H_m^2} +
\sqrt{\sum_{m=1}^M {H'}_m^2}
\right)
}
*Attributes:*
    - semimetric (triangle inequality satisfied?)
*Attributes for normalized histograms:*
- :math:`d(H, H')\in[0, \sqrt{2}]`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-normalized histograms:*
- :math:`d(H, H')\in[0, 2]`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-equal histograms:*
- not applicable
Parameters
----------
h1 : sequence
The first histogram.
h2 : sequence
The second histogram, same bins as ``h1``.
Returns
-------
relative_deviation : float
Relative deviation between the two histograms.
]
<ast.Tuple object at 0x7da204622f80> assign[=] call[name[__prepare_histogram], parameter[name[h1], name[h2]]]
variable[numerator] assign[=] call[name[math].sqrt, parameter[call[name[scipy].sum, parameter[call[name[scipy].square, parameter[binary_operation[name[h1] - name[h2]]]]]]]]
variable[denominator] assign[=] binary_operation[binary_operation[call[name[math].sqrt, parameter[call[name[scipy].sum, parameter[call[name[scipy].square, parameter[name[h1]]]]]]] + call[name[math].sqrt, parameter[call[name[scipy].sum, parameter[call[name[scipy].square, parameter[name[h2]]]]]]]] / constant[2.0]]
return[binary_operation[name[numerator] / name[denominator]]] | keyword[def] identifier[relative_deviation] ( identifier[h1] , identifier[h2] ):
literal[string]
identifier[h1] , identifier[h2] = identifier[__prepare_histogram] ( identifier[h1] , identifier[h2] )
identifier[numerator] = identifier[math] . identifier[sqrt] ( identifier[scipy] . identifier[sum] ( identifier[scipy] . identifier[square] ( identifier[h1] - identifier[h2] )))
identifier[denominator] =( identifier[math] . identifier[sqrt] ( identifier[scipy] . identifier[sum] ( identifier[scipy] . identifier[square] ( identifier[h1] )))+ identifier[math] . identifier[sqrt] ( identifier[scipy] . identifier[sum] ( identifier[scipy] . identifier[square] ( identifier[h2] ))))/ literal[int]
keyword[return] identifier[numerator] / identifier[denominator] | def relative_deviation(h1, h2): # 18 us @array, 42 us @list \w 100 bins
"\n Calculate the deviation between two histograms.\n \n The relative deviation between two histograms :math:`H` and :math:`H'` of size :math:`m` is\n defined as:\n \n .. math::\n \n d_{rd}(H, H') =\n \\frac{\n \\sqrt{\\sum_{m=1}^M(H_m - H'_m)^2}\n }{\n \\frac{1}{2}\n \\left(\n \\sqrt{\\sum_{m=1}^M H_m^2} +\n \\sqrt{\\sum_{m=1}^M {H'}_m^2}\n \\right)\n }\n \n *Attributes:*\n\n - semimetric (triangle equation satisfied?)\n \n *Attributes for normalized histograms:*\n\n - :math:`d(H, H')\\in[0, \\sqrt{2}]`\n - :math:`d(H, H) = 0`\n - :math:`d(H, H') = d(H', H)`\n \n *Attributes for not-normalized histograms:*\n\n - :math:`d(H, H')\\in[0, 2]`\n - :math:`d(H, H) = 0`\n - :math:`d(H, H') = d(H', H)`\n \n *Attributes for not-equal histograms:*\n\n - not applicable \n \n Parameters\n ----------\n h1 : sequence\n The first histogram.\n h2 : sequence\n The second histogram, same bins as ``h1``.\n \n Returns\n -------\n relative_deviation : float\n Relative deviation between the two histograms.\n "
(h1, h2) = __prepare_histogram(h1, h2)
numerator = math.sqrt(scipy.sum(scipy.square(h1 - h2)))
denominator = (math.sqrt(scipy.sum(scipy.square(h1))) + math.sqrt(scipy.sum(scipy.square(h2)))) / 2.0
return numerator / denominator |
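A worked check of the normalized bound: for h1 = [1, 0] and h2 = [0, 1] the numerator is sqrt(2) and the denominator is (1 + 1)/2 = 1, so d_rd reaches its maximum sqrt(2):

import numpy as np

h1, h2 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
num = np.sqrt(np.sum(np.square(h1 - h2)))
den = (np.sqrt(np.sum(np.square(h1))) + np.sqrt(np.sum(np.square(h2)))) / 2.0
print(num / den)  # 1.4142... == sqrt(2)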
def create_archive(archive, filenames, verbosity=0, program=None, interactive=True):
"""Create given archive with given files."""
util.check_new_filename(archive)
util.check_archive_filelist(filenames)
if verbosity >= 0:
util.log_info("Creating %s ..." % archive)
res = _create_archive(archive, filenames, verbosity=verbosity,
interactive=interactive, program=program)
if verbosity >= 0:
util.log_info("... %s created." % archive)
return res | def function[create_archive, parameter[archive, filenames, verbosity, program, interactive]]:
constant[Create given archive with given files.]
call[name[util].check_new_filename, parameter[name[archive]]]
call[name[util].check_archive_filelist, parameter[name[filenames]]]
if compare[name[verbosity] greater_or_equal[>=] constant[0]] begin[:]
call[name[util].log_info, parameter[binary_operation[constant[Creating %s ...] <ast.Mod object at 0x7da2590d6920> name[archive]]]]
variable[res] assign[=] call[name[_create_archive], parameter[name[archive], name[filenames]]]
if compare[name[verbosity] greater_or_equal[>=] constant[0]] begin[:]
call[name[util].log_info, parameter[binary_operation[constant[... %s created.] <ast.Mod object at 0x7da2590d6920> name[archive]]]]
return[name[res]] | keyword[def] identifier[create_archive] ( identifier[archive] , identifier[filenames] , identifier[verbosity] = literal[int] , identifier[program] = keyword[None] , identifier[interactive] = keyword[True] ):
literal[string]
identifier[util] . identifier[check_new_filename] ( identifier[archive] )
identifier[util] . identifier[check_archive_filelist] ( identifier[filenames] )
keyword[if] identifier[verbosity] >= literal[int] :
identifier[util] . identifier[log_info] ( literal[string] % identifier[archive] )
identifier[res] = identifier[_create_archive] ( identifier[archive] , identifier[filenames] , identifier[verbosity] = identifier[verbosity] ,
identifier[interactive] = identifier[interactive] , identifier[program] = identifier[program] )
keyword[if] identifier[verbosity] >= literal[int] :
identifier[util] . identifier[log_info] ( literal[string] % identifier[archive] )
keyword[return] identifier[res] | def create_archive(archive, filenames, verbosity=0, program=None, interactive=True):
"""Create given archive with given files."""
util.check_new_filename(archive)
util.check_archive_filelist(filenames)
if verbosity >= 0:
util.log_info('Creating %s ...' % archive) # depends on [control=['if'], data=[]]
res = _create_archive(archive, filenames, verbosity=verbosity, interactive=interactive, program=program)
if verbosity >= 0:
util.log_info('... %s created.' % archive) # depends on [control=['if'], data=[]]
return res |
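Hypothetical usage, assuming the patool-style util helpers and the _create_archive backend from the same module; file names are illustrative:

# pack two paths into a gzip-compressed tarball, logging at normal verbosity
create_archive("backup.tar.gz", ["notes.txt", "data/"], verbosity=0)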
def __pathToTuple(self, path):
"""
Convert directory or file path to its tuple identifier.
Parameters
----------
path : str
Path to convert. It can look like /, /directory, /directory/ or /directory/filename.
Returns
-------
tup_id : tuple
Two element tuple identifier of directory/file of (`directory`, `filename`) format. If path leads to main
directory, then both fields of tuple will be ``None``. If path leads to a directory, then field `filename`
will be ``None``.
Raises
------
YTFS.PathConvertError
When invalid path is given.
"""
if not path or path.count('/') > 2:
raise YTFS.PathConvertError("Bad path given") # empty or too deep path
try:
split = path.split('/')
except (AttributeError, TypeError):
raise TypeError("Path has to be string") #path is not a string
if split[0]:
raise YTFS.PathConvertError("Path needs to start with '/'") # path doesn't start with '/'.
del split[0]
try:
if not split[-1]: split.pop() # given path ended with '/'.
except IndexError:
raise YTFS.PathConvertError("Bad path given") # at least one element in split should exist at the moment
if len(split) > 2:
raise YTFS.PathConvertError("Path is too deep. Max allowed level is 2") # should happen due to first check, but ...
try:
d = split[0]
except IndexError:
d = None
try:
f = split[1]
except IndexError:
f = None
if not d and f:
raise YTFS.PathConvertError("Bad path given") # filename is present, but directory is not #sheeeeeeiiit
return (d, f) | def function[__pathToTuple, parameter[self, path]]:
constant[
Convert directory or file path to its tuple identifier.
Parameters
----------
path : str
Path to convert. It can look like /, /directory, /directory/ or /directory/filename.
Returns
-------
tup_id : tuple
Two element tuple identifier of directory/file of (`directory`, `filename`) format. If path leads to main
directory, then both fields of tuple will be ``None``. If path leads to a directory, then field `filename`
will be ``None``.
Raises
------
YTFS.PathConvertError
When invalid path is given.
]
if <ast.BoolOp object at 0x7da18f8127d0> begin[:]
<ast.Raise object at 0x7da18f812b90>
<ast.Try object at 0x7da18f812050>
if call[name[split]][constant[0]] begin[:]
<ast.Raise object at 0x7da18f8117b0>
<ast.Delete object at 0x7da18f813c10>
<ast.Try object at 0x7da18f811570>
if compare[call[name[len], parameter[name[split]]] greater[>] constant[2]] begin[:]
<ast.Raise object at 0x7da18f813e20>
<ast.Try object at 0x7da18f812920>
<ast.Try object at 0x7da18f813a60>
if <ast.BoolOp object at 0x7da18f811f30> begin[:]
<ast.Raise object at 0x7da18f810e80>
return[tuple[[<ast.Name object at 0x7da18f8105b0>, <ast.Name object at 0x7da18f812f50>]]] | keyword[def] identifier[__pathToTuple] ( identifier[self] , identifier[path] ):
literal[string]
keyword[if] keyword[not] identifier[path] keyword[or] identifier[path] . identifier[count] ( literal[string] )> literal[int] :
keyword[raise] identifier[YTFS] . identifier[PathConvertError] ( literal[string] )
keyword[try] :
identifier[split] = identifier[path] . identifier[split] ( literal[string] )
keyword[except] ( identifier[AttributeError] , identifier[TypeError] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[split] [ literal[int] ]:
keyword[raise] identifier[YTFS] . identifier[PathConvertError] ( literal[string] )
keyword[del] identifier[split] [ literal[int] ]
keyword[try] :
keyword[if] keyword[not] identifier[split] [- literal[int] ]: identifier[split] . identifier[pop] ()
keyword[except] identifier[IndexError] :
keyword[raise] identifier[YTFS] . identifier[PathConvertError] ( literal[string] )
keyword[if] identifier[len] ( identifier[split] )> literal[int] :
keyword[raise] identifier[YTFS] . identifier[PathConvertError] ( literal[string] )
keyword[try] :
identifier[d] = identifier[split] [ literal[int] ]
keyword[except] identifier[IndexError] :
identifier[d] = keyword[None]
keyword[try] :
identifier[f] = identifier[split] [ literal[int] ]
keyword[except] identifier[IndexError] :
identifier[f] = keyword[None]
keyword[if] keyword[not] identifier[d] keyword[and] identifier[f] :
keyword[raise] identifier[YTFS] . identifier[PathConvertError] ( literal[string] )
keyword[return] ( identifier[d] , identifier[f] ) | def __pathToTuple(self, path):
"""
Convert directory or file path to its tuple identifier.
Parameters
----------
path : str
Path to convert. It can look like /, /directory, /directory/ or /directory/filename.
Returns
-------
tup_id : tuple
Two element tuple identifier of directory/file of (`directory`, `filename`) format. If path leads to main
directory, then both fields of tuple will be ``None``. If path leads to a directory, then field `filename`
will be ``None``.
Raises
------
YTFS.PathConvertError
When invalid path is given.
"""
if not path or path.count('/') > 2:
raise YTFS.PathConvertError('Bad path given') # empty or too deep path # depends on [control=['if'], data=[]]
try:
split = path.split('/') # depends on [control=['try'], data=[]]
except (AttributeError, TypeError):
raise TypeError('Path has to be string') #path is not a string # depends on [control=['except'], data=[]]
if split[0]:
raise YTFS.PathConvertError("Path needs to start with '/'") # path doesn't start with '/'. # depends on [control=['if'], data=[]]
del split[0]
try:
if not split[-1]:
split.pop() # given path ended with '/'. # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except IndexError:
raise YTFS.PathConvertError('Bad path given') # at least one element in split should exist at the moment # depends on [control=['except'], data=[]]
if len(split) > 2:
raise YTFS.PathConvertError('Path is too deep. Max allowed level is 2') # should happen due to first check, but ... # depends on [control=['if'], data=[]]
try:
d = split[0] # depends on [control=['try'], data=[]]
except IndexError:
d = None # depends on [control=['except'], data=[]]
try:
f = split[1] # depends on [control=['try'], data=[]]
except IndexError:
f = None # depends on [control=['except'], data=[]]
if not d and f:
raise YTFS.PathConvertError('Bad path given') # filename is present, but directory is not # depends on [control=['if'], data=[]]
return (d, f) |
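
For reference, a standalone sketch of the same path-to-tuple contract, with YTFS.PathConvertError simplified to ValueError so the snippet runs on its own:

def path_to_tuple(path):
    if not isinstance(path, str):
        raise TypeError("Path has to be a string")
    if not path or path.count('/') > 2:
        raise ValueError("Bad path given")
    if not path.startswith('/'):
        raise ValueError("Path needs to start with '/'")
    parts = path.split('/')[1:]      # drop the empty piece before the leading '/'
    if parts and not parts[-1]:
        parts.pop()                  # tolerate a trailing '/'
    if len(parts) > 2:
        raise ValueError("Path is too deep. Max allowed level is 2")
    d = parts[0] if len(parts) >= 1 else None
    f = parts[1] if len(parts) >= 2 else None
    if not d and f:
        raise ValueError("Bad path given")   # filename without a directory
    return (d, f)

assert path_to_tuple('/') == (None, None)
assert path_to_tuple('/music/') == ('music', None)
assert path_to_tuple('/music/track.mp4') == ('music', 'track.mp4')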
def messages(path, thread, fmt, nocolor, timezones, utc, noprogress, resolve, directory):
"""
Conversion of Facebook chat history.
"""
with colorize_output(nocolor):
try:
chat_history = _process_history(
path=path, thread=thread, timezones=timezones,
utc=utc, noprogress=noprogress, resolve=resolve)
except ProcessingFailure:
return
if directory:
set_all_color(enabled=False)
write(fmt, chat_history, directory or sys.stdout) | def function[messages, parameter[path, thread, fmt, nocolor, timezones, utc, noprogress, resolve, directory]]:
constant[
Conversion of Facebook chat history.
]
with call[name[colorize_output], parameter[name[nocolor]]] begin[:]
<ast.Try object at 0x7da18f58cd30>
if name[directory] begin[:]
call[name[set_all_color], parameter[]]
call[name[write], parameter[name[fmt], name[chat_history], <ast.BoolOp object at 0x7da18ede5120>]] | keyword[def] identifier[messages] ( identifier[path] , identifier[thread] , identifier[fmt] , identifier[nocolor] , identifier[timezones] , identifier[utc] , identifier[noprogress] , identifier[resolve] , identifier[directory] ):
literal[string]
keyword[with] identifier[colorize_output] ( identifier[nocolor] ):
keyword[try] :
identifier[chat_history] = identifier[_process_history] (
identifier[path] = identifier[path] , identifier[thread] = identifier[thread] , identifier[timezones] = identifier[timezones] ,
identifier[utc] = identifier[utc] , identifier[noprogress] = identifier[noprogress] , identifier[resolve] = identifier[resolve] )
keyword[except] identifier[ProcessingFailure] :
keyword[return]
keyword[if] identifier[directory] :
identifier[set_all_color] ( identifier[enabled] = keyword[False] )
identifier[write] ( identifier[fmt] , identifier[chat_history] , identifier[directory] keyword[or] identifier[sys] . identifier[stdout] ) | def messages(path, thread, fmt, nocolor, timezones, utc, noprogress, resolve, directory):
"""
Conversion of Facebook chat history.
"""
with colorize_output(nocolor):
try:
chat_history = _process_history(path=path, thread=thread, timezones=timezones, utc=utc, noprogress=noprogress, resolve=resolve) # depends on [control=['try'], data=[]]
except ProcessingFailure:
return # depends on [control=['except'], data=[]]
if directory:
set_all_color(enabled=False) # depends on [control=['if'], data=[]]
write(fmt, chat_history, directory or sys.stdout) # depends on [control=['with'], data=[]] |
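
One detail worth noting in the call above: `directory or sys.stdout` falls back to standard output whenever no directory was supplied. In isolation:

import sys

def resolve_target(directory):
    # A falsy directory (None, "") falls through to the stdout stream.
    return directory or sys.stdout

assert resolve_target("out/") == "out/"
assert resolve_target(None) is sys.stdout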
def _on_process_started(self):
""" Logs process started """
comm('backend process started')
if self is None:
return
self.starting = False
self.running = True | def function[_on_process_started, parameter[self]]:
constant[ Logs process started ]
call[name[comm], parameter[constant[backend process started]]]
if compare[name[self] is constant[None]] begin[:]
return[None]
name[self].starting assign[=] constant[False]
name[self].running assign[=] constant[True] | keyword[def] identifier[_on_process_started] ( identifier[self] ):
literal[string]
identifier[comm] ( literal[string] )
keyword[if] identifier[self] keyword[is] keyword[None] :
keyword[return]
identifier[self] . identifier[starting] = keyword[False]
identifier[self] . identifier[running] = keyword[True] | def _on_process_started(self):
""" Logs process started """
comm('backend process started')
if self is None:
return # depends on [control=['if'], data=[]]
self.starting = False
self.running = True |
def enable(profile='allprofiles'):
'''
.. versionadded:: 2015.5.0
Enable firewall profile
Args:
profile (Optional[str]): The name of the profile to enable. Default is
``allprofiles``. Valid options are:
- allprofiles
- domainprofile
- privateprofile
- publicprofile
Returns:
bool: True if successful
Raises:
CommandExecutionError: If the command fails
CLI Example:
.. code-block:: bash
salt '*' firewall.enable
'''
cmd = ['netsh', 'advfirewall', 'set', profile, 'state', 'on']
ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)
if ret['retcode'] != 0:
raise CommandExecutionError(ret['stdout'])
return True | def function[enable, parameter[profile]]:
constant[
.. versionadded:: 2015.5.0
Enable firewall profile
Args:
profile (Optional[str]): The name of the profile to enable. Default is
``allprofiles``. Valid options are:
- allprofiles
- domainprofile
- privateprofile
- publicprofile
Returns:
bool: True if successful
Raises:
CommandExecutionError: If the command fails
CLI Example:
.. code-block:: bash
salt '*' firewall.enable
]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da20c6c7340>, <ast.Constant object at 0x7da20c6c42b0>, <ast.Constant object at 0x7da20c6c4940>, <ast.Name object at 0x7da20c6c6800>, <ast.Constant object at 0x7da20c6c7ee0>, <ast.Constant object at 0x7da20c6c6200>]]
variable[ret] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[cmd]]]
if compare[call[name[ret]][constant[retcode]] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da20c6c4130>
return[constant[True]] | keyword[def] identifier[enable] ( identifier[profile] = literal[string] ):
literal[string]
identifier[cmd] =[ literal[string] , literal[string] , literal[string] , identifier[profile] , literal[string] , literal[string] ]
identifier[ret] = identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[python_shell] = keyword[False] , identifier[ignore_retcode] = keyword[True] )
keyword[if] identifier[ret] [ literal[string] ]!= literal[int] :
keyword[raise] identifier[CommandExecutionError] ( identifier[ret] [ literal[string] ])
keyword[return] keyword[True] | def enable(profile='allprofiles'):
"""
.. versionadded:: 2015.5.0
Enable firewall profile
Args:
profile (Optional[str]): The name of the profile to enable. Default is
``allprofiles``. Valid options are:
- allprofiles
- domainprofile
- privateprofile
- publicprofile
Returns:
bool: True if successful
Raises:
CommandExecutionError: If the command fails
CLI Example:
.. code-block:: bash
salt '*' firewall.enable
"""
cmd = ['netsh', 'advfirewall', 'set', profile, 'state', 'on']
ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)
if ret['retcode'] != 0:
raise CommandExecutionError(ret['stdout']) # depends on [control=['if'], data=[]]
return True |
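
Outside of Salt, the same netsh call can be sketched with subprocess (Windows-only; RuntimeError stands in for CommandExecutionError):

import subprocess

def enable_firewall(profile='allprofiles'):
    cmd = ['netsh', 'advfirewall', 'set', profile, 'state', 'on']
    # Passing a list (no shell), like python_shell=False above, avoids shell parsing.
    proc = subprocess.run(cmd, capture_output=True, text=True)
    if proc.returncode != 0:
        raise RuntimeError(proc.stdout)
    return True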
def get_best_fit_parameters_grouped(self):
"""Returns a dictionary of the best fit."""
result_dict = dict()
result_dict['ocv'] = [parameters['ocv'] for parameters in
self.best_fit_parameters]
for i in range(self.circuits):
result_dict['t' + str(i)] = [parameters['t' + str(i)] for parameters
in self.best_fit_parameters]
result_dict['w' + str(i)] = [parameters['w' + str(i)] for parameters
in self.best_fit_parameters]
return result_dict | def function[get_best_fit_parameters_grouped, parameter[self]]:
constant[Returns a dictionary of the best fit.]
variable[result_dict] assign[=] call[name[dict], parameter[]]
call[name[result_dict]][constant[ocv]] assign[=] <ast.ListComp object at 0x7da1b1969540>
for taget[name[i]] in starred[call[name[range], parameter[name[self].circuits]]] begin[:]
call[name[result_dict]][binary_operation[constant[t] + call[name[str], parameter[name[i]]]]] assign[=] <ast.ListComp object at 0x7da1b1968790>
call[name[result_dict]][binary_operation[constant[w] + call[name[str], parameter[name[i]]]]] assign[=] <ast.ListComp object at 0x7da1b196a440>
return[name[result_dict]] | keyword[def] identifier[get_best_fit_parameters_grouped] ( identifier[self] ):
literal[string]
identifier[result_dict] = identifier[dict] ()
identifier[result_dict] [ literal[string] ]=[ identifier[parameters] [ literal[string] ] keyword[for] identifier[parameters] keyword[in]
identifier[self] . identifier[best_fit_parameters] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[circuits] ):
identifier[result_dict] [ literal[string] + identifier[str] ( identifier[i] )]=[ identifier[parameters] [ literal[string] + identifier[str] ( identifier[i] )] keyword[for] identifier[parameters]
keyword[in] identifier[self] . identifier[best_fit_parameters] ]
identifier[result_dict] [ literal[string] + identifier[str] ( identifier[i] )]=[ identifier[parameters] [ literal[string] + identifier[str] ( identifier[i] )] keyword[for] identifier[parameters]
keyword[in] identifier[self] . identifier[best_fit_parameters] ]
keyword[return] identifier[result_dict] | def get_best_fit_parameters_grouped(self):
"""Returns a dictionary of the best fit."""
result_dict = dict()
result_dict['ocv'] = [parameters['ocv'] for parameters in self.best_fit_parameters]
for i in range(self.circuits):
result_dict['t' + str(i)] = [parameters['t' + str(i)] for parameters in self.best_fit_parameters]
result_dict['w' + str(i)] = [parameters['w' + str(i)] for parameters in self.best_fit_parameters] # depends on [control=['for'], data=['i']]
return result_dict |
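
The method above is a list-of-dicts to dict-of-lists transpose; a toy equivalent with two circuits (parameter values are made up):

fits = [{'ocv': 3.7, 't0': 1.2, 'w0': 0.5, 't1': 9.0, 'w1': 0.1},
        {'ocv': 3.6, 't0': 1.1, 'w0': 0.6, 't1': 8.5, 'w1': 0.2}]
# Group each parameter's values across all fits into one list per key.
grouped = {k: [f[k] for f in fits] for k in fits[0]}
assert grouped['ocv'] == [3.7, 3.6]
assert grouped['t1'] == [9.0, 8.5]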
def prepare_mosaic(self, image, fov_deg, name=None):
"""Prepare a new (blank) mosaic image based on the pointing of
the parameter image
"""
header = image.get_header()
ra_deg, dec_deg = header['CRVAL1'], header['CRVAL2']
data_np = image.get_data()
#dtype = data_np.dtype
dtype = None
self.bg_ref = iqcalc.get_median(data_np)
# TODO: handle skew (differing rotation for each axis)?
skew_limit = self.settings.get('skew_limit', 0.1)
(rot_deg, cdelt1, cdelt2) = wcs.get_rotation_and_scale(
header, skew_threshold=skew_limit)
self.logger.debug("image0 rot=%f cdelt1=%f cdelt2=%f" % (
rot_deg, cdelt1, cdelt2))
# Prepare pixel scale for each axis
px_scale = (math.fabs(cdelt1), math.fabs(cdelt2))
cdbase = [np.sign(cdelt1), np.sign(cdelt2)]
reuse_image = self.settings.get('reuse_image', False)
if (not reuse_image) or (self.img_mosaic is None):
self.logger.debug("creating blank image to hold mosaic")
self.fv.gui_do(self._prepare_mosaic1, "Creating blank image...")
# GC old mosaic
self.img_mosaic = None
img_mosaic = dp.create_blank_image(ra_deg, dec_deg,
fov_deg, px_scale,
rot_deg,
cdbase=cdbase,
logger=self.logger,
pfx='mosaic',
dtype=dtype)
if name is not None:
img_mosaic.set(name=name)
imname = img_mosaic.get('name', image.get('name', "NoName"))
self.logger.debug("mosaic name is '%s'" % (imname))
# avoid making a thumbnail of this if seed image is also that way
nothumb = not self.settings.get('make_thumbs', False)
if nothumb:
img_mosaic.set(nothumb=True)
# image is not on disk, set indication for other plugins
img_mosaic.set(path=None)
# TODO: fill in interesting/select object headers from seed image
self.img_mosaic = img_mosaic
self.logger.info("adding mosaic image '%s' to channel" % (imname))
self.fv.gui_call(self.fv.add_image, imname, img_mosaic,
chname=self.mosaic_chname)
else:
# <-- reuse image (faster)
self.logger.debug("Reusing previous mosaic image")
self.fv.gui_do(
self._prepare_mosaic1, "Reusing previous mosaic image...")
img_mosaic = dp.recycle_image(self.img_mosaic,
ra_deg, dec_deg,
fov_deg, px_scale,
rot_deg,
cdbase=cdbase,
logger=self.logger,
pfx='mosaic')
header = img_mosaic.get_header()
(rot, cdelt1, cdelt2) = wcs.get_rotation_and_scale(
header, skew_threshold=skew_limit)
self.logger.debug("mosaic rot=%f cdelt1=%f cdelt2=%f" % (
rot, cdelt1, cdelt2))
return img_mosaic | def function[prepare_mosaic, parameter[self, image, fov_deg, name]]:
constant[Prepare a new (blank) mosaic image based on the pointing of
the parameter image
]
variable[header] assign[=] call[name[image].get_header, parameter[]]
<ast.Tuple object at 0x7da1b0dbc940> assign[=] tuple[[<ast.Subscript object at 0x7da1b0dbe800>, <ast.Subscript object at 0x7da1b0dbee30>]]
variable[data_np] assign[=] call[name[image].get_data, parameter[]]
variable[dtype] assign[=] constant[None]
name[self].bg_ref assign[=] call[name[iqcalc].get_median, parameter[name[data_np]]]
variable[skew_limit] assign[=] call[name[self].settings.get, parameter[constant[skew_limit], constant[0.1]]]
<ast.Tuple object at 0x7da1b0dbf370> assign[=] call[name[wcs].get_rotation_and_scale, parameter[name[header]]]
call[name[self].logger.debug, parameter[binary_operation[constant[image0 rot=%f cdelt1=%f cdelt2=%f] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0dbe1a0>, <ast.Name object at 0x7da1b0dbda80>, <ast.Name object at 0x7da1b0dbe8f0>]]]]]
variable[px_scale] assign[=] tuple[[<ast.Call object at 0x7da1b0dbd720>, <ast.Call object at 0x7da1b0dbee90>]]
variable[cdbase] assign[=] list[[<ast.Call object at 0x7da1b0dbd4b0>, <ast.Call object at 0x7da1b0dbc400>]]
variable[reuse_image] assign[=] call[name[self].settings.get, parameter[constant[reuse_image], constant[False]]]
if <ast.BoolOp object at 0x7da1b0dbe380> begin[:]
call[name[self].logger.debug, parameter[constant[creating blank image to hold mosaic]]]
call[name[self].fv.gui_do, parameter[name[self]._prepare_mosaic1, constant[Creating blank image...]]]
name[self].img_mosaic assign[=] constant[None]
variable[img_mosaic] assign[=] call[name[dp].create_blank_image, parameter[name[ra_deg], name[dec_deg], name[fov_deg], name[px_scale], name[rot_deg]]]
if compare[name[name] is_not constant[None]] begin[:]
call[name[img_mosaic].set, parameter[]]
variable[imname] assign[=] call[name[img_mosaic].get, parameter[constant[name], call[name[image].get, parameter[constant[name], constant[NoName]]]]]
call[name[self].logger.debug, parameter[binary_operation[constant[mosaic name is '%s'] <ast.Mod object at 0x7da2590d6920> name[imname]]]]
variable[nothumb] assign[=] <ast.UnaryOp object at 0x7da1b0dbee00>
if name[nothumb] begin[:]
call[name[img_mosaic].set, parameter[]]
call[name[img_mosaic].set, parameter[]]
name[self].img_mosaic assign[=] name[img_mosaic]
call[name[self].logger.info, parameter[binary_operation[constant[adding mosaic image '%s' to channel] <ast.Mod object at 0x7da2590d6920> name[imname]]]]
call[name[self].fv.gui_call, parameter[name[self].fv.add_image, name[imname], name[img_mosaic]]]
variable[header] assign[=] call[name[img_mosaic].get_header, parameter[]]
<ast.Tuple object at 0x7da207f9a650> assign[=] call[name[wcs].get_rotation_and_scale, parameter[name[header]]]
call[name[self].logger.debug, parameter[binary_operation[constant[mosaic rot=%f cdelt1=%f cdelt2=%f] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0da29b0>, <ast.Name object at 0x7da1b0da2c80>, <ast.Name object at 0x7da1b0da0eb0>]]]]]
return[name[img_mosaic]] | keyword[def] identifier[prepare_mosaic] ( identifier[self] , identifier[image] , identifier[fov_deg] , identifier[name] = keyword[None] ):
literal[string]
identifier[header] = identifier[image] . identifier[get_header] ()
identifier[ra_deg] , identifier[dec_deg] = identifier[header] [ literal[string] ], identifier[header] [ literal[string] ]
identifier[data_np] = identifier[image] . identifier[get_data] ()
identifier[dtype] = keyword[None]
identifier[self] . identifier[bg_ref] = identifier[iqcalc] . identifier[get_median] ( identifier[data_np] )
identifier[skew_limit] = identifier[self] . identifier[settings] . identifier[get] ( literal[string] , literal[int] )
( identifier[rot_deg] , identifier[cdelt1] , identifier[cdelt2] )= identifier[wcs] . identifier[get_rotation_and_scale] (
identifier[header] , identifier[skew_threshold] = identifier[skew_limit] )
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] %(
identifier[rot_deg] , identifier[cdelt1] , identifier[cdelt2] ))
identifier[px_scale] =( identifier[math] . identifier[fabs] ( identifier[cdelt1] ), identifier[math] . identifier[fabs] ( identifier[cdelt2] ))
identifier[cdbase] =[ identifier[np] . identifier[sign] ( identifier[cdelt1] ), identifier[np] . identifier[sign] ( identifier[cdelt2] )]
identifier[reuse_image] = identifier[self] . identifier[settings] . identifier[get] ( literal[string] , keyword[False] )
keyword[if] ( keyword[not] identifier[reuse_image] ) keyword[or] ( identifier[self] . identifier[img_mosaic] keyword[is] keyword[None] ):
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[fv] . identifier[gui_do] ( identifier[self] . identifier[_prepare_mosaic1] , literal[string] )
identifier[self] . identifier[img_mosaic] = keyword[None]
identifier[img_mosaic] = identifier[dp] . identifier[create_blank_image] ( identifier[ra_deg] , identifier[dec_deg] ,
identifier[fov_deg] , identifier[px_scale] ,
identifier[rot_deg] ,
identifier[cdbase] = identifier[cdbase] ,
identifier[logger] = identifier[self] . identifier[logger] ,
identifier[pfx] = literal[string] ,
identifier[dtype] = identifier[dtype] )
keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] :
identifier[img_mosaic] . identifier[set] ( identifier[name] = identifier[name] )
identifier[imname] = identifier[img_mosaic] . identifier[get] ( literal[string] , identifier[image] . identifier[get] ( literal[string] , literal[string] ))
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] %( identifier[imname] ))
identifier[nothumb] = keyword[not] identifier[self] . identifier[settings] . identifier[get] ( literal[string] , keyword[False] )
keyword[if] identifier[nothumb] :
identifier[img_mosaic] . identifier[set] ( identifier[nothumb] = keyword[True] )
identifier[img_mosaic] . identifier[set] ( identifier[path] = keyword[None] )
identifier[self] . identifier[img_mosaic] = identifier[img_mosaic]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] %( identifier[imname] ))
identifier[self] . identifier[fv] . identifier[gui_call] ( identifier[self] . identifier[fv] . identifier[add_image] , identifier[imname] , identifier[img_mosaic] ,
identifier[chname] = identifier[self] . identifier[mosaic_chname] )
keyword[else] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[fv] . identifier[gui_do] (
identifier[self] . identifier[_prepare_mosaic1] , literal[string] )
identifier[img_mosaic] = identifier[dp] . identifier[recycle_image] ( identifier[self] . identifier[img_mosaic] ,
identifier[ra_deg] , identifier[dec_deg] ,
identifier[fov_deg] , identifier[px_scale] ,
identifier[rot_deg] ,
identifier[cdbase] = identifier[cdbase] ,
identifier[logger] = identifier[self] . identifier[logger] ,
identifier[pfx] = literal[string] )
identifier[header] = identifier[img_mosaic] . identifier[get_header] ()
( identifier[rot] , identifier[cdelt1] , identifier[cdelt2] )= identifier[wcs] . identifier[get_rotation_and_scale] (
identifier[header] , identifier[skew_threshold] = identifier[skew_limit] )
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] %(
identifier[rot] , identifier[cdelt1] , identifier[cdelt2] ))
keyword[return] identifier[img_mosaic] | def prepare_mosaic(self, image, fov_deg, name=None):
"""Prepare a new (blank) mosaic image based on the pointing of
the parameter image
"""
header = image.get_header()
(ra_deg, dec_deg) = (header['CRVAL1'], header['CRVAL2'])
data_np = image.get_data()
#dtype = data_np.dtype
dtype = None
self.bg_ref = iqcalc.get_median(data_np)
# TODO: handle skew (differing rotation for each axis)?
skew_limit = self.settings.get('skew_limit', 0.1)
(rot_deg, cdelt1, cdelt2) = wcs.get_rotation_and_scale(header, skew_threshold=skew_limit)
self.logger.debug('image0 rot=%f cdelt1=%f cdelt2=%f' % (rot_deg, cdelt1, cdelt2))
# Prepare pixel scale for each axis
px_scale = (math.fabs(cdelt1), math.fabs(cdelt2))
cdbase = [np.sign(cdelt1), np.sign(cdelt2)]
reuse_image = self.settings.get('reuse_image', False)
if not reuse_image or self.img_mosaic is None:
self.logger.debug('creating blank image to hold mosaic')
self.fv.gui_do(self._prepare_mosaic1, 'Creating blank image...')
# GC old mosaic
self.img_mosaic = None
img_mosaic = dp.create_blank_image(ra_deg, dec_deg, fov_deg, px_scale, rot_deg, cdbase=cdbase, logger=self.logger, pfx='mosaic', dtype=dtype)
if name is not None:
img_mosaic.set(name=name) # depends on [control=['if'], data=['name']]
imname = img_mosaic.get('name', image.get('name', 'NoName'))
self.logger.debug("mosaic name is '%s'" % imname)
# avoid making a thumbnail of this if seed image is also that way
nothumb = not self.settings.get('make_thumbs', False)
if nothumb:
img_mosaic.set(nothumb=True) # depends on [control=['if'], data=[]]
# image is not on disk, set indication for other plugins
img_mosaic.set(path=None)
# TODO: fill in interesting/select object headers from seed image
self.img_mosaic = img_mosaic
self.logger.info("adding mosaic image '%s' to channel" % imname)
self.fv.gui_call(self.fv.add_image, imname, img_mosaic, chname=self.mosaic_chname) # depends on [control=['if'], data=[]]
else:
# <-- reuse image (faster)
self.logger.debug('Reusing previous mosaic image')
self.fv.gui_do(self._prepare_mosaic1, 'Reusing previous mosaic image...')
img_mosaic = dp.recycle_image(self.img_mosaic, ra_deg, dec_deg, fov_deg, px_scale, rot_deg, cdbase=cdbase, logger=self.logger, pfx='mosaic')
header = img_mosaic.get_header()
(rot, cdelt1, cdelt2) = wcs.get_rotation_and_scale(header, skew_threshold=skew_limit)
self.logger.debug('mosaic rot=%f cdelt1=%f cdelt2=%f' % (rot, cdelt1, cdelt2))
return img_mosaic |
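
The reuse-or-create branching above, distilled: the ginga/astropy calls are replaced by stand-ins so just the control flow runs.

class MosaicHolder:
    def __init__(self, reuse_image):
        self.reuse_image = reuse_image
        self.img_mosaic = None
        self.created = 0
        self.recycled = 0

    def prepare(self):
        if not self.reuse_image or self.img_mosaic is None:
            self.img_mosaic = object()   # stands in for dp.create_blank_image(...)
            self.created += 1
        else:
            self.recycled += 1           # stands in for dp.recycle_image(...)
        return self.img_mosaic

h = MosaicHolder(reuse_image=True)
h.prepare(); h.prepare()
assert (h.created, h.recycled) == (1, 1)   # first call creates, second recycles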
def validate_url(self, original_string):
"""Returns the original string if it was valid, raises an argument
error if it's not.
"""
# nipped from stack overflow: http://stackoverflow.com/questions/827557/how-do-you-validate-a-url-with-a-regular-expression-in-python
# I preferred this to the thorough regex approach for simplicity and
# readability
pieces = urlparse.urlparse(original_string)
try:
if self.path_only:
assert not any([pieces.scheme, pieces.netloc])
assert pieces.path
else:
assert all([pieces.scheme, pieces.netloc])
valid_chars = set(string.letters + string.digits + ":-_.")
assert set(pieces.netloc) <= valid_chars
assert pieces.scheme in ['http', 'https']
except AssertionError as e:
raise ArgumentError(self.item_name,
"The input you've provided is not a valid URL.")
return pieces | def function[validate_url, parameter[self, original_string]]:
constant[Returns the original string if it was valid, raises an argument
error if it's not.
]
variable[pieces] assign[=] call[name[urlparse].urlparse, parameter[name[original_string]]]
<ast.Try object at 0x7da2054a55d0>
return[name[pieces]] | keyword[def] identifier[validate_url] ( identifier[self] , identifier[original_string] ):
literal[string]
identifier[pieces] = identifier[urlparse] . identifier[urlparse] ( identifier[original_string] )
keyword[try] :
keyword[if] identifier[self] . identifier[path_only] :
keyword[assert] keyword[not] identifier[any] ([ identifier[pieces] . identifier[scheme] , identifier[pieces] . identifier[netloc] ])
keyword[assert] identifier[pieces] . identifier[path]
keyword[else] :
keyword[assert] identifier[all] ([ identifier[pieces] . identifier[scheme] , identifier[pieces] . identifier[netloc] ])
identifier[valid_chars] = identifier[set] ( identifier[string] . identifier[letters] + identifier[string] . identifier[digits] + literal[string] )
keyword[assert] identifier[set] ( identifier[pieces] . identifier[netloc] )<= identifier[valid_chars]
keyword[assert] identifier[pieces] . identifier[scheme] keyword[in] [ literal[string] , literal[string] ]
keyword[except] identifier[AssertionError] keyword[as] identifier[e] :
keyword[raise] identifier[ArgumentError] ( identifier[self] . identifier[item_name] ,
literal[string] )
keyword[return] identifier[pieces] | def validate_url(self, original_string):
"""Returns the original string if it was valid, raises an argument
error if it's not.
"""
# nipped from stack overflow: http://stackoverflow.com/questions/827557/how-do-you-validate-a-url-with-a-regular-expression-in-python
# I preferred this to the thorough regex approach for simplicity and
# readability
pieces = urlparse.urlparse(original_string)
try:
if self.path_only:
assert not any([pieces.scheme, pieces.netloc])
assert pieces.path # depends on [control=['if'], data=[]]
else:
assert all([pieces.scheme, pieces.netloc])
valid_chars = set(string.letters + string.digits + ':-_.')
assert set(pieces.netloc) <= valid_chars
assert pieces.scheme in ['http', 'https'] # depends on [control=['try'], data=[]]
except AssertionError as e:
raise ArgumentError(self.item_name, "The input you've provided is not a valid URL.") # depends on [control=['except'], data=[]]
return pieces |
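
The code above is Python 2 (the urlparse module, string.letters); an approximate Python 3 rendering of the same checks, with the error type simplified to ValueError:

import string
from urllib.parse import urlparse

def validate_url(original_string, path_only=False):
    pieces = urlparse(original_string)
    valid_chars = set(string.ascii_letters + string.digits + ":-_.")
    if path_only:
        ok = not (pieces.scheme or pieces.netloc) and bool(pieces.path)
    else:
        ok = (all([pieces.scheme, pieces.netloc])
              and set(pieces.netloc) <= valid_chars
              and pieces.scheme in ('http', 'https'))
    if not ok:
        raise ValueError("The input you've provided is not a valid URL.")
    return pieces

assert validate_url('https://example.com/a').netloc == 'example.com'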
def setUpClassDef(self, service):
'''use soapAction dict for WS-Action input, set up wsAction
dict for grabbing WS-Action output values.
'''
assert isinstance(service, WSDLTools.Service), \
'expecting WSDLTools.Service instance'
s = self._services[service.name].classdef
print >>s, 'class %s(%s):' %(self.getClassName(service.name), self.base_class_name)
print >>s, '%ssoapAction = {}' % self.getIndent(level=1)
print >>s, '%swsAction = {}' % self.getIndent(level=1)
print >>s, '%sroot = {}' % self.getIndent(level=1) | def function[setUpClassDef, parameter[self, service]]:
constant[use soapAction dict for WS-Action input, setup wsAction
dict for grabbing WS-Action output values.
]
assert[call[name[isinstance], parameter[name[service], name[WSDLTools].Service]]]
variable[s] assign[=] call[name[self]._services][name[service].name].classdef
tuple[[<ast.BinOp object at 0x7da18bc72e90>, <ast.BinOp object at 0x7da18bc73130>]]
tuple[[<ast.BinOp object at 0x7da18bc727a0>, <ast.BinOp object at 0x7da18bc707c0>]]
tuple[[<ast.BinOp object at 0x7da18bc72200>, <ast.BinOp object at 0x7da18bc72f80>]]
tuple[[<ast.BinOp object at 0x7da18bc73550>, <ast.BinOp object at 0x7da18bc73370>]] | keyword[def] identifier[setUpClassDef] ( identifier[self] , identifier[service] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[service] , identifier[WSDLTools] . identifier[Service] ), literal[string]
identifier[s] = identifier[self] . identifier[_services] [ identifier[service] . identifier[name] ]. identifier[classdef]
identifier[print] >> identifier[s] , literal[string] %( identifier[self] . identifier[getClassName] ( identifier[service] . identifier[name] ), identifier[self] . identifier[base_class_name] )
identifier[print] >> identifier[s] , literal[string] % identifier[self] . identifier[getIndent] ( identifier[level] = literal[int] )
identifier[print] >> identifier[s] , literal[string] % identifier[self] . identifier[getIndent] ( identifier[level] = literal[int] )
identifier[print] >> identifier[s] , literal[string] % identifier[self] . identifier[getIndent] ( identifier[level] = literal[int] ) | def setUpClassDef(self, service):
"""use soapAction dict for WS-Action input, setup wsAction
dict for grabbing WS-Action output values.
"""
assert isinstance(service, WSDLTools.Service), 'expecting WSDLTools.Service instance'
s = self._services[service.name].classdef
(print >> s, 'class %s(%s):' % (self.getClassName(service.name), self.base_class_name))
(print >> s, '%ssoapAction = {}' % self.getIndent(level=1))
(print >> s, '%swsAction = {}' % self.getIndent(level=1))
(print >> s, '%sroot = {}' % self.getIndent(level=1)) |
def output_raw(self, text):
"""
Output results in raw JSON format
"""
payload = json.loads(text)
out = json.dumps(payload, sort_keys=True, indent=self._indent, separators=(',', ': '))
print(self.colorize_json(out)) | def function[output_raw, parameter[self, text]]:
constant[
Output results in raw JSON format
]
variable[payload] assign[=] call[name[json].loads, parameter[name[text]]]
variable[out] assign[=] call[name[json].dumps, parameter[name[payload]]]
call[name[print], parameter[call[name[self].colorize_json, parameter[name[out]]]]] | keyword[def] identifier[output_raw] ( identifier[self] , identifier[text] ):
literal[string]
identifier[payload] = identifier[json] . identifier[loads] ( identifier[text] )
identifier[out] = identifier[json] . identifier[dumps] ( identifier[payload] , identifier[sort_keys] = keyword[True] , identifier[indent] = identifier[self] . identifier[_indent] , identifier[separators] =( literal[string] , literal[string] ))
identifier[print] ( identifier[self] . identifier[colorize_json] ( identifier[out] )) | def output_raw(self, text):
"""
Output results in raw JSON format
"""
payload = json.loads(text)
out = json.dumps(payload, sort_keys=True, indent=self._indent, separators=(',', ': '))
print(self.colorize_json(out)) |
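
Minus the colorizer, the formatter is a parse-then-re-dump round trip that normalizes key order and spacing:

import json

text = '{"b": 2, "a": {"c": 3}}'
# sort_keys plus explicit separators gives stable, diff-friendly output.
out = json.dumps(json.loads(text), sort_keys=True, indent=2, separators=(',', ': '))
print(out)   # keys come out as a, b regardless of input order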
def unlock(name,
zk_hosts=None, # in case you need to unlock without having run lock (failed execution for example)
identifier=None,
max_concurrency=1,
ephemeral_lease=False,
profile=None,
scheme=None,
username=None,
password=None,
default_acl=None):
'''
Remove lease from semaphore.
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
conn_kwargs = {'profile': profile, 'scheme': scheme,
'username': username, 'password': password, 'default_acl': default_acl}
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Released lock if it is here'
return ret
if identifier is None:
identifier = __grains__['id']
unlocked = __salt__['zk_concurrency.unlock'](name,
zk_hosts=zk_hosts,
identifier=identifier,
max_concurrency=max_concurrency,
ephemeral_lease=ephemeral_lease,
**conn_kwargs)
if unlocked:
ret['result'] = True
else:
ret['comment'] = 'Unable to find lease for path {0}'.format(name)
return ret | def function[unlock, parameter[name, zk_hosts, identifier, max_concurrency, ephemeral_lease, profile, scheme, username, password, default_acl]]:
constant[
Remove lease from semaphore.
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da18ede5d80>, <ast.Constant object at 0x7da18ede52d0>, <ast.Constant object at 0x7da18ede4400>, <ast.Constant object at 0x7da18ede7580>], [<ast.Name object at 0x7da18ede7610>, <ast.Dict object at 0x7da18ede4b20>, <ast.Constant object at 0x7da18ede7a00>, <ast.Constant object at 0x7da18ede49a0>]]
variable[conn_kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da18ede7c40>, <ast.Constant object at 0x7da18ede5030>, <ast.Constant object at 0x7da18ede4370>, <ast.Constant object at 0x7da18ede4040>, <ast.Constant object at 0x7da18ede72e0>], [<ast.Name object at 0x7da18ede5750>, <ast.Name object at 0x7da18ede4e20>, <ast.Name object at 0x7da18ede56c0>, <ast.Name object at 0x7da18ede62f0>, <ast.Name object at 0x7da18ede5f60>]]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
call[name[ret]][constant[comment]] assign[=] constant[Released lock if it is here]
return[name[ret]]
if compare[name[identifier] is constant[None]] begin[:]
variable[identifier] assign[=] call[name[__grains__]][constant[id]]
variable[unlocked] assign[=] call[call[name[__salt__]][constant[zk_concurrency.unlock]], parameter[name[name]]]
if name[unlocked] begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
return[name[ret]] | keyword[def] identifier[unlock] ( identifier[name] ,
identifier[zk_hosts] = keyword[None] ,
identifier[identifier] = keyword[None] ,
identifier[max_concurrency] = literal[int] ,
identifier[ephemeral_lease] = keyword[False] ,
identifier[profile] = keyword[None] ,
identifier[scheme] = keyword[None] ,
identifier[username] = keyword[None] ,
identifier[password] = keyword[None] ,
identifier[default_acl] = keyword[None] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[False] ,
literal[string] : literal[string] }
identifier[conn_kwargs] ={ literal[string] : identifier[profile] , literal[string] : identifier[scheme] ,
literal[string] : identifier[username] , literal[string] : identifier[password] , literal[string] : identifier[default_acl] }
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
keyword[if] identifier[identifier] keyword[is] keyword[None] :
identifier[identifier] = identifier[__grains__] [ literal[string] ]
identifier[unlocked] = identifier[__salt__] [ literal[string] ]( identifier[name] ,
identifier[zk_hosts] = identifier[zk_hosts] ,
identifier[identifier] = identifier[identifier] ,
identifier[max_concurrency] = identifier[max_concurrency] ,
identifier[ephemeral_lease] = identifier[ephemeral_lease] ,
** identifier[conn_kwargs] )
keyword[if] identifier[unlocked] :
identifier[ret] [ literal[string] ]= keyword[True]
keyword[else] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret] | def unlock(name, zk_hosts=None, identifier=None, max_concurrency=1, ephemeral_lease=False, profile=None, scheme=None, username=None, password=None, default_acl=None): # in case you need to unlock without having run lock (failed execution for example)
'\n Remove lease from semaphore.\n '
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
conn_kwargs = {'profile': profile, 'scheme': scheme, 'username': username, 'password': password, 'default_acl': default_acl}
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Released lock if it is here'
return ret # depends on [control=['if'], data=[]]
if identifier is None:
identifier = __grains__['id'] # depends on [control=['if'], data=['identifier']]
unlocked = __salt__['zk_concurrency.unlock'](name, zk_hosts=zk_hosts, identifier=identifier, max_concurrency=max_concurrency, ephemeral_lease=ephemeral_lease, **conn_kwargs)
if unlocked:
ret['result'] = True # depends on [control=['if'], data=[]]
else:
ret['comment'] = 'Unable to find lease for path {0}'.format(name)
return ret |
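
The return value follows the standard Salt state contract; here are both terminal shapes with the zk_concurrency backend mocked out:

def fake_unlock(name, unlocked):
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    if unlocked:
        ret['result'] = True
    else:
        ret['comment'] = 'Unable to find lease for path {0}'.format(name)
    return ret

assert fake_unlock('deploy-lock', True)['result'] is True
assert 'Unable to find lease' in fake_unlock('deploy-lock', False)['comment']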
def ListMappedNetworkDrives():
'''
On Windows, returns a list of mapped network drives
:return: tuple(string, string, bool)
For each mapped network drive, return a 3-value tuple:
- the local drive
- the remote path
- True if the mapping is enabled (warning: not reliable)
'''
if sys.platform != 'win32':
raise NotImplementedError
drives_list = []
netuse = _CallWindowsNetCommand(['use'])
for line in netuse.split(EOL_STYLE_WINDOWS):
match = re.match("(\w*)\s+(\w:)\s+(.+)", line.rstrip())
if match:
drives_list.append((match.group(2), match.group(3), match.group(1) == 'OK'))
return drives_list | def function[ListMappedNetworkDrives, parameter[]]:
constant[
On Windows, returns a list of mapped network drives
:return: tuple(string, string, bool)
For each mapped netword drive, return 3 values tuple:
- the local drive
- the remote path-
- True if the mapping is enabled (warning: not reliable)
]
if compare[name[sys].platform not_equal[!=] constant[win32]] begin[:]
<ast.Raise object at 0x7da20c6c7b20>
variable[drives_list] assign[=] list[[]]
variable[netuse] assign[=] call[name[_CallWindowsNetCommand], parameter[list[[<ast.Constant object at 0x7da18bcc9690>]]]]
for taget[name[line]] in starred[call[name[netuse].split, parameter[name[EOL_STYLE_WINDOWS]]]] begin[:]
variable[match] assign[=] call[name[re].match, parameter[constant[(\w*)\s+(\w:)\s+(.+)], call[name[line].rstrip, parameter[]]]]
if name[match] begin[:]
call[name[drives_list].append, parameter[tuple[[<ast.Call object at 0x7da18bccac50>, <ast.Call object at 0x7da18bcca0e0>, <ast.Compare object at 0x7da18bccb460>]]]]
return[name[drives_list]] | keyword[def] identifier[ListMappedNetworkDrives] ():
literal[string]
keyword[if] identifier[sys] . identifier[platform] != literal[string] :
keyword[raise] identifier[NotImplementedError]
identifier[drives_list] =[]
identifier[netuse] = identifier[_CallWindowsNetCommand] ([ literal[string] ])
keyword[for] identifier[line] keyword[in] identifier[netuse] . identifier[split] ( identifier[EOL_STYLE_WINDOWS] ):
identifier[match] = identifier[re] . identifier[match] ( literal[string] , identifier[line] . identifier[rstrip] ())
keyword[if] identifier[match] :
identifier[drives_list] . identifier[append] (( identifier[match] . identifier[group] ( literal[int] ), identifier[match] . identifier[group] ( literal[int] ), identifier[match] . identifier[group] ( literal[int] )== literal[string] ))
keyword[return] identifier[drives_list] | def ListMappedNetworkDrives():
"""
On Windows, returns a list of mapped network drives
:return: tuple(string, string, bool)
For each mapped netword drive, return 3 values tuple:
- the local drive
- the remote path-
- True if the mapping is enabled (warning: not reliable)
"""
if sys.platform != 'win32':
raise NotImplementedError # depends on [control=['if'], data=[]]
drives_list = []
netuse = _CallWindowsNetCommand(['use'])
for line in netuse.split(EOL_STYLE_WINDOWS):
match = re.match('(\\w*)\\s+(\\w:)\\s+(.+)', line.rstrip())
if match:
drives_list.append((match.group(2), match.group(3), match.group(1) == 'OK')) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return drives_list |
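
The `net use` parsing exercised against a canned line; the sample text is invented but mirrors the columns the real command prints:

import re

sample = "OK           Z:        \\\\fileserver\\share"
m = re.match(r"(\w*)\s+(\w:)\s+(.+)", sample.rstrip())
assert m is not None
assert (m.group(2), m.group(3), m.group(1) == 'OK') == ('Z:', '\\\\fileserver\\share', True)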
def read(key, root=''):
'''
Read from SysFS
:param key: file or path in SysFS; if key is a list then root will be prefixed on each key
:return: the full (tree of) SysFS attributes under key
CLI example:
.. code-block:: bash
salt '*' sysfs.read class/net/em1/statistics
'''
if not isinstance(key, six.string_types):
res = {}
for akey in key:
ares = read(os.path.join(root, akey))
if ares is not False:
res[akey] = ares
return res
key = target(os.path.join(root, key))
if key is False:
return False
elif os.path.isdir(key):
keys = interfaces(key)
result = {}
for subkey in keys['r'] + keys['rw']:
subval = read(os.path.join(key, subkey))
if subval is not False:
subkeys = subkey.split('/')
subkey = subkeys.pop()
subresult = result
if subkeys:
for skey in subkeys:
if skey not in subresult:
subresult[skey] = {}
subresult = subresult[skey]
subresult[subkey] = subval
return result
else:
try:
log.trace('Reading %s...', key)
# Certain things in SysFS are pipes 'n such.
# This opens it non-blocking, which prevents indefinite blocking
with os.fdopen(os.open(key, os.O_RDONLY | os.O_NONBLOCK)) as treader:
# alternative method for the same idea, but only works for completely empty pipes
# treader = select.select([treader], [], [], 1)[0][0]
val = treader.read().strip()
if not val:
return False
try:
val = int(val)
except Exception:
try:
val = float(val)
except Exception:
pass
return val
except Exception:
return False | def function[read, parameter[key, root]]:
constant[
Read from SysFS
:param key: file or path in SysFS; if key is a list then root will be prefixed on each key
:return: the full (tree of) SysFS attributes under key
CLI example:
.. code-block:: bash
salt '*' sysfs.read class/net/em1/statistics
]
if <ast.UnaryOp object at 0x7da20c7963e0> begin[:]
variable[res] assign[=] dictionary[[], []]
for taget[name[akey]] in starred[name[key]] begin[:]
variable[ares] assign[=] call[name[read], parameter[call[name[os].path.join, parameter[name[root], name[akey]]]]]
if compare[name[ares] is_not constant[False]] begin[:]
call[name[res]][name[akey]] assign[=] name[ares]
return[name[res]]
variable[key] assign[=] call[name[target], parameter[call[name[os].path.join, parameter[name[root], name[key]]]]]
if compare[name[key] is constant[False]] begin[:]
return[constant[False]] | keyword[def] identifier[read] ( identifier[key] , identifier[root] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[key] , identifier[six] . identifier[string_types] ):
identifier[res] ={}
keyword[for] identifier[akey] keyword[in] identifier[key] :
identifier[ares] = identifier[read] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[akey] ))
keyword[if] identifier[ares] keyword[is] keyword[not] keyword[False] :
identifier[res] [ identifier[akey] ]= identifier[ares]
keyword[return] identifier[res]
identifier[key] = identifier[target] ( identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[key] ))
keyword[if] identifier[key] keyword[is] keyword[False] :
keyword[return] keyword[False]
keyword[elif] identifier[os] . identifier[path] . identifier[isdir] ( identifier[key] ):
identifier[keys] = identifier[interfaces] ( identifier[key] )
identifier[result] ={}
keyword[for] identifier[subkey] keyword[in] identifier[keys] [ literal[string] ]+ identifier[keys] [ literal[string] ]:
identifier[subval] = identifier[read] ( identifier[os] . identifier[path] . identifier[join] ( identifier[key] , identifier[subkey] ))
keyword[if] identifier[subval] keyword[is] keyword[not] keyword[False] :
identifier[subkeys] = identifier[subkey] . identifier[split] ( literal[string] )
identifier[subkey] = identifier[subkeys] . identifier[pop] ()
identifier[subresult] = identifier[result]
keyword[if] identifier[subkeys] :
keyword[for] identifier[skey] keyword[in] identifier[subkeys] :
keyword[if] identifier[skey] keyword[not] keyword[in] identifier[subresult] :
identifier[subresult] [ identifier[skey] ]={}
identifier[subresult] = identifier[subresult] [ identifier[skey] ]
identifier[subresult] [ identifier[subkey] ]= identifier[subval]
keyword[return] identifier[result]
keyword[else] :
keyword[try] :
identifier[log] . identifier[trace] ( literal[string] , identifier[key] )
keyword[with] identifier[os] . identifier[fdopen] ( identifier[os] . identifier[open] ( identifier[key] , identifier[os] . identifier[O_RDONLY] | identifier[os] . identifier[O_NONBLOCK] )) keyword[as] identifier[treader] :
identifier[val] = identifier[treader] . identifier[read] (). identifier[strip] ()
keyword[if] keyword[not] identifier[val] :
keyword[return] keyword[False]
keyword[try] :
identifier[val] = identifier[int] ( identifier[val] )
keyword[except] identifier[Exception] :
keyword[try] :
identifier[val] = identifier[float] ( identifier[val] )
keyword[except] identifier[Exception] :
keyword[pass]
keyword[return] identifier[val]
keyword[except] identifier[Exception] :
keyword[return] keyword[False] | def read(key, root=''):
"""
Read from SysFS
:param key: file or path in SysFS; if key is a list then root will be prefixed on each key
:return: the full (tree of) SysFS attributes under key
CLI example:
.. code-block:: bash
salt '*' sysfs.read class/net/em1/statistics
"""
if not isinstance(key, six.string_types):
res = {}
for akey in key:
ares = read(os.path.join(root, akey))
if ares is not False:
res[akey] = ares # depends on [control=['if'], data=['ares']] # depends on [control=['for'], data=['akey']]
return res # depends on [control=['if'], data=[]]
key = target(os.path.join(root, key))
if key is False:
return False # depends on [control=['if'], data=[]]
elif os.path.isdir(key):
keys = interfaces(key)
result = {}
for subkey in keys['r'] + keys['rw']:
subval = read(os.path.join(key, subkey))
if subval is not False:
subkeys = subkey.split('/')
subkey = subkeys.pop()
subresult = result
if subkeys:
for skey in subkeys:
if skey not in subresult:
subresult[skey] = {} # depends on [control=['if'], data=['skey', 'subresult']]
subresult = subresult[skey] # depends on [control=['for'], data=['skey']] # depends on [control=['if'], data=[]]
subresult[subkey] = subval # depends on [control=['if'], data=['subval']] # depends on [control=['for'], data=['subkey']]
return result # depends on [control=['if'], data=[]]
else:
try:
log.trace('Reading %s...', key)
# Certain things in SysFS are pipes 'n such.
# This opens it non-blocking, which prevents indefinite blocking
with os.fdopen(os.open(key, os.O_RDONLY | os.O_NONBLOCK)) as treader:
# alternative method for the same idea, but only works for completely empty pipes
# treader = select.select([treader], [], [], 1)[0][0]
val = treader.read().strip()
if not val:
return False # depends on [control=['if'], data=[]]
try:
val = int(val) # depends on [control=['try'], data=[]]
except Exception:
try:
val = float(val) # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
return val # depends on [control=['with'], data=['treader']] # depends on [control=['try'], data=[]]
except Exception:
return False # depends on [control=['except'], data=[]] |
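
The O_NONBLOCK open in isolation: on a pipe-like sysfs node it keeps read() from hanging indefinitely; demonstrated here on an ordinary temp file.

import os
import tempfile

tmp = tempfile.NamedTemporaryFile('w', delete=False)
tmp.write('42\n')
tmp.close()
with os.fdopen(os.open(tmp.name, os.O_RDONLY | os.O_NONBLOCK)) as reader:
    val = reader.read().strip()
assert int(val) == 42
os.remove(tmp.name)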
def filter_missing(self):
"""Filter out individuals and SNPs that have too many missing to be \
considered
:return: None
This must be run prior to actually parsing the genotypes because it
initializes the following instance members:
* ind_mask
* total_locus_count
* locus_count
* data_parser.boundary (adds loci with too much missingness)
"""
missing = None
locus_count = 0
logging.info("Sorting out missing data from genotype data")
# Filter out individuals according to missingness
self.genotype_file.seek(0)
magic, data_format = struct.unpack("<HB", self.genotype_file.read(3))
if data_format != 1:
Exit(("MVTEST is currently unable to read data formatted as " +
"individual major. You must regenerate your data in SNP major"+
" format. "))
self.bytes_per_read = self.ind_count / 4
if self.ind_count % 4 > 0:
self.bytes_per_read += 1
self.fmt_string = "<" + "B"*self.bytes_per_read
last_chr = -1
for index in range(self.locus_count):
buffer = struct.unpack(self.fmt_string,
self.genotype_file.read(self.bytes_per_read))
chr, pos = self.markers[index]
rsid = self.rsids[index]
if DataParser.boundary.TestBoundary(chr, pos, rsid):
if last_chr != chr:
sys.stdout.flush()
last_chr = chr
genotypes = numpy.array(self.extract_genotypes(buffer),
dtype=numpy.int8)
locus_count += 1
if missing is None:
missing = numpy.zeros(genotypes.shape[0], dtype='int8')
missing += 0+(genotypes==DataParser.missing_storage)
max_missing = DataParser.ind_miss_tol * locus_count
dropped_individuals = 0+(max_missing<missing)
self.ind_mask = self.ind_mask|dropped_individuals
valid_individuals = numpy.sum(self.ind_mask==0)
max_missing = DataParser.snp_miss_tol * valid_individuals
# We can't merge these two iterations since we need to know which
# individuals to consider for filtering on MAF
dropped_snps = []
self.genotype_file.seek(0)
self.genotype_file.read(3)
self.total_locus_count = self.locus_count
self.locus_count = 0
last_chr = -1
for index in range(self.total_locus_count):
buffer = struct.unpack(self.fmt_string,
self.genotype_file.read(self.bytes_per_read))
genotypes = numpy.ma.MaskedArray(self.extract_genotypes(buffer),
self.ind_mask).compressed()
chr, pos = self.markers[index]
rsid = self.rsids[index]
if DataParser.boundary.TestBoundary(chr, pos, rsid):
if last_chr != chr:
sys.stdout.flush()
last_chr = chr
missing = numpy.sum(0+(genotypes==DataParser.missing_storage))
if missing > max_missing:
DataParser.boundary.dropped_snps[int(chr)].add(int(pos))
dropped_snps.append(rsid)
else:
self.locus_count += 1 | def function[filter_missing, parameter[self]]:
constant[Filter out individuals and SNPs that have too many missing to be considered
:return: None
This must be run prior to actually parsing the genotypes because it
initializes the following instance members:
* ind_mask
* total_locus_count
* locus_count
* data_parser.boundary (adds loci with too much missingness)
]
variable[missing] assign[=] constant[None]
variable[locus_count] assign[=] constant[0]
call[name[logging].info, parameter[constant[Sorting out missing data from genotype data]]]
call[name[self].genotype_file.seek, parameter[constant[0]]]
<ast.Tuple object at 0x7da2047ea140> assign[=] call[name[struct].unpack, parameter[constant[<HB], call[name[self].genotype_file.read, parameter[constant[3]]]]]
if compare[name[data_format] not_equal[!=] constant[1]] begin[:]
call[name[Exit], parameter[binary_operation[binary_operation[constant[MVTEST is currently unable to read data formatted as ] + constant[individual major. You must regenerate your data in SNP major]] + constant[ format. ]]]]
name[self].bytes_per_read assign[=] binary_operation[name[self].ind_count / constant[4]]
if compare[binary_operation[name[self].ind_count <ast.Mod object at 0x7da2590d6920> constant[4]] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da2047e84c0>
name[self].fmt_string assign[=] binary_operation[constant[<] + binary_operation[constant[B] * name[self].bytes_per_read]]
variable[last_chr] assign[=] <ast.UnaryOp object at 0x7da2047e8490>
for taget[name[index]] in starred[call[name[range], parameter[name[self].locus_count]]] begin[:]
variable[buffer] assign[=] call[name[struct].unpack, parameter[name[self].fmt_string, call[name[self].genotype_file.read, parameter[name[self].bytes_per_read]]]]
<ast.Tuple object at 0x7da2047e9390> assign[=] call[name[self].markers][name[index]]
variable[rsid] assign[=] call[name[self].rsids][name[index]]
if call[name[DataParser].boundary.TestBoundary, parameter[name[chr], name[pos], name[rsid]]] begin[:]
if compare[name[last_chr] not_equal[!=] name[chr]] begin[:]
call[name[sys].stdout.flush, parameter[]]
variable[last_chr] assign[=] name[chr]
variable[genotypes] assign[=] call[name[numpy].array, parameter[call[name[self].extract_genotypes, parameter[name[buffer]]]]]
<ast.AugAssign object at 0x7da2047e8400>
if compare[name[missing] is constant[None]] begin[:]
variable[missing] assign[=] call[name[numpy].zeros, parameter[call[name[genotypes].shape][constant[0]]]]
<ast.AugAssign object at 0x7da2047e8700>
variable[max_missing] assign[=] binary_operation[name[DataParser].ind_miss_tol * name[locus_count]]
variable[dropped_individuals] assign[=] binary_operation[constant[0] + compare[name[max_missing] less[<] name[missing]]]
name[self].ind_mask assign[=] binary_operation[name[self].ind_mask <ast.BitOr object at 0x7da2590d6aa0> name[dropped_individuals]]
variable[valid_individuals] assign[=] call[name[numpy].sum, parameter[compare[name[self].ind_mask equal[==] constant[0]]]]
variable[max_missing] assign[=] binary_operation[name[DataParser].snp_miss_tol * name[valid_individuals]]
variable[dropped_snps] assign[=] list[[]]
call[name[self].genotype_file.seek, parameter[constant[0]]]
call[name[self].genotype_file.read, parameter[constant[3]]]
name[self].total_locus_count assign[=] name[self].locus_count
name[self].locus_count assign[=] constant[0]
variable[last_chr] assign[=] <ast.UnaryOp object at 0x7da18f00cf10>
for taget[name[index]] in starred[call[name[range], parameter[name[self].total_locus_count]]] begin[:]
variable[buffer] assign[=] call[name[struct].unpack, parameter[name[self].fmt_string, call[name[self].genotype_file.read, parameter[name[self].bytes_per_read]]]]
variable[genotypes] assign[=] call[call[name[numpy].ma.MaskedArray, parameter[call[name[self].extract_genotypes, parameter[name[buffer]]], name[self].ind_mask]].compressed, parameter[]]
<ast.Tuple object at 0x7da18f00d720> assign[=] call[name[self].markers][name[index]]
variable[rsid] assign[=] call[name[self].rsids][name[index]]
if call[name[DataParser].boundary.TestBoundary, parameter[name[chr], name[pos], name[rsid]]] begin[:]
if compare[name[last_chr] not_equal[!=] name[chr]] begin[:]
call[name[sys].stdout.flush, parameter[]]
variable[last_chr] assign[=] name[chr]
variable[missing] assign[=] call[name[numpy].sum, parameter[binary_operation[constant[0] + compare[name[genotypes] equal[==] name[DataParser].missing_storage]]]]
if compare[name[missing] greater[>] name[max_missing]] begin[:]
call[call[name[DataParser].boundary.dropped_snps][call[name[int], parameter[name[chr]]]].add, parameter[call[name[int], parameter[name[pos]]]]]
call[name[dropped_snps].append, parameter[name[rsid]]] | keyword[def] identifier[filter_missing] ( identifier[self] ):
literal[string]
identifier[missing] = keyword[None]
identifier[locus_count] = literal[int]
identifier[logging] . identifier[info] ( literal[string] )
identifier[self] . identifier[genotype_file] . identifier[seek] ( literal[int] )
identifier[magic] , identifier[data_format] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[self] . identifier[genotype_file] . identifier[read] ( literal[int] ))
keyword[if] identifier[data_format] != literal[int] :
identifier[Exit] (( literal[string] +
literal[string] +
literal[string] ))
identifier[self] . identifier[bytes_per_read] = identifier[self] . identifier[ind_count] / literal[int]
keyword[if] identifier[self] . identifier[ind_count] % literal[int] > literal[int] :
identifier[self] . identifier[bytes_per_read] += literal[int]
identifier[self] . identifier[fmt_string] = literal[string] + literal[string] * identifier[self] . identifier[bytes_per_read]
identifier[last_chr] =- literal[int]
keyword[for] identifier[index] keyword[in] identifier[range] ( identifier[self] . identifier[locus_count] ):
identifier[buffer] = identifier[struct] . identifier[unpack] ( identifier[self] . identifier[fmt_string] ,
identifier[self] . identifier[genotype_file] . identifier[read] ( identifier[self] . identifier[bytes_per_read] ))
identifier[chr] , identifier[pos] = identifier[self] . identifier[markers] [ identifier[index] ]
identifier[rsid] = identifier[self] . identifier[rsids] [ identifier[index] ]
keyword[if] identifier[DataParser] . identifier[boundary] . identifier[TestBoundary] ( identifier[chr] , identifier[pos] , identifier[rsid] ):
keyword[if] identifier[last_chr] != identifier[chr] :
identifier[sys] . identifier[stdout] . identifier[flush] ()
identifier[last_chr] = identifier[chr]
identifier[genotypes] = identifier[numpy] . identifier[array] ( identifier[self] . identifier[extract_genotypes] ( identifier[buffer] ),
identifier[dtype] = identifier[numpy] . identifier[int8] )
identifier[locus_count] += literal[int]
keyword[if] identifier[missing] keyword[is] keyword[None] :
identifier[missing] = identifier[numpy] . identifier[zeros] ( identifier[genotypes] . identifier[shape] [ literal[int] ], identifier[dtype] = literal[string] )
identifier[missing] += literal[int] +( identifier[genotypes] == identifier[DataParser] . identifier[missing_storage] )
identifier[max_missing] = identifier[DataParser] . identifier[ind_miss_tol] * identifier[locus_count]
identifier[dropped_individuals] = literal[int] +( identifier[max_missing] < identifier[missing] )
identifier[self] . identifier[ind_mask] = identifier[self] . identifier[ind_mask] | identifier[dropped_individuals]
identifier[valid_individuals] = identifier[numpy] . identifier[sum] ( identifier[self] . identifier[ind_mask] == literal[int] )
identifier[max_missing] = identifier[DataParser] . identifier[snp_miss_tol] * identifier[valid_individuals]
identifier[dropped_snps] =[]
identifier[self] . identifier[genotype_file] . identifier[seek] ( literal[int] )
identifier[self] . identifier[genotype_file] . identifier[read] ( literal[int] )
identifier[self] . identifier[total_locus_count] = identifier[self] . identifier[locus_count]
identifier[self] . identifier[locus_count] = literal[int]
identifier[last_chr] =- literal[int]
keyword[for] identifier[index] keyword[in] identifier[range] ( identifier[self] . identifier[total_locus_count] ):
identifier[buffer] = identifier[struct] . identifier[unpack] ( identifier[self] . identifier[fmt_string] ,
identifier[self] . identifier[genotype_file] . identifier[read] ( identifier[self] . identifier[bytes_per_read] ))
identifier[genotypes] = identifier[numpy] . identifier[ma] . identifier[MaskedArray] ( identifier[self] . identifier[extract_genotypes] ( identifier[buffer] ),
identifier[self] . identifier[ind_mask] ). identifier[compressed] ()
identifier[chr] , identifier[pos] = identifier[self] . identifier[markers] [ identifier[index] ]
identifier[rsid] = identifier[self] . identifier[rsids] [ identifier[index] ]
keyword[if] identifier[DataParser] . identifier[boundary] . identifier[TestBoundary] ( identifier[chr] , identifier[pos] , identifier[rsid] ):
keyword[if] identifier[last_chr] != identifier[chr] :
identifier[sys] . identifier[stdout] . identifier[flush] ()
identifier[last_chr] = identifier[chr]
identifier[missing] = identifier[numpy] . identifier[sum] ( literal[int] +( identifier[genotypes] == identifier[DataParser] . identifier[missing_storage] ))
keyword[if] identifier[missing] > identifier[max_missing] :
identifier[DataParser] . identifier[boundary] . identifier[dropped_snps] [ identifier[int] ( identifier[chr] )]. identifier[add] ( identifier[int] ( identifier[pos] ))
identifier[dropped_snps] . identifier[append] ( identifier[rsid] )
keyword[else] :
identifier[self] . identifier[locus_count] += literal[int] | def filter_missing(self):
"""Filter out individuals and SNPs that have too many missing to be considered
:return: None
This must be run prior to actually parsing the genotypes because it
initializes the following instance members:
* ind_mask
* total_locus_count
* locus_count
* data_parser.boundary (adds loci with too much missingness)
"""
missing = None
locus_count = 0
logging.info('Sorting out missing data from genotype data')
# Filter out individuals according to missingness
self.genotype_file.seek(0)
(magic, data_format) = struct.unpack('<HB', self.genotype_file.read(3))
if data_format != 1:
Exit('MVTEST is currently unable to read data formatted as ' + 'individual major. You must regenerate your data in SNP major' + ' format. ') # depends on [control=['if'], data=[]]
self.bytes_per_read = self.ind_count / 4
if self.ind_count % 4 > 0:
self.bytes_per_read += 1 # depends on [control=['if'], data=[]]
self.fmt_string = '<' + 'B' * self.bytes_per_read
last_chr = -1
for index in range(self.locus_count):
buffer = struct.unpack(self.fmt_string, self.genotype_file.read(self.bytes_per_read))
(chr, pos) = self.markers[index]
rsid = self.rsids[index]
if DataParser.boundary.TestBoundary(chr, pos, rsid):
if last_chr != chr:
sys.stdout.flush()
last_chr = chr # depends on [control=['if'], data=['last_chr', 'chr']]
genotypes = numpy.array(self.extract_genotypes(buffer), dtype=numpy.int8)
locus_count += 1
if missing is None:
missing = numpy.zeros(genotypes.shape[0], dtype='int8') # depends on [control=['if'], data=['missing']]
missing += 0 + (genotypes == DataParser.missing_storage) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['index']]
max_missing = DataParser.ind_miss_tol * locus_count
dropped_individuals = 0 + (max_missing < missing)
self.ind_mask = self.ind_mask | dropped_individuals
valid_individuals = numpy.sum(self.ind_mask == 0)
max_missing = DataParser.snp_miss_tol * valid_individuals
# We can't merge these two iterations since we need to know which
# individuals to consider for filtering on MAF
dropped_snps = []
self.genotype_file.seek(0)
self.genotype_file.read(3)
self.total_locus_count = self.locus_count
self.locus_count = 0
last_chr = -1
for index in range(self.total_locus_count):
buffer = struct.unpack(self.fmt_string, self.genotype_file.read(self.bytes_per_read))
genotypes = numpy.ma.MaskedArray(self.extract_genotypes(buffer), self.ind_mask).compressed()
(chr, pos) = self.markers[index]
rsid = self.rsids[index]
if DataParser.boundary.TestBoundary(chr, pos, rsid):
if last_chr != chr:
sys.stdout.flush()
last_chr = chr # depends on [control=['if'], data=['last_chr', 'chr']]
missing = numpy.sum(0 + (genotypes == DataParser.missing_storage))
if missing > max_missing:
DataParser.boundary.dropped_snps[int(chr)].add(int(pos))
dropped_snps.append(rsid) # depends on [control=['if'], data=[]]
else:
self.locus_count += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['index']] |
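Editor's note: the filter_missing record above walks a PLINK .bed file, which starts with a 3-byte header (two magic bytes plus a mode byte, where 1 means SNP-major) followed by rows of 2-bit genotype codes packed four samples per byte. A minimal sketch of that layout, assuming PLINK's standard encoding; unpack_genotypes is an illustrative stand-in for the extract_genotypes method the record calls, not the method itself.

import struct
import numpy

def read_bed_header(fh):
    # PLINK .bed header: magic bytes 0x6C 0x1B, then 0x01 for SNP-major
    # or 0x00 for individual-major (the case the code above rejects).
    magic, mode = struct.unpack('<HB', fh.read(3))
    if magic != 0x1B6C:
        raise ValueError('not a PLINK .bed file')
    return mode

def unpack_genotypes(row_bytes, ind_count, missing_storage=-1):
    # Each byte holds four 2-bit codes, lowest bits first. Assumed mapping:
    # 00 = hom minor, 01 = missing, 10 = het, 11 = hom major.
    codes = numpy.frombuffer(row_bytes, dtype=numpy.uint8)
    twobit = numpy.stack([(codes >> s) & 0b11 for s in (0, 2, 4, 6)], axis=1).ravel()
    lookup = numpy.array([0, missing_storage, 1, 2], dtype=numpy.int8)
    return lookup[twobit[:ind_count]]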
def change_response(x, prob, index):
'''
change every response in x that matches 'index' by randomly sampling from prob
'''
#pdb.set_trace()
N = (x==index).sum()
#x[x==index]=9
x[x==index] = dist.sample(N) | def function[change_response, parameter[x, prob, index]]:
constant[
change every response in x that matches 'index' by randomly sampling from prob
]
variable[N] assign[=] call[compare[name[x] equal[==] name[index]].sum, parameter[]]
call[name[x]][compare[name[x] equal[==] name[index]]] assign[=] call[name[dist].sample, parameter[name[N]]] | keyword[def] identifier[change_response] ( identifier[x] , identifier[prob] , identifier[index] ):
literal[string]
identifier[N] =( identifier[x] == identifier[index] ). identifier[sum] ()
identifier[x] [ identifier[x] == identifier[index] ]= identifier[dist] . identifier[sample] ( identifier[N] ) | def change_response(x, prob, index):
"""
change every response in x that matches 'index' by randomly sampling from prob
"""
#pdb.set_trace()
N = (x == index).sum()
#x[x==index]=9
x[x == index] = dist.sample(N) |
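Editor's note: the change_response record above references a dist object that is never defined and leaves its prob argument unused. A runnable reading of the docstring's stated intent, assuming prob is a vector of category probabilities summing to 1:

import numpy as np

def change_response(x, prob, index):
    # Replace every entry of x equal to `index` with a draw from the
    # categorical distribution described by `prob`.
    x = np.asarray(x)
    mask = x == index
    x[mask] = np.random.choice(len(prob), size=mask.sum(), p=prob)
    return x

change_response(np.array([0, 9, 1, 9]), prob=[0.5, 0.5], index=9)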
def centerdc_gen(self):
"""Return the centered frequency range as a generator.
::
>>> print(list(Range(8).centerdc_gen()))
[-0.5, -0.375, -0.25, -0.125, 0.0, 0.125, 0.25, 0.375]
"""
for a in range(0, self.N):
yield (a-self.N/2) * self.df | def function[centerdc_gen, parameter[self]]:
constant[Return the centered frequency range as a generator.
::
>>> print(list(Range(8).centerdc_gen()))
[-0.5, -0.375, -0.25, -0.125, 0.0, 0.125, 0.25, 0.375]
]
for taget[name[a]] in starred[call[name[range], parameter[constant[0], name[self].N]]] begin[:]
<ast.Yield object at 0x7da1b01e69e0> | keyword[def] identifier[centerdc_gen] ( identifier[self] ):
literal[string]
keyword[for] identifier[a] keyword[in] identifier[range] ( literal[int] , identifier[self] . identifier[N] ):
keyword[yield] ( identifier[a] - identifier[self] . identifier[N] / literal[int] )* identifier[self] . identifier[df] | def centerdc_gen(self):
"""Return the centered frequency range as a generator.
::
>>> print(list(Range(8).centerdc_gen()))
[-0.5, -0.375, -0.25, -0.125, 0.0, 0.125, 0.25, 0.375]
"""
for a in range(0, self.N):
yield ((a - self.N / 2) * self.df) # depends on [control=['for'], data=['a']] |
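Editor's note: a self-contained Range sketch that reproduces the doctest above, assuming df defaults to sampling/N with sampling=1; the same centered bins fall out of numpy.fft.fftshift(numpy.fft.fftfreq(N)).

import numpy

class Range(object):
    def __init__(self, N, sampling=1.0):
        self.N = N
        self.df = sampling / float(N)  # assumed definition of the bin width

    def centerdc_gen(self):
        for a in range(0, self.N):
            yield (a - self.N / 2) * self.df

print(list(Range(8).centerdc_gen()))             # [-0.5, -0.375, ..., 0.375]
print(numpy.fft.fftshift(numpy.fft.fftfreq(8)))  # same bins, as an array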
def memory_read16(self, addr, num_halfwords, zone=None):
"""Reads memory from the target system in units of 16-bits.
Args:
self (JLink): the ``JLink`` instance
addr (int): start address to read from
num_halfwords (int): number of half words to read
zone (str): memory zone to read from
Returns:
List of halfwords read from the target system.
Raises:
JLinkException: if memory could not be read
"""
return self.memory_read(addr, num_halfwords, zone=zone, nbits=16) | def function[memory_read16, parameter[self, addr, num_halfwords, zone]]:
constant[Reads memory from the target system in units of 16-bits.
Args:
self (JLink): the ``JLink`` instance
addr (int): start address to read from
num_halfwords (int): number of half words to read
zone (str): memory zone to read from
Returns:
List of halfwords read from the target system.
Raises:
JLinkException: if memory could not be read
]
return[call[name[self].memory_read, parameter[name[addr], name[num_halfwords]]]] | keyword[def] identifier[memory_read16] ( identifier[self] , identifier[addr] , identifier[num_halfwords] , identifier[zone] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[memory_read] ( identifier[addr] , identifier[num_halfwords] , identifier[zone] = identifier[zone] , identifier[nbits] = literal[int] ) | def memory_read16(self, addr, num_halfwords, zone=None):
"""Reads memory from the target system in units of 16-bits.
Args:
self (JLink): the ``JLink`` instance
addr (int): start address to read from
num_halfwords (int): number of half words to read
zone (str): memory zone to read from
Returns:
List of halfwords read from the target system.
Raises:
JLinkException: if memory could not be read
"""
return self.memory_read(addr, num_halfwords, zone=zone, nbits=16) |
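Editor's note: memory_read16 above is a thin wrapper that forwards to the width-generic memory_read with nbits=16. Hypothetical usage, kept as comments because it needs real J-Link hardware; the device name and address are illustrative:

#   jlink = pylink.JLink()            # assumed pylink-style session
#   jlink.open()
#   jlink.connect('STM32F407VE')
#   halfwords = jlink.memory_read16(0x20000000, 4)   # four 16-bit reads
#   print(['0x%04x' % hw for hw in halfwords])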
def set_section(self, section):
"""Set a section. If section already exists, overwrite the old one.
"""
if not isinstance(section, Section):
raise Exception("You")
try:
self.remove_section(section.name)
except:
pass
self._sections[section.name] = copy.deepcopy(section) | def function[set_section, parameter[self, section]]:
constant[Set a section. If section already exists, overwrite the old one.
]
if <ast.UnaryOp object at 0x7da18eb54df0> begin[:]
<ast.Raise object at 0x7da18eb55f60>
<ast.Try object at 0x7da18eb54a60>
call[name[self]._sections][name[section].name] assign[=] call[name[copy].deepcopy, parameter[name[section]]] | keyword[def] identifier[set_section] ( identifier[self] , identifier[section] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[section] , identifier[Section] ):
keyword[raise] identifier[Exception] ( literal[string] )
keyword[try] :
identifier[self] . identifier[remove_section] ( identifier[section] . identifier[name] )
keyword[except] :
keyword[pass]
identifier[self] . identifier[_sections] [ identifier[section] . identifier[name] ]= identifier[copy] . identifier[deepcopy] ( identifier[section] ) | def set_section(self, section):
"""Set a section. If section already exists, overwrite the old one.
"""
if not isinstance(section, Section):
raise Exception('You') # depends on [control=['if'], data=[]]
try:
self.remove_section(section.name) # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
self._sections[section.name] = copy.deepcopy(section) |
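Editor's note: the deepcopy at the end of set_section is a defensive copy, so later mutation of the caller's Section cannot alter stored state. A hedged tightening of the same method; the TypeError message completes the truncated Exception('You') above, and the KeyError is an assumption about what remove_section raises for unknown names:

import copy

def set_section(self, section):
    """Set a section, replacing any existing section of the same name."""
    if not isinstance(section, Section):
        raise TypeError('expected a Section instance')  # assumed intent
    try:
        self.remove_section(section.name)
    except KeyError:  # assumed failure mode; the original swallows everything
        pass
    self._sections[section.name] = copy.deepcopy(section)  # defensive copy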
def collect(self):
"""
Collector Passenger stats
"""
if not os.access(self.config["bin"], os.X_OK):
self.log.error("Path %s does not exist or is not executable",
self.config["bin"])
return {}
dict_stats = self.get_passenger_memory_stats()
if len(dict_stats.keys()) == 0:
return {}
queue_stats = self.get_passenger_queue_stats()
if len(queue_stats.keys()) == 0:
return {}
overall_cpu = self.get_passenger_cpu_usage(dict_stats)
if overall_cpu >= 0:
self.publish("phusion_passenger_cpu", overall_cpu)
self.publish("total_passenger_procs", len(
dict_stats["passenger_procs"]))
self.publish("total_nginx_procs", len(dict_stats["nginx_procs"]))
self.publish("total_apache_procs", len(dict_stats["apache_procs"]))
self.publish("total_apache_memory", dict_stats["apache_mem_total"])
self.publish("total_nginx_memory", dict_stats["nginx_mem_total"])
self.publish("total_passenger_memory",
dict_stats["passenger_mem_total"])
self.publish("top_level_queue_size", queue_stats[
"top_level_queue_size"])
self.publish("passenger_queue_size", queue_stats[
"passenger_queue_size"]) | def function[collect, parameter[self]]:
constant[
Collector Passenger stats
]
if <ast.UnaryOp object at 0x7da18f7201c0> begin[:]
call[name[self].log.error, parameter[constant[Path %s does not exist or is not executable], call[name[self].config][constant[bin]]]]
return[dictionary[[], []]]
variable[dict_stats] assign[=] call[name[self].get_passenger_memory_stats, parameter[]]
if compare[call[name[len], parameter[call[name[dict_stats].keys, parameter[]]]] equal[==] constant[0]] begin[:]
return[dictionary[[], []]]
variable[queue_stats] assign[=] call[name[self].get_passenger_queue_stats, parameter[]]
if compare[call[name[len], parameter[call[name[queue_stats].keys, parameter[]]]] equal[==] constant[0]] begin[:]
return[dictionary[[], []]]
variable[overall_cpu] assign[=] call[name[self].get_passenger_cpu_usage, parameter[name[dict_stats]]]
if compare[name[overall_cpu] greater_or_equal[>=] constant[0]] begin[:]
call[name[self].publish, parameter[constant[phusion_passenger_cpu], name[overall_cpu]]]
call[name[self].publish, parameter[constant[total_passenger_procs], call[name[len], parameter[call[name[dict_stats]][constant[passenger_procs]]]]]]
call[name[self].publish, parameter[constant[total_nginx_procs], call[name[len], parameter[call[name[dict_stats]][constant[nginx_procs]]]]]]
call[name[self].publish, parameter[constant[total_apache_procs], call[name[len], parameter[call[name[dict_stats]][constant[apache_procs]]]]]]
call[name[self].publish, parameter[constant[total_apache_memory], call[name[dict_stats]][constant[apache_mem_total]]]]
call[name[self].publish, parameter[constant[total_nginx_memory], call[name[dict_stats]][constant[nginx_mem_total]]]]
call[name[self].publish, parameter[constant[total_passenger_memory], call[name[dict_stats]][constant[passenger_mem_total]]]]
call[name[self].publish, parameter[constant[top_level_queue_size], call[name[queue_stats]][constant[top_level_queue_size]]]]
call[name[self].publish, parameter[constant[passenger_queue_size], call[name[queue_stats]][constant[passenger_queue_size]]]] | keyword[def] identifier[collect] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[access] ( identifier[self] . identifier[config] [ literal[string] ], identifier[os] . identifier[X_OK] ):
identifier[self] . identifier[log] . identifier[error] ( literal[string] ,
identifier[self] . identifier[config] [ literal[string] ])
keyword[return] {}
identifier[dict_stats] = identifier[self] . identifier[get_passenger_memory_stats] ()
keyword[if] identifier[len] ( identifier[dict_stats] . identifier[keys] ())== literal[int] :
keyword[return] {}
identifier[queue_stats] = identifier[self] . identifier[get_passenger_queue_stats] ()
keyword[if] identifier[len] ( identifier[queue_stats] . identifier[keys] ())== literal[int] :
keyword[return] {}
identifier[overall_cpu] = identifier[self] . identifier[get_passenger_cpu_usage] ( identifier[dict_stats] )
keyword[if] identifier[overall_cpu] >= literal[int] :
identifier[self] . identifier[publish] ( literal[string] , identifier[overall_cpu] )
identifier[self] . identifier[publish] ( literal[string] , identifier[len] (
identifier[dict_stats] [ literal[string] ]))
identifier[self] . identifier[publish] ( literal[string] , identifier[len] ( identifier[dict_stats] [ literal[string] ]))
identifier[self] . identifier[publish] ( literal[string] , identifier[len] ( identifier[dict_stats] [ literal[string] ]))
identifier[self] . identifier[publish] ( literal[string] , identifier[dict_stats] [ literal[string] ])
identifier[self] . identifier[publish] ( literal[string] , identifier[dict_stats] [ literal[string] ])
identifier[self] . identifier[publish] ( literal[string] ,
identifier[dict_stats] [ literal[string] ])
identifier[self] . identifier[publish] ( literal[string] , identifier[queue_stats] [
literal[string] ])
identifier[self] . identifier[publish] ( literal[string] , identifier[queue_stats] [
literal[string] ]) | def collect(self):
"""
Collector Passenger stats
"""
if not os.access(self.config['bin'], os.X_OK):
self.log.error('Path %s does not exist or is not executable', self.config['bin'])
return {} # depends on [control=['if'], data=[]]
dict_stats = self.get_passenger_memory_stats()
if len(dict_stats.keys()) == 0:
return {} # depends on [control=['if'], data=[]]
queue_stats = self.get_passenger_queue_stats()
if len(queue_stats.keys()) == 0:
return {} # depends on [control=['if'], data=[]]
overall_cpu = self.get_passenger_cpu_usage(dict_stats)
if overall_cpu >= 0:
self.publish('phusion_passenger_cpu', overall_cpu) # depends on [control=['if'], data=['overall_cpu']]
self.publish('total_passenger_procs', len(dict_stats['passenger_procs']))
self.publish('total_nginx_procs', len(dict_stats['nginx_procs']))
self.publish('total_apache_procs', len(dict_stats['apache_procs']))
self.publish('total_apache_memory', dict_stats['apache_mem_total'])
self.publish('total_nginx_memory', dict_stats['nginx_mem_total'])
self.publish('total_passenger_memory', dict_stats['passenger_mem_total'])
self.publish('top_level_queue_size', queue_stats['top_level_queue_size'])
self.publish('passenger_queue_size', queue_stats['passenger_queue_size']) |
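Editor's note: the collect record above publishes each total with a separate call; inside collect, the same tail could be table-driven, assuming the same stat names and dict_stats keys (illustrative refactor only):

totals = {
    'total_passenger_procs': len(dict_stats['passenger_procs']),
    'total_nginx_procs': len(dict_stats['nginx_procs']),
    'total_apache_procs': len(dict_stats['apache_procs']),
    'total_apache_memory': dict_stats['apache_mem_total'],
    'total_nginx_memory': dict_stats['nginx_mem_total'],
    'total_passenger_memory': dict_stats['passenger_mem_total'],
}
for name, value in totals.items():
    self.publish(name, value)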
def check_gas_reserve(raiden):
""" Check periodically for gas reserve in the account """
while True:
has_enough_balance, estimated_required_balance = gas_reserve.has_enough_gas_reserve(
raiden,
channels_to_open=1,
)
estimated_required_balance_eth = Web3.fromWei(estimated_required_balance, 'ether')
if not has_enough_balance:
log.info('Missing gas reserve', required_wei=estimated_required_balance)
click.secho(
(
'WARNING\n'
"Your account's balance is below the estimated gas reserve of "
f'{estimated_required_balance_eth} eth. This may lead to a loss of '
'of funds because your account will be unable to perform on-chain '
'transactions. Please add funds to your account as soon as possible.'
),
fg='red',
)
gevent.sleep(CHECK_GAS_RESERVE_INTERVAL) | def function[check_gas_reserve, parameter[raiden]]:
constant[ Check periodically for gas reserve in the account ]
while constant[True] begin[:]
<ast.Tuple object at 0x7da1b1711ba0> assign[=] call[name[gas_reserve].has_enough_gas_reserve, parameter[name[raiden]]]
variable[estimated_required_balance_eth] assign[=] call[name[Web3].fromWei, parameter[name[estimated_required_balance], constant[ether]]]
if <ast.UnaryOp object at 0x7da1b1711660> begin[:]
call[name[log].info, parameter[constant[Missing gas reserve]]]
call[name[click].secho, parameter[<ast.JoinedStr object at 0x7da1b1711d80>]]
call[name[gevent].sleep, parameter[name[CHECK_GAS_RESERVE_INTERVAL]]] | keyword[def] identifier[check_gas_reserve] ( identifier[raiden] ):
literal[string]
keyword[while] keyword[True] :
identifier[has_enough_balance] , identifier[estimated_required_balance] = identifier[gas_reserve] . identifier[has_enough_gas_reserve] (
identifier[raiden] ,
identifier[channels_to_open] = literal[int] ,
)
identifier[estimated_required_balance_eth] = identifier[Web3] . identifier[fromWei] ( identifier[estimated_required_balance] , literal[string] )
keyword[if] keyword[not] identifier[has_enough_balance] :
identifier[log] . identifier[info] ( literal[string] , identifier[required_wei] = identifier[estimated_required_balance] )
identifier[click] . identifier[secho] (
(
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
),
identifier[fg] = literal[string] ,
)
identifier[gevent] . identifier[sleep] ( identifier[CHECK_GAS_RESERVE_INTERVAL] ) | def check_gas_reserve(raiden):
""" Check periodically for gas reserve in the account """
while True:
(has_enough_balance, estimated_required_balance) = gas_reserve.has_enough_gas_reserve(raiden, channels_to_open=1)
estimated_required_balance_eth = Web3.fromWei(estimated_required_balance, 'ether')
if not has_enough_balance:
log.info('Missing gas reserve', required_wei=estimated_required_balance)
click.secho(f"WARNING\nYour account's balance is below the estimated gas reserve of {estimated_required_balance_eth} eth. This may lead to a loss of of funds because your account will be unable to perform on-chain transactions. Please add funds to your account as soon as possible.", fg='red') # depends on [control=['if'], data=[]]
gevent.sleep(CHECK_GAS_RESERVE_INTERVAL) # depends on [control=['while'], data=[]] |
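Editor's note: check_gas_reserve never returns, so it is meant to run as its own greenlet. A minimal launch sketch, assuming a gevent runtime and that `raiden` names the running service object:

import gevent

watchdog = gevent.spawn(check_gas_reserve, raiden)  # `raiden` is assumed here
# ... later, on shutdown:
watchdog.kill()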
def _stdout_filed(func):
"""
Instance method decorator to convert an optional file keyword
argument into an actual value, whether it be a passed value, a
value obtained from an io_manager, or sys.stdout.
"""
def wrapper(self, file=None):
if file:
return func(self, file=file)
elif self.io_manager:
with self.io_manager.with_stdout() as stdout:
return func(self, file=stdout)
else:
return func(self, file=sys.stdout)
wrapper.__doc__ = func.__doc__
return wrapper | def function[_stdout_filed, parameter[func]]:
constant[
Instance method decorator to convert an optional file keyword
argument into an actual value, whether it be a passed value, a
value obtained from an io_manager, or sys.stdout.
]
def function[wrapper, parameter[self, file]]:
if name[file] begin[:]
return[call[name[func], parameter[name[self]]]]
name[wrapper].__doc__ assign[=] name[func].__doc__
return[name[wrapper]] | keyword[def] identifier[_stdout_filed] ( identifier[func] ):
literal[string]
keyword[def] identifier[wrapper] ( identifier[self] , identifier[file] = keyword[None] ):
keyword[if] identifier[file] :
keyword[return] identifier[func] ( identifier[self] , identifier[file] = identifier[file] )
keyword[elif] identifier[self] . identifier[io_manager] :
keyword[with] identifier[self] . identifier[io_manager] . identifier[with_stdout] () keyword[as] identifier[stdout] :
keyword[return] identifier[func] ( identifier[self] , identifier[file] = identifier[stdout] )
keyword[else] :
keyword[return] identifier[func] ( identifier[self] , identifier[file] = identifier[sys] . identifier[stdout] )
identifier[wrapper] . identifier[__doc__] = identifier[func] . identifier[__doc__]
keyword[return] identifier[wrapper] | def _stdout_filed(func):
"""
Instance method decorator to convert an optional file keyword
argument into an actual value, whether it be a passed value, a
value obtained from an io_manager, or sys.stdout.
"""
def wrapper(self, file=None):
if file:
return func(self, file=file) # depends on [control=['if'], data=[]]
elif self.io_manager:
with self.io_manager.with_stdout() as stdout:
return func(self, file=stdout) # depends on [control=['with'], data=['stdout']] # depends on [control=['if'], data=[]]
else:
return func(self, file=sys.stdout)
wrapper.__doc__ = func.__doc__
return wrapper |
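Editor's note: hedged usage of the _stdout_filed decorator above; only the io_manager attribute is assumed by the wrapper, everything else here is illustrative. The manual __doc__ copy in the decorator could equally be functools.wraps, which also preserves __name__.

import sys

class Report(object):
    io_manager = None  # no manager configured, so output falls through

    @_stdout_filed
    def summary(self, file=None):
        print('summary goes here', file=file)

Report().summary()                    # writes to sys.stdout
Report().summary(file=sys.stderr)     # an explicit file wins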
def encrypt(self, data, *recipients, **kwargs):
"""Encrypt the message contained in ``data`` to ``recipients``.
:param str data: The file or bytestream to encrypt.
:param str recipients: The recipients to encrypt to. Recipients must
be specified keyID/fingerprint. Care should be taken in Python2.x
to make sure that the given fingerprint is in fact a string and
not a unicode object. Multiple recipients may be specified by
doing ``GPG.encrypt(data, fpr1, fpr2, fpr3)`` etc.
:param str default_key: The keyID/fingerprint of the key to use for
signing. If given, ``data`` will be encrypted and signed.
:param str passphrase: If given, and ``default_key`` is also given,
use this passphrase to unlock the secret portion of the
``default_key`` to sign the encrypted ``data``. Otherwise, if
``default_key`` is not given, but ``symmetric=True``, then use
this passphrase as the passphrase for symmetric
encryption. Signing and symmetric encryption should *not* be
combined when sending the ``data`` to other recipients, else the
passphrase to the secret key would be shared with them.
:param bool armor: If True, ascii armor the output; otherwise, the
output will be in binary format. (Default: True)
:param bool encrypt: If True, encrypt the ``data`` using the
``recipients`` public keys. (Default: True)
:param bool symmetric: If True, encrypt the ``data`` to ``recipients``
using a symmetric key. See the ``passphrase`` parameter. Symmetric
encryption and public key encryption can be used simultaneously,
and will result in a ciphertext which is decryptable with either
the symmetric ``passphrase`` or one of the corresponding private
keys.
:param bool always_trust: If True, ignore trust warnings on recipient
keys. If False, display trust warnings. (default: True)
:param str output: The output file to write to. If not specified, the
encrypted output is returned, and thus should be stored as an
object in Python. For example:
>>> import shutil
>>> import gnupg
>>> if os.path.exists("doctests"):
... shutil.rmtree("doctests")
>>> gpg = gnupg.GPG(homedir="doctests")
>>> key_settings = gpg.gen_key_input(key_type='RSA',
... key_length=1024,
... key_usage='ESCA',
... passphrase='foo')
>>> key = gpg.gen_key(key_settings)
>>> message = "The crow flies at midnight."
>>> encrypted = str(gpg.encrypt(message, key.fingerprint))
>>> assert encrypted != message
>>> assert not encrypted.isspace()
>>> decrypted = str(gpg.decrypt(encrypted, passphrase='foo'))
>>> assert not decrypted.isspace()
>>> decrypted
'The crow flies at midnight.'
:param bool throw_keyids: If True, make all **recipients** keyids be
zero'd out in packet information. This is the same as using
**hidden_recipients** for all **recipients**. (Default: False).
:param list hidden_recipients: A list of recipients that should have
their keyids zero'd out in packet information.
:param str cipher_algo: The cipher algorithm to use. To see available
algorithms with your version of GnuPG, do:
:command:`$ gpg --with-colons --list-config ciphername`.
The default ``cipher_algo``, if unspecified, is ``'AES256'``.
:param str digest_algo: The hash digest to use. Again, to see which
hashes your GnuPG is capable of using, do:
:command:`$ gpg --with-colons --list-config digestname`.
The default, if unspecified, is ``'SHA512'``.
:param str compress_algo: The compression algorithm to use. Can be one
of ``'ZLIB'``, ``'BZIP2'``, ``'ZIP'``, or ``'Uncompressed'``.
.. seealso:: :meth:`._encrypt`
"""
if _is_stream(data):
stream = data
else:
stream = _make_binary_stream(data, self._encoding)
result = self._encrypt(stream, recipients, **kwargs)
stream.close()
return result | def function[encrypt, parameter[self, data]]:
constant[Encrypt the message contained in ``data`` to ``recipients``.
:param str data: The file or bytestream to encrypt.
:param str recipients: The recipients to encrypt to. Recipients must
be specified keyID/fingerprint. Care should be taken in Python2.x
to make sure that the given fingerprint is in fact a string and
not a unicode object. Multiple recipients may be specified by
doing ``GPG.encrypt(data, fpr1, fpr2, fpr3)`` etc.
:param str default_key: The keyID/fingerprint of the key to use for
signing. If given, ``data`` will be encrypted and signed.
:param str passphrase: If given, and ``default_key`` is also given,
use this passphrase to unlock the secret portion of the
``default_key`` to sign the encrypted ``data``. Otherwise, if
``default_key`` is not given, but ``symmetric=True``, then use
this passphrase as the passphrase for symmetric
encryption. Signing and symmetric encryption should *not* be
combined when sending the ``data`` to other recipients, else the
passphrase to the secret key would be shared with them.
:param bool armor: If True, ascii armor the output; otherwise, the
output will be in binary format. (Default: True)
:param bool encrypt: If True, encrypt the ``data`` using the
``recipients`` public keys. (Default: True)
:param bool symmetric: If True, encrypt the ``data`` to ``recipients``
using a symmetric key. See the ``passphrase`` parameter. Symmetric
encryption and public key encryption can be used simultaneously,
and will result in a ciphertext which is decryptable with either
the symmetric ``passphrase`` or one of the corresponding private
keys.
:param bool always_trust: If True, ignore trust warnings on recipient
keys. If False, display trust warnings. (default: True)
:param str output: The output file to write to. If not specified, the
encrypted output is returned, and thus should be stored as an
object in Python. For example:
>>> import shutil
>>> import gnupg
>>> if os.path.exists("doctests"):
... shutil.rmtree("doctests")
>>> gpg = gnupg.GPG(homedir="doctests")
>>> key_settings = gpg.gen_key_input(key_type='RSA',
... key_length=1024,
... key_usage='ESCA',
... passphrase='foo')
>>> key = gpg.gen_key(key_settings)
>>> message = "The crow flies at midnight."
>>> encrypted = str(gpg.encrypt(message, key.fingerprint))
>>> assert encrypted != message
>>> assert not encrypted.isspace()
>>> decrypted = str(gpg.decrypt(encrypted, passphrase='foo'))
>>> assert not decrypted.isspace()
>>> decrypted
'The crow flies at midnight.'
:param bool throw_keyids: If True, make all **recipients** keyids be
zero'd out in packet information. This is the same as using
**hidden_recipients** for all **recipients**. (Default: False).
:param list hidden_recipients: A list of recipients that should have
their keyids zero'd out in packet information.
:param str cipher_algo: The cipher algorithm to use. To see available
algorithms with your version of GnuPG, do:
:command:`$ gpg --with-colons --list-config ciphername`.
The default ``cipher_algo``, if unspecified, is ``'AES256'``.
:param str digest_algo: The hash digest to use. Again, to see which
hashes your GnuPG is capable of using, do:
:command:`$ gpg --with-colons --list-config digestname`.
The default, if unspecified, is ``'SHA512'``.
:param str compress_algo: The compression algorithm to use. Can be one
of ``'ZLIB'``, ``'BZIP2'``, ``'ZIP'``, or ``'Uncompressed'``.
.. seealso:: :meth:`._encrypt`
]
if call[name[_is_stream], parameter[name[data]]] begin[:]
variable[stream] assign[=] name[data]
variable[result] assign[=] call[name[self]._encrypt, parameter[name[stream], name[recipients]]]
call[name[stream].close, parameter[]]
return[name[result]] | keyword[def] identifier[encrypt] ( identifier[self] , identifier[data] ,* identifier[recipients] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[_is_stream] ( identifier[data] ):
identifier[stream] = identifier[data]
keyword[else] :
identifier[stream] = identifier[_make_binary_stream] ( identifier[data] , identifier[self] . identifier[_encoding] )
identifier[result] = identifier[self] . identifier[_encrypt] ( identifier[stream] , identifier[recipients] ,** identifier[kwargs] )
identifier[stream] . identifier[close] ()
keyword[return] identifier[result] | def encrypt(self, data, *recipients, **kwargs):
"""Encrypt the message contained in ``data`` to ``recipients``.
:param str data: The file or bytestream to encrypt.
:param str recipients: The recipients to encrypt to. Recipients must
be specified keyID/fingerprint. Care should be taken in Python2.x
to make sure that the given fingerprint is in fact a string and
not a unicode object. Multiple recipients may be specified by
doing ``GPG.encrypt(data, fpr1, fpr2, fpr3)`` etc.
:param str default_key: The keyID/fingerprint of the key to use for
signing. If given, ``data`` will be encrypted and signed.
:param str passphrase: If given, and ``default_key`` is also given,
use this passphrase to unlock the secret portion of the
``default_key`` to sign the encrypted ``data``. Otherwise, if
``default_key`` is not given, but ``symmetric=True``, then use
this passphrase as the passphrase for symmetric
encryption. Signing and symmetric encryption should *not* be
combined when sending the ``data`` to other recipients, else the
passphrase to the secret key would be shared with them.
:param bool armor: If True, ascii armor the output; otherwise, the
output will be in binary format. (Default: True)
:param bool encrypt: If True, encrypt the ``data`` using the
``recipients`` public keys. (Default: True)
:param bool symmetric: If True, encrypt the ``data`` to ``recipients``
using a symmetric key. See the ``passphrase`` parameter. Symmetric
encryption and public key encryption can be used simultaneously,
and will result in a ciphertext which is decryptable with either
the symmetric ``passphrase`` or one of the corresponding private
keys.
:param bool always_trust: If True, ignore trust warnings on recipient
keys. If False, display trust warnings. (default: True)
:param str output: The output file to write to. If not specified, the
encrypted output is returned, and thus should be stored as an
object in Python. For example:
>>> import shutil
>>> import gnupg
>>> if os.path.exists("doctests"):
... shutil.rmtree("doctests")
>>> gpg = gnupg.GPG(homedir="doctests")
>>> key_settings = gpg.gen_key_input(key_type='RSA',
... key_length=1024,
... key_usage='ESCA',
... passphrase='foo')
>>> key = gpg.gen_key(key_settings)
>>> message = "The crow flies at midnight."
>>> encrypted = str(gpg.encrypt(message, key.fingerprint))
>>> assert encrypted != message
>>> assert not encrypted.isspace()
>>> decrypted = str(gpg.decrypt(encrypted, passphrase='foo'))
>>> assert not decrypted.isspace()
>>> decrypted
'The crow flies at midnight.'
:param bool throw_keyids: If True, make all **recipients** keyids be
zero'd out in packet information. This is the same as using
**hidden_recipients** for all **recipients**. (Default: False).
:param list hidden_recipients: A list of recipients that should have
their keyids zero'd out in packet information.
:param str cipher_algo: The cipher algorithm to use. To see available
algorithms with your version of GnuPG, do:
:command:`$ gpg --with-colons --list-config ciphername`.
The default ``cipher_algo``, if unspecified, is ``'AES256'``.
:param str digest_algo: The hash digest to use. Again, to see which
hashes your GnuPG is capable of using, do:
:command:`$ gpg --with-colons --list-config digestname`.
The default, if unspecified, is ``'SHA512'``.
:param str compress_algo: The compression algorithm to use. Can be one
of ``'ZLIB'``, ``'BZIP2'``, ``'ZIP'``, or ``'Uncompressed'``.
.. seealso:: :meth:`._encrypt`
"""
if _is_stream(data):
stream = data # depends on [control=['if'], data=[]]
else:
stream = _make_binary_stream(data, self._encoding)
result = self._encrypt(stream, recipients, **kwargs)
stream.close()
return result |
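Editor's note: a file-to-file usage sketch for the encrypt method above, sticking to parameters its docstring documents (homedir, output, armor, always_trust); the input file and fingerprint are hypothetical:

import gnupg

gpg = gnupg.GPG(homedir='doctests')
with open('report.txt', 'rb') as fh:
    gpg.encrypt(fh, 'A1B2C3D4E5F60718',
                output='report.txt.asc', armor=True, always_trust=True)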