code | code_sememe | token_type | code_dependency
---|---|---|---|
def aliased(aliased_class):
"""
Decorator function that *must* be used in combination with the @alias
decorator. This decorator makes the magic happen!
@aliased classes will have their aliased methods (via @alias) actually
aliased.
This function simply iterates over the member attributes of 'aliased_class'
looking for those that have an '_aliases' attribute and then defines new
members in the class using those aliases as mere pointer functions to the
original ones.
Usage:
>>> @aliased
... class MyClass(object):
... @alias('coolMethod', 'myKinkyMethod')
... def boring_method(self):
... pass
...
... @property
... @alias('my_prop_alias')
... def my_prop(self):
... return "hi"
>>> i = MyClass()
>>> i.coolMethod() # equivalent to i.myKinkyMethod() and i.boring_method()
>>> i.my_prop == i.my_prop_alias
True
"""
original_methods = aliased_class.__dict__.copy()
original_methods_set = set(original_methods)
for name, method in original_methods.items():
aliases = None
if isinstance(method, property) and hasattr(method.fget, '_aliases'):
aliases = method.fget._aliases
elif hasattr(method, '_aliases'):
aliases = method._aliases
if aliases:
# Add the aliases for 'method', but don't override any
# previously-defined attribute of 'aliased_class'
for alias in aliases - original_methods_set:
setattr(aliased_class, alias, method)
return aliased_class | def function[aliased, parameter[aliased_class]]:
constant[
Decorator function that *must* be used in combination with the @alias
decorator. This decorator makes the magic happen!
@aliased classes will have their aliased methods (via @alias) actually
aliased.
This function simply iterates over the member attributes of 'aliased_class'
looking for those that have an '_aliases' attribute and then defines new
members in the class using those aliases as mere pointer functions to the
original ones.
Usage:
>>> @aliased
... class MyClass(object):
... @alias('coolMethod', 'myKinkyMethod')
... def boring_method(self):
... pass
...
... @property
... @alias('my_prop_alias')
... def my_prop(self):
... return "hi"
>>> i = MyClass()
>>> i.coolMethod() # equivalent to i.myKinkyMethod() and i.boring_method()
>>> i.my_prop == i.my_prop_alias
True
]
variable[original_methods] assign[=] call[name[aliased_class].__dict__.copy, parameter[]]
variable[original_methods_set] assign[=] call[name[set], parameter[name[original_methods]]]
for taget[tuple[[<ast.Name object at 0x7da20c6e5b40>, <ast.Name object at 0x7da20c6e6a40>]]] in starred[call[name[original_methods].items, parameter[]]] begin[:]
variable[aliases] assign[=] constant[None]
if <ast.BoolOp object at 0x7da20c6e7a60> begin[:]
variable[aliases] assign[=] name[method].fget._aliases
if name[aliases] begin[:]
for taget[name[alias]] in starred[binary_operation[name[aliases] - name[original_methods_set]]] begin[:]
call[name[setattr], parameter[name[aliased_class], name[alias], name[method]]]
return[name[aliased_class]] | keyword[def] identifier[aliased] ( identifier[aliased_class] ):
literal[string]
identifier[original_methods] = identifier[aliased_class] . identifier[__dict__] . identifier[copy] ()
identifier[original_methods_set] = identifier[set] ( identifier[original_methods] )
keyword[for] identifier[name] , identifier[method] keyword[in] identifier[original_methods] . identifier[items] ():
identifier[aliases] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[method] , identifier[property] ) keyword[and] identifier[hasattr] ( identifier[method] . identifier[fget] , literal[string] ):
identifier[aliases] = identifier[method] . identifier[fget] . identifier[_aliases]
keyword[elif] identifier[hasattr] ( identifier[method] , literal[string] ):
identifier[aliases] = identifier[method] . identifier[_aliases]
keyword[if] identifier[aliases] :
keyword[for] identifier[alias] keyword[in] identifier[aliases] - identifier[original_methods_set] :
identifier[setattr] ( identifier[aliased_class] , identifier[alias] , identifier[method] )
keyword[return] identifier[aliased_class] | def aliased(aliased_class):
"""
Decorator function that *must* be used in combination with the @alias
decorator. This decorator makes the magic happen!
@aliased classes will have their aliased methods (via @alias) actually
aliased.
This function simply iterates over the member attributes of 'aliased_class'
looking for those that have an '_aliases' attribute and then defines new
members in the class using those aliases as mere pointer functions to the
original ones.
Usage:
>>> @aliased
... class MyClass(object):
... @alias('coolMethod', 'myKinkyMethod')
... def boring_method(self):
... pass
...
... @property
... @alias('my_prop_alias')
... def my_prop(self):
... return "hi"
>>> i = MyClass()
>>> i.coolMethod() # equivalent to i.myKinkyMethod() and i.boring_method()
>>> i.my_prop == i.my_prop_alias
True
"""
original_methods = aliased_class.__dict__.copy()
original_methods_set = set(original_methods)
for (name, method) in original_methods.items():
aliases = None
if isinstance(method, property) and hasattr(method.fget, '_aliases'):
aliases = method.fget._aliases # depends on [control=['if'], data=[]]
elif hasattr(method, '_aliases'):
aliases = method._aliases # depends on [control=['if'], data=[]]
if aliases:
# Add the aliases for 'method', but don't override any
# previously-defined attribute of 'aliased_class'
for alias in aliases - original_methods_set:
setattr(aliased_class, alias, method) # depends on [control=['for'], data=['alias']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return aliased_class |
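For context, a minimal sketch of the companion @alias decorator that the docstring above requires but this row does not show; the name and behavior are assumptions inferred from the code, which subtracts `_aliases` as a set:

def alias(*names):
    """Hypothetical companion decorator: records the requested alias
    names on the decorated callable. A set is used because aliased()
    computes `aliases - original_methods_set`."""
    def decorator(func):
        func._aliases = set(names)
        return func
    return decorator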
def prepare(self, data_source):
"""
Called with the training data.
@param data_source: Either a pandas.DataFrame or a file-like object.
"""
dataframe = self.__get_dataframe(data_source, use_target=True)
self.__config.get_data_model().set_features_types_from_dataframe(dataframe)
dataframe = self.__cleaner.prepare(dataframe)
return self.__transformer.prepare(dataframe) | def function[prepare, parameter[self, data_source]]:
constant[
Called with the training data.
@param data_source: Either a pandas.DataFrame or a file-like object.
]
variable[dataframe] assign[=] call[name[self].__get_dataframe, parameter[name[data_source]]]
call[call[name[self].__config.get_data_model, parameter[]].set_features_types_from_dataframe, parameter[name[dataframe]]]
variable[dataframe] assign[=] call[name[self].__cleaner.prepare, parameter[name[dataframe]]]
return[call[name[self].__transformer.prepare, parameter[name[dataframe]]]] | keyword[def] identifier[prepare] ( identifier[self] , identifier[data_source] ):
literal[string]
identifier[dataframe] = identifier[self] . identifier[__get_dataframe] ( identifier[data_source] , identifier[use_target] = keyword[True] )
identifier[self] . identifier[__config] . identifier[get_data_model] (). identifier[set_features_types_from_dataframe] ( identifier[dataframe] )
identifier[dataframe] = identifier[self] . identifier[__cleaner] . identifier[prepare] ( identifier[dataframe] )
keyword[return] identifier[self] . identifier[__transformer] . identifier[prepare] ( identifier[dataframe] ) | def prepare(self, data_source):
"""
Called with the training data.
@param data_source: Either a pandas.DataFrame or a file-like object.
"""
dataframe = self.__get_dataframe(data_source, use_target=True)
self.__config.get_data_model().set_features_types_from_dataframe(dataframe)
dataframe = self.__cleaner.prepare(dataframe)
return self.__transformer.prepare(dataframe) |
def r_dts_collection(self, objectId=None):
""" DTS Collection Metadata reply for given objectId
:param objectId: Collection Identifier
:return: JSON Format of DTS Collection
"""
try:
j = self.resolver.getMetadata(objectId=objectId).export(Mimetypes.JSON.DTS.Std)
j = jsonify(j)
j.status_code = 200
except NautilusError as E:
return self.dts_error(error_name=E.__class__.__name__, message=E.__doc__)
return j | def function[r_dts_collection, parameter[self, objectId]]:
constant[ DTS Collection Metadata reply for given objectId
:param objectId: Collection Identifier
:return: JSON Format of DTS Collection
]
<ast.Try object at 0x7da204621060>
return[name[j]] | keyword[def] identifier[r_dts_collection] ( identifier[self] , identifier[objectId] = keyword[None] ):
literal[string]
keyword[try] :
identifier[j] = identifier[self] . identifier[resolver] . identifier[getMetadata] ( identifier[objectId] = identifier[objectId] ). identifier[export] ( identifier[Mimetypes] . identifier[JSON] . identifier[DTS] . identifier[Std] )
identifier[j] = identifier[jsonify] ( identifier[j] )
identifier[j] . identifier[status_code] = literal[int]
keyword[except] identifier[NautilusError] keyword[as] identifier[E] :
keyword[return] identifier[self] . identifier[dts_error] ( identifier[error_name] = identifier[E] . identifier[__class__] . identifier[__name__] , identifier[message] = identifier[E] . identifier[__doc__] )
keyword[return] identifier[j] | def r_dts_collection(self, objectId=None):
""" DTS Collection Metadata reply for given objectId
:param objectId: Collection Identifier
:return: JSON Format of DTS Collection
"""
try:
j = self.resolver.getMetadata(objectId=objectId).export(Mimetypes.JSON.DTS.Std)
j = jsonify(j)
j.status_code = 200 # depends on [control=['try'], data=[]]
except NautilusError as E:
return self.dts_error(error_name=E.__class__.__name__, message=E.__doc__) # depends on [control=['except'], data=['E']]
return j |
def on_message(self, handler, msg):
""" In remote debugging mode this simply acts as a forwarding
proxy for the two clients.
"""
if self.remote_debugging:
#: Forward to other clients
for h in self.handlers:
if h != handler:
h.write_message(msg, True)
else:
print(msg) | def function[on_message, parameter[self, handler, msg]]:
constant[ In remote debugging mode this simply acts as a forwarding
proxy for the two clients.
]
if name[self].remote_debugging begin[:]
for taget[name[h]] in starred[name[self].handlers] begin[:]
if compare[name[h] not_equal[!=] name[handler]] begin[:]
call[name[h].write_message, parameter[name[msg], constant[True]]] | keyword[def] identifier[on_message] ( identifier[self] , identifier[handler] , identifier[msg] ):
literal[string]
keyword[if] identifier[self] . identifier[remote_debugging] :
keyword[for] identifier[h] keyword[in] identifier[self] . identifier[handlers] :
keyword[if] identifier[h] != identifier[handler] :
identifier[h] . identifier[write_message] ( identifier[msg] , keyword[True] )
keyword[else] :
identifier[print] ( identifier[msg] ) | def on_message(self, handler, msg):
""" In remote debugging mode this simply acts as a forwarding
proxy for the two clients.
"""
if self.remote_debugging:
#: Forward to other clients
for h in self.handlers:
if h != handler:
h.write_message(msg, True) # depends on [control=['if'], data=['h']] # depends on [control=['for'], data=['h']] # depends on [control=['if'], data=[]]
else:
print(msg) |
def gen_file_lines(path, mode='rUb', strip_eol=True, ascii=True, eol='\n'):
"""Generate a sequence of "documents" from the lines in a file
Arguments:
path (file or str): path to a file or an open file_obj ready to be read
mode (str): file mode to open a file in
strip_eol (bool): whether to strip the EOL char from lines as they are read/generated/yielded
ascii (bool): whether to use the stringify and to_ascii functions on each line
eol (str): UNUSED character delimiting lines in the file
TODO:
Use `eol` to split lines (currently ignored because `file.readline` doesn't take an EOL arg)
"""
if isinstance(path, str):
path = open(path, mode)
with path:
# TODO: read one char at a time looking for the eol char and yielding the intervening chars
for line in path:
if ascii:
line = str(line)
if strip_eol:
line = line.rstrip('\n')
yield line | def function[gen_file_lines, parameter[path, mode, strip_eol, ascii, eol]]:
constant[Generate a sequence of "documents" from the lines in a file
Arguments:
path (file or str): path to a file or an open file_obj ready to be read
mode (str): file mode to open a file in
strip_eol (bool): whether to strip the EOL char from lines as they are read/generated/yielded
ascii (bool): whether to use the stringify and to_ascii functions on each line
eol (str): UNUSED character delimiting lines in the file
TODO:
Use `eol` to split lines (currently ignored because `file.readline` doesn't take an EOL arg)
]
if call[name[isinstance], parameter[name[path], name[str]]] begin[:]
variable[path] assign[=] call[name[open], parameter[name[path], name[mode]]]
with name[path] begin[:]
for taget[name[line]] in starred[name[path]] begin[:]
if name[ascii] begin[:]
variable[line] assign[=] call[name[str], parameter[name[line]]]
if name[strip_eol] begin[:]
variable[line] assign[=] call[name[line].rstrip, parameter[constant[
]]]
<ast.Yield object at 0x7da20c6a9c60> | keyword[def] identifier[gen_file_lines] ( identifier[path] , identifier[mode] = literal[string] , identifier[strip_eol] = keyword[True] , identifier[ascii] = keyword[True] , identifier[eol] = literal[string] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[path] , identifier[str] ):
identifier[path] = identifier[open] ( identifier[path] , identifier[mode] )
keyword[with] identifier[path] :
keyword[for] identifier[line] keyword[in] identifier[path] :
keyword[if] identifier[ascii] :
identifier[line] = identifier[str] ( identifier[line] )
keyword[if] identifier[strip_eol] :
identifier[line] = identifier[line] . identifier[rstrip] ( literal[string] )
keyword[yield] identifier[line] | def gen_file_lines(path, mode='rUb', strip_eol=True, ascii=True, eol='\n'):
"""Generate a sequence of "documents" from the lines in a file
Arguments:
path (file or str): path to a file or an open file_obj ready to be read
mode (str): file mode to open a file in
strip_eol (bool): whether to strip the EOL char from lines as they are read/generated/yielded
ascii (bool): whether to use the stringify and to_ascii functions on each line
eol (str): UNUSED character delimiting lines in the file
TODO:
Use `eol` to split lines (currently ignored because `file.readline` doesn't take an EOL arg)
"""
if isinstance(path, str):
path = open(path, mode) # depends on [control=['if'], data=[]]
with path:
# TODO: read one char at a time looking for the eol char and yielding the intervening chars
for line in path:
if ascii:
line = str(line) # depends on [control=['if'], data=[]]
if strip_eol:
line = line.rstrip('\n') # depends on [control=['if'], data=[]]
yield line # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=[]] |
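A hedged usage sketch of the generator above; the filename is hypothetical, and text mode 'r' is used instead of the default 'rUb' since the function treats lines as strings:

# Stream stripped lines from a (hypothetical) corpus file one at a time.
for doc in gen_file_lines('corpus.txt', mode='r'):
    print(doc)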
def main(testfiles=None, action=printer):
"""testfiles can be None, in which case the command line arguments are used as filenames.
testfiles can be a string, in which case that file is parsed.
testfiles can be a list.
In all cases, the filenames will be globbed.
If more than one file is parsed successfully, a dictionary of ParseResults is returned.
Otherwise, a simple ParseResults is returned.
"""
testfiles = get_filename_list(testfiles)
print(testfiles)
if action:
for i in (simple_identifier, value, item_list):
i.setParseAction(action)
success = 0
failures = []
retval = {}
for f in testfiles:
try:
retval[f] = object_definition.parseFile(f)
success += 1
except Exception:
failures.append(f)
if failures:
print('\nfailed while processing %s' % ', '.join(failures))
print('\nsucceeded on %d of %d files' %(success, len(testfiles)))
if len(retval) == 1 and len(testfiles) == 1:
# if only one file is parsed, return the parseResults directly
return retval[list(retval.keys())[0]]
# else, return a dictionary of parseResults
return retval | def function[main, parameter[testfiles, action]]:
constant[testfiles can be None, in which case the command line arguments are used as filenames.
testfiles can be a string, in which case that file is parsed.
testfiles can be a list.
In all cases, the filenames will be globbed.
If more than one file is parsed successfully, a dictionary of ParseResults is returned.
Otherwise, a simple ParseResults is returned.
]
variable[testfiles] assign[=] call[name[get_filename_list], parameter[name[testfiles]]]
call[name[print], parameter[name[testfiles]]]
if name[action] begin[:]
for taget[name[i]] in starred[tuple[[<ast.Name object at 0x7da18c4ceb00>, <ast.Name object at 0x7da18c4cc580>, <ast.Name object at 0x7da18c4cc8b0>]]] begin[:]
call[name[i].setParseAction, parameter[name[action]]]
variable[success] assign[=] constant[0]
variable[failures] assign[=] list[[]]
variable[retval] assign[=] dictionary[[], []]
for taget[name[f]] in starred[name[testfiles]] begin[:]
<ast.Try object at 0x7da18c4cef20>
if name[failures] begin[:]
call[name[print], parameter[binary_operation[constant[
failed while processing %s] <ast.Mod object at 0x7da2590d6920> call[constant[, ].join, parameter[name[failures]]]]]]
call[name[print], parameter[binary_operation[constant[
succeeded on %d of %d files] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18c4cf7f0>, <ast.Call object at 0x7da18c4cee30>]]]]]
if <ast.BoolOp object at 0x7da18c4ce200> begin[:]
return[call[name[retval]][call[call[name[list], parameter[call[name[retval].keys, parameter[]]]]][constant[0]]]]
return[name[retval]] | keyword[def] identifier[main] ( identifier[testfiles] = keyword[None] , identifier[action] = identifier[printer] ):
literal[string]
identifier[testfiles] = identifier[get_filename_list] ( identifier[testfiles] )
identifier[print] ( identifier[testfiles] )
keyword[if] identifier[action] :
keyword[for] identifier[i] keyword[in] ( identifier[simple_identifier] , identifier[value] , identifier[item_list] ):
identifier[i] . identifier[setParseAction] ( identifier[action] )
identifier[success] = literal[int]
identifier[failures] =[]
identifier[retval] ={}
keyword[for] identifier[f] keyword[in] identifier[testfiles] :
keyword[try] :
identifier[retval] [ identifier[f] ]= identifier[object_definition] . identifier[parseFile] ( identifier[f] )
identifier[success] += literal[int]
keyword[except] identifier[Exception] :
identifier[failures] . identifier[append] ( identifier[f] )
keyword[if] identifier[failures] :
identifier[print] ( literal[string] % literal[string] . identifier[join] ( identifier[failures] ))
identifier[print] ( literal[string] %( identifier[success] , identifier[len] ( identifier[testfiles] )))
keyword[if] identifier[len] ( identifier[retval] )== literal[int] keyword[and] identifier[len] ( identifier[testfiles] )== literal[int] :
keyword[return] identifier[retval] [ identifier[list] ( identifier[retval] . identifier[keys] ())[ literal[int] ]]
keyword[return] identifier[retval] | def main(testfiles=None, action=printer):
"""testfiles can be None, in which case the command line arguments are used as filenames.
testfiles can be a string, in which case that file is parsed.
testfiles can be a list.
In all cases, the filenames will be globbed.
If more than one file is parsed successfully, a dictionary of ParseResults is returned.
Otherwise, a simple ParseResults is returned.
"""
testfiles = get_filename_list(testfiles)
print(testfiles)
if action:
for i in (simple_identifier, value, item_list):
i.setParseAction(action) # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
success = 0
failures = []
retval = {}
for f in testfiles:
try:
retval[f] = object_definition.parseFile(f)
success += 1 # depends on [control=['try'], data=[]]
except Exception:
failures.append(f) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['f']]
if failures:
print('\nfailed while processing %s' % ', '.join(failures)) # depends on [control=['if'], data=[]]
print('\nsucceeded on %d of %d files' % (success, len(testfiles)))
if len(retval) == 1 and len(testfiles) == 1: # if only one file is parsed, return the parseResults directly
return retval[list(retval.keys())[0]] # depends on [control=['if'], data=[]] # else, return a dictionary of parseResults
return retval |
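An illustrative call pattern implied by the return logic above (filenames hypothetical): one successfully parsed file yields its ParseResults directly, while several yield a dict keyed by filename:

single = main('defs.obj')        # ParseResults for the one file
many = main(['specs/*.obj'])     # {filename: ParseResults, ...}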
def persist(self, name, project=None, drop_model=False, **kwargs):
"""
Persist the execution into a new model.
:param name: model name
:param project: name of the project
:param drop_model: drop model before creation
"""
return super(ODPSModelExpr, self).persist(name, project=project, drop_model=drop_model, **kwargs) | def function[persist, parameter[self, name, project, drop_model]]:
constant[
Persist the execution into a new model.
:param name: model name
:param project: name of the project
:param drop_model: drop model before creation
]
return[call[call[name[super], parameter[name[ODPSModelExpr], name[self]]].persist, parameter[name[name]]]] | keyword[def] identifier[persist] ( identifier[self] , identifier[name] , identifier[project] = keyword[None] , identifier[drop_model] = keyword[False] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[super] ( identifier[ODPSModelExpr] , identifier[self] ). identifier[persist] ( identifier[name] , identifier[project] = identifier[project] , identifier[drop_model] = identifier[drop_model] ,** identifier[kwargs] ) | def persist(self, name, project=None, drop_model=False, **kwargs):
"""
Persist the execution into a new model.
:param name: model name
:param project: name of the project
:param drop_model: drop model before creation
"""
return super(ODPSModelExpr, self).persist(name, project=project, drop_model=drop_model, **kwargs) |
def enablePackrat():
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
ParserElement._parse = ParserElement._parseCache | def function[enablePackrat, parameter[]]:
constant[Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
]
if <ast.UnaryOp object at 0x7da2047ea4d0> begin[:]
name[ParserElement]._packratEnabled assign[=] constant[True]
name[ParserElement]._parse assign[=] name[ParserElement]._parseCache | keyword[def] identifier[enablePackrat] ():
literal[string]
keyword[if] keyword[not] identifier[ParserElement] . identifier[_packratEnabled] :
identifier[ParserElement] . identifier[_packratEnabled] = keyword[True]
identifier[ParserElement] . identifier[_parse] = identifier[ParserElement] . identifier[_parseCache] | def enablePackrat():
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
ParserElement._parse = ParserElement._parseCache # depends on [control=['if'], data=[]] |
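A usage sketch following the docstring's own advice: activate packrat memoization immediately after import, and before any psyco.full() call:

import pyparsing
pyparsing.ParserElement.enablePackrat()
# ... then define grammars and parse as usual.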
def get_counter(data, base):
"""
See setCounters() / getCounters() methods in IJ source, ij/gui/PointRoi.java.
"""
b0 = data[base]
b1 = data[base + 1]
b2 = data[base + 2]
b3 = data[base + 3]
counter = b3
position = (b1 << 8) + b2
return counter, position | def function[get_counter, parameter[data, base]]:
constant[
See setCounters() / getCounters() methods in IJ source, ij/gui/PointRoi.java.
]
variable[b0] assign[=] call[name[data]][name[base]]
variable[b1] assign[=] call[name[data]][binary_operation[name[base] + constant[1]]]
variable[b2] assign[=] call[name[data]][binary_operation[name[base] + constant[2]]]
variable[b3] assign[=] call[name[data]][binary_operation[name[base] + constant[3]]]
variable[counter] assign[=] name[b3]
variable[position] assign[=] binary_operation[binary_operation[name[b1] <ast.LShift object at 0x7da2590d69e0> constant[8]] + name[b2]]
return[tuple[[<ast.Name object at 0x7da1b26af460>, <ast.Name object at 0x7da1b26adb10>]]] | keyword[def] identifier[get_counter] ( identifier[data] , identifier[base] ):
literal[string]
identifier[b0] = identifier[data] [ identifier[base] ]
identifier[b1] = identifier[data] [ identifier[base] + literal[int] ]
identifier[b2] = identifier[data] [ identifier[base] + literal[int] ]
identifier[b3] = identifier[data] [ identifier[base] + literal[int] ]
identifier[counter] = identifier[b3]
identifier[position] =( identifier[b1] << literal[int] )+ identifier[b2]
keyword[return] identifier[counter] , identifier[position] | def get_counter(data, base):
"""
See setCounters() / getCounters() methods in IJ source, ij/gui/PointRoi.java.
"""
b0 = data[base]
b1 = data[base + 1]
b2 = data[base + 2]
b3 = data[base + 3]
counter = b3
position = (b1 << 8) + b2
return (counter, position) |
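A worked example of the byte layout the function assumes (sample bytes are hypothetical): position is a 16-bit big-endian value in bytes 1-2 and counter is byte 3, while byte 0 is read but unused:

data = bytes([0x00, 0x01, 0x02, 0x07])
counter, position = get_counter(data, 0)
assert counter == 7
assert position == (0x01 << 8) + 0x02 == 258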
def _savepath(self, filename):
"""
Returns the full path for saving the file, adding an extension
and making the filename unique as necessary.
"""
(basename, ext) = os.path.splitext(filename)
basename = basename if (ext in self.extensions) else filename
ext = ext if (ext in self.extensions) else self.extensions[0]
savepath = os.path.abspath(os.path.join(self.directory,
'%s%s' % (basename, ext)))
return (tempfile.mkstemp(ext, basename + "_", self.directory)[1]
if self.hash_suffix else savepath) | def function[_savepath, parameter[self, filename]]:
constant[
Returns the full path for saving the file, adding an extension
and making the filename unique as necessary.
]
<ast.Tuple object at 0x7da1afe0dea0> assign[=] call[name[os].path.splitext, parameter[name[filename]]]
variable[basename] assign[=] <ast.IfExp object at 0x7da1afe5a440>
variable[ext] assign[=] <ast.IfExp object at 0x7da1afe59c90>
variable[savepath] assign[=] call[name[os].path.abspath, parameter[call[name[os].path.join, parameter[name[self].directory, binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1afe59b10>, <ast.Name object at 0x7da1afe5a410>]]]]]]]
return[<ast.IfExp object at 0x7da1afe5a140>] | keyword[def] identifier[_savepath] ( identifier[self] , identifier[filename] ):
literal[string]
( identifier[basename] , identifier[ext] )= identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )
identifier[basename] = identifier[basename] keyword[if] ( identifier[ext] keyword[in] identifier[self] . identifier[extensions] ) keyword[else] identifier[filename]
identifier[ext] = identifier[ext] keyword[if] ( identifier[ext] keyword[in] identifier[self] . identifier[extensions] ) keyword[else] identifier[self] . identifier[extensions] [ literal[int] ]
identifier[savepath] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[directory] ,
literal[string] %( identifier[basename] , identifier[ext] )))
keyword[return] ( identifier[tempfile] . identifier[mkstemp] ( identifier[ext] , identifier[basename] + literal[string] , identifier[self] . identifier[directory] )[ literal[int] ]
keyword[if] identifier[self] . identifier[hash_suffix] keyword[else] identifier[savepath] ) | def _savepath(self, filename):
"""
Returns the full path for saving the file, adding an extension
and making the filename unique as necessary.
"""
(basename, ext) = os.path.splitext(filename)
basename = basename if ext in self.extensions else filename
ext = ext if ext in self.extensions else self.extensions[0]
savepath = os.path.abspath(os.path.join(self.directory, '%s%s' % (basename, ext)))
return tempfile.mkstemp(ext, basename + '_', self.directory)[1] if self.hash_suffix else savepath |
def _run_init(
argv,
flags_parser,
):
"""Does one-time initialization and re-parses flags on rerun."""
if _run_init.done:
return flags_parser(argv)
command_name.make_process_name_useful()
# Set up absl logging handler.
logging.use_absl_handler()
args = _register_and_parse_flags_with_usage(
argv=argv,
flags_parser=flags_parser,
)
if faulthandler:
try:
faulthandler.enable()
except Exception: # pylint: disable=broad-except
# Some tests verify stderr output very closely, so don't print anything.
# Disabled faulthandler is a low-impact error.
pass
_run_init.done = True
return args | def function[_run_init, parameter[argv, flags_parser]]:
constant[Does one-time initialization and re-parses flags on rerun.]
if name[_run_init].done begin[:]
return[call[name[flags_parser], parameter[name[argv]]]]
call[name[command_name].make_process_name_useful, parameter[]]
call[name[logging].use_absl_handler, parameter[]]
variable[args] assign[=] call[name[_register_and_parse_flags_with_usage], parameter[]]
if name[faulthandler] begin[:]
<ast.Try object at 0x7da1b19ef730>
name[_run_init].done assign[=] constant[True]
return[name[args]] | keyword[def] identifier[_run_init] (
identifier[argv] ,
identifier[flags_parser] ,
):
literal[string]
keyword[if] identifier[_run_init] . identifier[done] :
keyword[return] identifier[flags_parser] ( identifier[argv] )
identifier[command_name] . identifier[make_process_name_useful] ()
identifier[logging] . identifier[use_absl_handler] ()
identifier[args] = identifier[_register_and_parse_flags_with_usage] (
identifier[argv] = identifier[argv] ,
identifier[flags_parser] = identifier[flags_parser] ,
)
keyword[if] identifier[faulthandler] :
keyword[try] :
identifier[faulthandler] . identifier[enable] ()
keyword[except] identifier[Exception] :
keyword[pass]
identifier[_run_init] . identifier[done] = keyword[True]
keyword[return] identifier[args] | def _run_init(argv, flags_parser):
"""Does one-time initialization and re-parses flags on rerun."""
if _run_init.done:
return flags_parser(argv) # depends on [control=['if'], data=[]]
command_name.make_process_name_useful()
# Set up absl logging handler.
logging.use_absl_handler()
args = _register_and_parse_flags_with_usage(argv=argv, flags_parser=flags_parser)
if faulthandler:
try:
faulthandler.enable() # depends on [control=['try'], data=[]]
except Exception: # pylint: disable=broad-except
# Some tests verify stderr output very closely, so don't print anything.
# Disabled faulthandler is a low-impact error.
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
_run_init.done = True
return args |
def _update_api_client(self, api_parent_class=None):
"""Updates the ApiClient object of specified parent api (or all of them)"""
clients = ([self.api_clients[api_parent_class]]
if api_parent_class else self.api_clients.values())
for api_client in clients:
api_client.configuration.host = (self.config.get('host') or
api_client.configuration.host)
api_client.configuration.api_key['Authorization'] = self.config['api_key'] | def function[_update_api_client, parameter[self, api_parent_class]]:
constant[Updates the ApiClient object of specified parent api (or all of them)]
variable[clients] assign[=] <ast.IfExp object at 0x7da1b040ffa0>
for taget[name[api_client]] in starred[name[clients]] begin[:]
name[api_client].configuration.host assign[=] <ast.BoolOp object at 0x7da1b040c550>
call[name[api_client].configuration.api_key][constant[Authorization]] assign[=] call[name[self].config][constant[api_key]] | keyword[def] identifier[_update_api_client] ( identifier[self] , identifier[api_parent_class] = keyword[None] ):
literal[string]
identifier[clients] =([ identifier[self] . identifier[api_clients] [ identifier[api_parent_class] ]]
keyword[if] identifier[api_parent_class] keyword[else] identifier[self] . identifier[api_clients] . identifier[values] ())
keyword[for] identifier[api_client] keyword[in] identifier[clients] :
identifier[api_client] . identifier[configuration] . identifier[host] =( identifier[self] . identifier[config] . identifier[get] ( literal[string] ) keyword[or]
identifier[api_client] . identifier[configuration] . identifier[host] )
identifier[api_client] . identifier[configuration] . identifier[api_key] [ literal[string] ]= identifier[self] . identifier[config] [ literal[string] ] | def _update_api_client(self, api_parent_class=None):
"""Updates the ApiClient object of specified parent api (or all of them)"""
clients = [self.api_clients[api_parent_class]] if api_parent_class else self.api_clients.values()
for api_client in clients:
api_client.configuration.host = self.config.get('host') or api_client.configuration.host
api_client.configuration.api_key['Authorization'] = self.config['api_key'] # depends on [control=['for'], data=['api_client']] |
def in_bulk(self, id_list):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID.
"""
if not id_list:
return {}
qs = self._clone()
qs.add_filter(('pk__in', id_list))
qs._clear_ordering(force_empty=True)
return dict([(obj._get_pk_val(), obj) for obj in qs]) | def function[in_bulk, parameter[self, id_list]]:
constant[
Returns a dictionary mapping each of the given IDs to the object with
that ID.
]
if <ast.UnaryOp object at 0x7da1b0e6c0a0> begin[:]
return[dictionary[[], []]]
variable[qs] assign[=] call[name[self]._clone, parameter[]]
call[name[qs].add_filter, parameter[tuple[[<ast.Constant object at 0x7da1b0e6d6c0>, <ast.Name object at 0x7da1b0e6d960>]]]]
call[name[qs]._clear_ordering, parameter[]]
return[call[name[dict], parameter[<ast.ListComp object at 0x7da1b0e6e1a0>]]] | keyword[def] identifier[in_bulk] ( identifier[self] , identifier[id_list] ):
literal[string]
keyword[if] keyword[not] identifier[id_list] :
keyword[return] {}
identifier[qs] = identifier[self] . identifier[_clone] ()
identifier[qs] . identifier[add_filter] (( literal[string] , identifier[id_list] ))
identifier[qs] . identifier[_clear_ordering] ( identifier[force_empty] = keyword[True] )
keyword[return] identifier[dict] ([( identifier[obj] . identifier[_get_pk_val] (), identifier[obj] ) keyword[for] identifier[obj] keyword[in] identifier[qs] ]) | def in_bulk(self, id_list):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID.
"""
if not id_list:
return {} # depends on [control=['if'], data=[]]
qs = self._clone()
qs.add_filter(('pk__in', id_list))
qs._clear_ordering(force_empty=True)
return dict([(obj._get_pk_val(), obj) for obj in qs]) |
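A hedged usage sketch against a Django-style manager (model name hypothetical): several objects are fetched by primary key in one filtered query:

authors = Author.objects.in_bulk([1, 2, 3])
# -> {1: <Author: 1>, 2: <Author: 2>, 3: <Author: 3>}; missing IDs are omitted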
def should_break_here(self, frame):
"""Check wether there is a breakpoint at this frame."""
# Next line commented out for performance
#_logger.b_debug("should_break_here(filename=%s, lineno=%s) with breaks=%s",
# frame.f_code.co_filename,
# frame.f_lineno,
# IKBreakpoint.breakpoints_by_number)
c_file_name = self.canonic(frame.f_code.co_filename)
if not c_file_name in IKBreakpoint.breakpoints_files:
return False
bp = IKBreakpoint.lookup_effective_breakpoint(c_file_name,
frame.f_lineno,
frame)
return True if bp else False | def function[should_break_here, parameter[self, frame]]:
constant[Check whether there is a breakpoint at this frame.]
variable[c_file_name] assign[=] call[name[self].canonic, parameter[name[frame].f_code.co_filename]]
if <ast.UnaryOp object at 0x7da1b23466b0> begin[:]
return[constant[False]]
variable[bp] assign[=] call[name[IKBreakpoint].lookup_effective_breakpoint, parameter[name[c_file_name], name[frame].f_lineno, name[frame]]]
return[<ast.IfExp object at 0x7da1b23460b0>] | keyword[def] identifier[should_break_here] ( identifier[self] , identifier[frame] ):
literal[string]
identifier[c_file_name] = identifier[self] . identifier[canonic] ( identifier[frame] . identifier[f_code] . identifier[co_filename] )
keyword[if] keyword[not] identifier[c_file_name] keyword[in] identifier[IKBreakpoint] . identifier[breakpoints_files] :
keyword[return] keyword[False]
identifier[bp] = identifier[IKBreakpoint] . identifier[lookup_effective_breakpoint] ( identifier[c_file_name] ,
identifier[frame] . identifier[f_lineno] ,
identifier[frame] )
keyword[return] keyword[True] keyword[if] identifier[bp] keyword[else] keyword[False] | def should_break_here(self, frame):
"""Check wether there is a breakpoint at this frame."""
# Next line commented out for performance
#_logger.b_debug("should_break_here(filename=%s, lineno=%s) with breaks=%s",
# frame.f_code.co_filename,
# frame.f_lineno,
# IKBreakpoint.breakpoints_by_number)
c_file_name = self.canonic(frame.f_code.co_filename)
if not c_file_name in IKBreakpoint.breakpoints_files:
return False # depends on [control=['if'], data=[]]
bp = IKBreakpoint.lookup_effective_breakpoint(c_file_name, frame.f_lineno, frame)
return True if bp else False |
def generate_folder_names(self):
"""Set appropriate folder names."""
self.project_dir = os.path.join(prms.Paths.outdatadir, self.project)
self.batch_dir = os.path.join(self.project_dir, self.name)
self.raw_dir = os.path.join(self.batch_dir, "raw_data") | def function[generate_folder_names, parameter[self]]:
constant[Set appropriate folder names.]
name[self].project_dir assign[=] call[name[os].path.join, parameter[name[prms].Paths.outdatadir, name[self].project]]
name[self].batch_dir assign[=] call[name[os].path.join, parameter[name[self].project_dir, name[self].name]]
name[self].raw_dir assign[=] call[name[os].path.join, parameter[name[self].batch_dir, constant[raw_data]]] | keyword[def] identifier[generate_folder_names] ( identifier[self] ):
literal[string]
identifier[self] . identifier[project_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[prms] . identifier[Paths] . identifier[outdatadir] , identifier[self] . identifier[project] )
identifier[self] . identifier[batch_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[project_dir] , identifier[self] . identifier[name] )
identifier[self] . identifier[raw_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[batch_dir] , literal[string] ) | def generate_folder_names(self):
"""Set appropriate folder names."""
self.project_dir = os.path.join(prms.Paths.outdatadir, self.project)
self.batch_dir = os.path.join(self.project_dir, self.name)
self.raw_dir = os.path.join(self.batch_dir, 'raw_data') |
def drop_table(self, model, cascade=True):
"""Drop model and table from database.
>> migrator.drop_table(model, cascade=True)
"""
del self.orm[model._meta.table_name]
self.ops.append(self.migrator.drop_table(model, cascade)) | def function[drop_table, parameter[self, model, cascade]]:
constant[Drop model and table from database.
>> migrator.drop_table(model, cascade=True)
]
<ast.Delete object at 0x7da2054a4040>
call[name[self].ops.append, parameter[call[name[self].migrator.drop_table, parameter[name[model], name[cascade]]]]] | keyword[def] identifier[drop_table] ( identifier[self] , identifier[model] , identifier[cascade] = keyword[True] ):
literal[string]
keyword[del] identifier[self] . identifier[orm] [ identifier[model] . identifier[_meta] . identifier[table_name] ]
identifier[self] . identifier[ops] . identifier[append] ( identifier[self] . identifier[migrator] . identifier[drop_table] ( identifier[model] , identifier[cascade] )) | def drop_table(self, model, cascade=True):
"""Drop model and table from database.
>> migrator.drop_table(model, cascade=True)
"""
del self.orm[model._meta.table_name]
self.ops.append(self.migrator.drop_table(model, cascade)) |
def hash_full_tree(self, leaves):
"""Hash a set of leaves representing a valid full tree."""
root_hash, hashes = self._hash_full(leaves, 0, len(leaves))
assert len(hashes) == count_bits_set(len(leaves))
assert (self._hash_fold(hashes) == root_hash if hashes else
root_hash == self.hash_empty())
return root_hash | def function[hash_full_tree, parameter[self, leaves]]:
constant[Hash a set of leaves representing a valid full tree.]
<ast.Tuple object at 0x7da1b2586500> assign[=] call[name[self]._hash_full, parameter[name[leaves], constant[0], call[name[len], parameter[name[leaves]]]]]
assert[compare[call[name[len], parameter[name[hashes]]] equal[==] call[name[count_bits_set], parameter[call[name[len], parameter[name[leaves]]]]]]]
assert[<ast.IfExp object at 0x7da1b2586590>]
return[name[root_hash]] | keyword[def] identifier[hash_full_tree] ( identifier[self] , identifier[leaves] ):
literal[string]
identifier[root_hash] , identifier[hashes] = identifier[self] . identifier[_hash_full] ( identifier[leaves] , literal[int] , identifier[len] ( identifier[leaves] ))
keyword[assert] identifier[len] ( identifier[hashes] )== identifier[count_bits_set] ( identifier[len] ( identifier[leaves] ))
keyword[assert] ( identifier[self] . identifier[_hash_fold] ( identifier[hashes] )== identifier[root_hash] keyword[if] identifier[hashes] keyword[else]
identifier[root_hash] == identifier[self] . identifier[hash_empty] ())
keyword[return] identifier[root_hash] | def hash_full_tree(self, leaves):
"""Hash a set of leaves representing a valid full tree."""
(root_hash, hashes) = self._hash_full(leaves, 0, len(leaves))
assert len(hashes) == count_bits_set(len(leaves))
assert self._hash_fold(hashes) == root_hash if hashes else root_hash == self.hash_empty()
return root_hash |
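The assertion above relies on a count_bits_set helper that is not shown in this row; a minimal sketch of the assumed behavior, illustrating why a full tree of n leaves produces one partial root per set bit of n:

def count_bits_set(n):
    """Assumed popcount helper: number of set bits in n."""
    bits = 0
    while n:
        n &= n - 1  # clear the lowest set bit
        bits += 1
    return bits

assert count_bits_set(5) == 2  # 5 = 0b101 -> a 4-leaf and a 1-leaf subtree root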
def p_pointer(self, p):
'pointer : identifier LBRACKET expression RBRACKET'
p[0] = Pointer(p[1], p[3], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | def function[p_pointer, parameter[self, p]]:
constant[pointer : identifier LBRACKET expression RBRACKET]
call[name[p]][constant[0]] assign[=] call[name[Pointer], parameter[call[name[p]][constant[1]], call[name[p]][constant[3]]]]
call[name[p].set_lineno, parameter[constant[0], call[name[p].lineno, parameter[constant[1]]]]] | keyword[def] identifier[p_pointer] ( identifier[self] , identifier[p] ):
literal[string]
identifier[p] [ literal[int] ]= identifier[Pointer] ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] ))
identifier[p] . identifier[set_lineno] ( literal[int] , identifier[p] . identifier[lineno] ( literal[int] )) | def p_pointer(self, p):
"""pointer : identifier LBRACKET expression RBRACKET"""
p[0] = Pointer(p[1], p[3], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
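For readers unfamiliar with the convention at work above: PLY's yacc reads the production from the rule's docstring, so 'pointer : identifier LBRACKET expression RBRACKET' is grammar, not documentation. A hedged sketch of building such a parser (the module object is hypothetical):

import ply.yacc as yacc
parser = yacc.yacc(module=my_grammar)  # collects every p_* rule's docstring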
async def googlecast(dev: Device, target, value):
"""Return Googlecast settings."""
if target and value:
click.echo("Setting %s = %s" % (target, value))
await dev.set_googlecast_settings(target, value)
print_settings(await dev.get_googlecast_settings()) | <ast.AsyncFunctionDef object at 0x7da20cabebf0> | keyword[async] keyword[def] identifier[googlecast] ( identifier[dev] : identifier[Device] , identifier[target] , identifier[value] ):
literal[string]
keyword[if] identifier[target] keyword[and] identifier[value] :
identifier[click] . identifier[echo] ( literal[string] %( identifier[target] , identifier[value] ))
keyword[await] identifier[dev] . identifier[set_googlecast_settings] ( identifier[target] , identifier[value] )
identifier[print_settings] ( keyword[await] identifier[dev] . identifier[get_googlecast_settings] ()) | async def googlecast(dev: Device, target, value):
"""Return Googlecast settings."""
if target and value:
click.echo('Setting %s = %s' % (target, value))
await dev.set_googlecast_settings(target, value) # depends on [control=['if'], data=[]]
print_settings(await dev.get_googlecast_settings()) |
def to_json(self):
"""
:return: str
"""
json_dict = self.to_json_basic()
json_dict['ds'] = self._ds
return json.dumps(json_dict) | def function[to_json, parameter[self]]:
constant[
:return: str
]
variable[json_dict] assign[=] call[name[self].to_json_basic, parameter[]]
call[name[json_dict]][constant[ds]] assign[=] name[self]._ds
return[call[name[json].dumps, parameter[name[json_dict]]]] | keyword[def] identifier[to_json] ( identifier[self] ):
literal[string]
identifier[json_dict] = identifier[self] . identifier[to_json_basic] ()
identifier[json_dict] [ literal[string] ]= identifier[self] . identifier[_ds]
keyword[return] identifier[json] . identifier[dumps] ( identifier[json_dict] ) | def to_json(self):
"""
:return: str
"""
json_dict = self.to_json_basic()
json_dict['ds'] = self._ds
return json.dumps(json_dict) |
def prepare_backend_environ(self, host, method, relative_url, headers, body,
source_ip, port):
"""Build an environ object for the backend to consume.
Args:
host: A string containing the host serving the request.
method: A string containing the HTTP method of the request.
relative_url: A string containing path and query string of the request.
headers: A list of (key, value) tuples where key and value are both
strings.
body: A string containing the request body.
source_ip: The source IP address for the request.
port: The port to which to direct the request.
Returns:
An environ object with all the information necessary for the backend to
process the request.
"""
if isinstance(body, unicode):
body = body.encode('ascii')
url = urlparse.urlsplit(relative_url)
if port != 80:
host = '%s:%s' % (host, port)
else:
host = host
environ = {'CONTENT_LENGTH': str(len(body)),
'PATH_INFO': url.path,
'QUERY_STRING': url.query,
'REQUEST_METHOD': method,
'REMOTE_ADDR': source_ip,
'SERVER_NAME': host,
'SERVER_PORT': str(port),
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.errors': cStringIO.StringIO(),
'wsgi.multithread': True,
'wsgi.multiprocess': True,
'wsgi.input': cStringIO.StringIO(body)}
util.put_headers_in_environ(headers, environ)
environ['HTTP_HOST'] = host
return environ | def function[prepare_backend_environ, parameter[self, host, method, relative_url, headers, body, source_ip, port]]:
constant[Build an environ object for the backend to consume.
Args:
host: A string containing the host serving the request.
method: A string containing the HTTP method of the request.
relative_url: A string containing path and query string of the request.
headers: A list of (key, value) tuples where key and value are both
strings.
body: A string containing the request body.
source_ip: The source IP address for the request.
port: The port to which to direct the request.
Returns:
An environ object with all the information necessary for the backend to
process the request.
]
if call[name[isinstance], parameter[name[body], name[unicode]]] begin[:]
variable[body] assign[=] call[name[body].encode, parameter[constant[ascii]]]
variable[url] assign[=] call[name[urlparse].urlsplit, parameter[name[relative_url]]]
if compare[name[port] not_equal[!=] constant[80]] begin[:]
variable[host] assign[=] binary_operation[constant[%s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0eff550>, <ast.Name object at 0x7da1b0eff370>]]]
variable[environ] assign[=] dictionary[[<ast.Constant object at 0x7da1b0efddb0>, <ast.Constant object at 0x7da1b0efcaf0>, <ast.Constant object at 0x7da1b0efea70>, <ast.Constant object at 0x7da1b0eff280>, <ast.Constant object at 0x7da1b0efd0f0>, <ast.Constant object at 0x7da1b0efcee0>, <ast.Constant object at 0x7da1b0efe5c0>, <ast.Constant object at 0x7da1b0efec20>, <ast.Constant object at 0x7da1b0efe6e0>, <ast.Constant object at 0x7da1b0effbe0>, <ast.Constant object at 0x7da1b0efca00>, <ast.Constant object at 0x7da1b0efcd90>, <ast.Constant object at 0x7da1b0efc820>, <ast.Constant object at 0x7da1b0efdf90>], [<ast.Call object at 0x7da1b0efdb70>, <ast.Attribute object at 0x7da1b0eff8b0>, <ast.Attribute object at 0x7da1b0efcca0>, <ast.Name object at 0x7da1b0eff250>, <ast.Name object at 0x7da1b0efe380>, <ast.Name object at 0x7da1b0efdae0>, <ast.Call object at 0x7da1b0efdb10>, <ast.Constant object at 0x7da1b0efec80>, <ast.Tuple object at 0x7da1b0eff4c0>, <ast.Constant object at 0x7da1b0eff640>, <ast.Call object at 0x7da1b0eff1f0>, <ast.Constant object at 0x7da1b0efdf30>, <ast.Constant object at 0x7da1b0efc910>, <ast.Call object at 0x7da1b0efc310>]]
call[name[util].put_headers_in_environ, parameter[name[headers], name[environ]]]
call[name[environ]][constant[HTTP_HOST]] assign[=] name[host]
return[name[environ]] | keyword[def] identifier[prepare_backend_environ] ( identifier[self] , identifier[host] , identifier[method] , identifier[relative_url] , identifier[headers] , identifier[body] ,
identifier[source_ip] , identifier[port] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[body] , identifier[unicode] ):
identifier[body] = identifier[body] . identifier[encode] ( literal[string] )
identifier[url] = identifier[urlparse] . identifier[urlsplit] ( identifier[relative_url] )
keyword[if] identifier[port] != literal[int] :
identifier[host] = literal[string] %( identifier[host] , identifier[port] )
keyword[else] :
identifier[host] = identifier[host]
identifier[environ] ={ literal[string] : identifier[str] ( identifier[len] ( identifier[body] )),
literal[string] : identifier[url] . identifier[path] ,
literal[string] : identifier[url] . identifier[query] ,
literal[string] : identifier[method] ,
literal[string] : identifier[source_ip] ,
literal[string] : identifier[host] ,
literal[string] : identifier[str] ( identifier[port] ),
literal[string] : literal[string] ,
literal[string] :( literal[int] , literal[int] ),
literal[string] : literal[string] ,
literal[string] : identifier[cStringIO] . identifier[StringIO] (),
literal[string] : keyword[True] ,
literal[string] : keyword[True] ,
literal[string] : identifier[cStringIO] . identifier[StringIO] ( identifier[body] )}
identifier[util] . identifier[put_headers_in_environ] ( identifier[headers] , identifier[environ] )
identifier[environ] [ literal[string] ]= identifier[host]
keyword[return] identifier[environ] | def prepare_backend_environ(self, host, method, relative_url, headers, body, source_ip, port):
"""Build an environ object for the backend to consume.
Args:
host: A string containing the host serving the request.
method: A string containing the HTTP method of the request.
relative_url: A string containing path and query string of the request.
headers: A list of (key, value) tuples where key and value are both
strings.
body: A string containing the request body.
source_ip: The source IP address for the request.
port: The port to which to direct the request.
Returns:
An environ object with all the information necessary for the backend to
process the request.
"""
if isinstance(body, unicode):
body = body.encode('ascii') # depends on [control=['if'], data=[]]
url = urlparse.urlsplit(relative_url)
if port != 80:
host = '%s:%s' % (host, port) # depends on [control=['if'], data=['port']]
else:
host = host
environ = {'CONTENT_LENGTH': str(len(body)), 'PATH_INFO': url.path, 'QUERY_STRING': url.query, 'REQUEST_METHOD': method, 'REMOTE_ADDR': source_ip, 'SERVER_NAME': host, 'SERVER_PORT': str(port), 'SERVER_PROTOCOL': 'HTTP/1.1', 'wsgi.version': (1, 0), 'wsgi.url_scheme': 'http', 'wsgi.errors': cStringIO.StringIO(), 'wsgi.multithread': True, 'wsgi.multiprocess': True, 'wsgi.input': cStringIO.StringIO(body)}
util.put_headers_in_environ(headers, environ)
environ['HTTP_HOST'] = host
return environ |
def get_if_raw_addr6(iff):
"""
Returns the main global unicast address associated with provided
interface, in network format. If no global address is found, None
is returned.
"""
#r = filter(lambda x: x[2] == iff and x[1] == IPV6_ADDR_GLOBAL, in6_getifaddr())
r = [ x for x in in6_getifaddr() if x[2] == iff and x[1] == IPV6_ADDR_GLOBAL]
if len(r) == 0:
return None
else:
r = r[0][0]
return inet_pton(socket.AF_INET6, r) | def function[get_if_raw_addr6, parameter[iff]]:
constant[
Returns the main global unicast address associated with provided
interface, in network format. If no global address is found, None
is returned.
]
variable[r] assign[=] <ast.ListComp object at 0x7da1b1206c50>
if compare[call[name[len], parameter[name[r]]] equal[==] constant[0]] begin[:]
return[constant[None]]
return[call[name[inet_pton], parameter[name[socket].AF_INET6, name[r]]]] | keyword[def] identifier[get_if_raw_addr6] ( identifier[iff] ):
literal[string]
identifier[r] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[in6_getifaddr] () keyword[if] identifier[x] [ literal[int] ]== identifier[iff] keyword[and] identifier[x] [ literal[int] ]== identifier[IPV6_ADDR_GLOBAL] ]
keyword[if] identifier[len] ( identifier[r] )== literal[int] :
keyword[return] keyword[None]
keyword[else] :
identifier[r] = identifier[r] [ literal[int] ][ literal[int] ]
keyword[return] identifier[inet_pton] ( identifier[socket] . identifier[AF_INET6] , identifier[r] ) | def get_if_raw_addr6(iff):
"""
Returns the main global unicast address associated with provided
interface, in network format. If no global address is found, None
is returned.
"""
#r = filter(lambda x: x[2] == iff and x[1] == IPV6_ADDR_GLOBAL, in6_getifaddr())
r = [x for x in in6_getifaddr() if x[2] == iff and x[1] == IPV6_ADDR_GLOBAL]
if len(r) == 0:
return None # depends on [control=['if'], data=[]]
else:
r = r[0][0]
return inet_pton(socket.AF_INET6, r) |
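A usage sketch converting the raw network-format address back to text (interface name hypothetical):

import socket
raw = get_if_raw_addr6('eth0')
if raw is not None:
    print(socket.inet_ntop(socket.AF_INET6, raw))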
def push_stack(stack, substack, op_id):
"""Proxy of push, where we know we're pushing a stack onto a stack.
Used when differentiating call trees, where sub-functions get their own stack.
See push() for more.
Args:
stack: The stack object, which must support appending values.
substack: The stack to append.
op_id: A unique variable that is also passed into the corresponding pop.
Allows optimization passes to track pairs of pushes and pops.
Raises:
ValueError: If a non-stack value for `substack` is passed.
"""
if substack is not None and not isinstance(substack, Stack):
raise ValueError(
'Substack should be type tangent.Stack or None, instead found %s' %
type(substack))
if __debug__:
stack.append((substack, op_id))
else:
stack.append(substack) | def function[push_stack, parameter[stack, substack, op_id]]:
constant[Proxy of push, where we know we're pushing a stack onto a stack.
Used when differentiating call trees, where sub-functions get their own stack.
See push() for more.
Args:
stack: The stack object, which must support appending values.
substack: The stack to append.
op_id: A unique variable that is also passed into the corresponding pop.
Allows optimization passes to track pairs of pushes and pops.
Raises:
ValueError: If a non-stack value for `substack` is passed.
]
if <ast.BoolOp object at 0x7da1b1ddcb20> begin[:]
<ast.Raise object at 0x7da1b1dddd20>
if name[__debug__] begin[:]
call[name[stack].append, parameter[tuple[[<ast.Name object at 0x7da1b1ddf1f0>, <ast.Name object at 0x7da1b1ddde10>]]]] | keyword[def] identifier[push_stack] ( identifier[stack] , identifier[substack] , identifier[op_id] ):
literal[string]
keyword[if] identifier[substack] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[substack] , identifier[Stack] ):
keyword[raise] identifier[ValueError] (
literal[string] %
identifier[type] ( identifier[substack] ))
keyword[if] identifier[__debug__] :
identifier[stack] . identifier[append] (( identifier[substack] , identifier[op_id] ))
keyword[else] :
identifier[stack] . identifier[append] ( identifier[substack] ) | def push_stack(stack, substack, op_id):
"""Proxy of push, where we know we're pushing a stack onto a stack.
Used when differentiating call trees, where sub-functions get their own stack.
See push() for more.
Args:
stack: The stack object, which must support appending values.
substack: The stack to append.
op_id: A unique variable that is also passed into the corresponding pop.
Allows optimization passes to track pairs of pushes and pops.
Raises:
ValueError: If a non-stack value for `substack` is passed.
"""
if substack is not None and (not isinstance(substack, Stack)):
raise ValueError('Substack should be type tangent.Stack or None, instead found %s' % type(substack)) # depends on [control=['if'], data=[]]
if __debug__:
stack.append((substack, op_id)) # depends on [control=['if'], data=[]]
else:
stack.append(substack) |
def phi(self, Xpred, degrees=None):
"""
Compute the design matrix for this model
using the degrees given by the index array
in degrees
:param array-like Xpred: inputs to compute the design matrix for
:param array-like degrees: array of degrees to use [default=range(self.degree+1)]
:returns array-like phi: The design matrix [degree x #samples x #dimensions]
"""
assert Xpred.shape[1] == self.X.shape[1], "Need to predict with same shape as training data."
if degrees is None:
degrees = range(self.basis.degree+1)
tmp_phi = np.empty((len(degrees), Xpred.shape[0], Xpred.shape[1]))
for i, w in enumerate(degrees):
# evaluate the degree-w basis function on the prediction inputs
tmpX = self._phi(Xpred, w)
tmp_phi[i] = tmpX * self.weights[[w], :]
return tmp_phi | def function[phi, parameter[self, Xpred, degrees]]:
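# Hypothetical call sketch: for a fitted `model` (assumed) whose basis has
# degree 3, phi stacks one weighted basis evaluation per degree, producing an
# array of shape (degree + 1, n_samples, n_dims).
import numpy as np
Xpred = np.random.rand(10, model.X.shape[1])
design = model.phi(Xpred)  # design.shape == (4, 10, model.X.shape[1])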
constant[
Compute the design matrix for this model
using the degrees given by the index array
in degrees
:param array-like Xpred: inputs to compute the design matrix for
:param array-like degrees: array of degrees to use [default=range(self.degree+1)]
:returns array-like phi: The design matrix [degree x #samples x #dimensions]
]
assert[compare[call[name[Xpred].shape][constant[1]] equal[==] call[name[self].X.shape][constant[1]]]]
if compare[name[degrees] is constant[None]] begin[:]
variable[degrees] assign[=] call[name[range], parameter[binary_operation[name[self].basis.degree + constant[1]]]]
variable[tmp_phi] assign[=] call[name[np].empty, parameter[tuple[[<ast.Call object at 0x7da1b0f5b640>, <ast.Subscript object at 0x7da1b0f5b220>, <ast.Subscript object at 0x7da1b0f59270>]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0f5bf10>, <ast.Name object at 0x7da1b0f5a590>]]] in starred[call[name[enumerate], parameter[name[degrees]]]] begin[:]
variable[tmpX] assign[=] call[name[self]._phi, parameter[name[Xpred], name[w]]]
call[name[tmp_phi]][name[i]] assign[=] binary_operation[name[tmpX] * call[name[self].weights][tuple[[<ast.List object at 0x7da1b0d007c0>, <ast.Slice object at 0x7da1b0d012a0>]]]]
return[name[tmp_phi]] | keyword[def] identifier[phi] ( identifier[self] , identifier[Xpred] , identifier[degrees] = keyword[None] ):
literal[string]
keyword[assert] identifier[Xpred] . identifier[shape] [ literal[int] ]== identifier[self] . identifier[X] . identifier[shape] [ literal[int] ], literal[string]
keyword[if] identifier[degrees] keyword[is] keyword[None] :
identifier[degrees] = identifier[range] ( identifier[self] . identifier[basis] . identifier[degree] + literal[int] )
identifier[tmp_phi] = identifier[np] . identifier[empty] (( identifier[len] ( identifier[degrees] ), identifier[Xpred] . identifier[shape] [ literal[int] ], identifier[Xpred] . identifier[shape] [ literal[int] ]))
keyword[for] identifier[i] , identifier[w] keyword[in] identifier[enumerate] ( identifier[degrees] ):
identifier[tmpX] = identifier[self] . identifier[_phi] ( identifier[Xpred] , identifier[w] )
identifier[tmp_phi] [ identifier[i] ]= identifier[tmpX] * identifier[self] . identifier[weights] [[ identifier[w] ],:]
keyword[return] identifier[tmp_phi] | def phi(self, Xpred, degrees=None):
"""
Compute the design matrix for this model
using the degrees given by the index array
in degrees
:param array-like Xpred: inputs to compute the design matrix for
:param array-like degrees: array of degrees to use [default=range(self.degree+1)]
:returns array-like phi: The design matrix [degree x #samples x #dimensions]
"""
assert Xpred.shape[1] == self.X.shape[1], 'Need to predict with same shape as training data.'
if degrees is None:
degrees = range(self.basis.degree + 1) # depends on [control=['if'], data=['degrees']]
tmp_phi = np.empty((len(degrees), Xpred.shape[0], Xpred.shape[1]))
for (i, w) in enumerate(degrees):
# Objective function
tmpX = self._phi(Xpred, w)
tmp_phi[i] = tmpX * self.weights[[w], :] # depends on [control=['for'], data=[]]
return tmp_phi |
def dump_migration_session_state(raw):
"""
Serialize a migration session state to yaml using nicer formatting
Args:
raw: object to serialize
Returns: string (of yaml)
Specifically, this forces the "output" and "traceback" members of state step
dicts (e.g. state[0]['output']) to use block formatting. For example, rather
than this:
- migration: [app, migration_name]
output: "line 1\nline2\nline3"
You get this:
- migration: [app, migration_name]
output: |
line 1
line 2
line 3
"""
class BlockStyle(str): pass
class SessionDumper(yaml.SafeDumper): pass
def str_block_formatter(dumper, data):
return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')
SessionDumper.add_representer(BlockStyle, str_block_formatter)
raw = deepcopy(raw)
for step in raw:
step['output'] = BlockStyle(step['output'])
step['traceback'] = BlockStyle(step['traceback'])
return yaml.dump(raw, Dumper=SessionDumper) | def function[dump_migration_session_state, parameter[raw]]:
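# Runnable usage sketch: the 'output' and 'traceback' values come out in YAML
# block style. The session data below is made up for illustration.
state = [{
    'migration': ['app', '0001_initial'],
    'output': 'line 1\nline 2',
    'traceback': 'none',
}]
print(dump_migration_session_state(state))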
constant[
Serialize a migration session state to yaml using nicer formatting
Args:
raw: object to serialize
Returns: string (of yaml)
Specifically, this forces the "output" member of state step dicts (e.g.
state[0]['output']) to use block formatting. For example, rather than this:
- migration: [app, migration_name]
output: "line 1
line 2
line 3"
You get this:
- migration: [app, migration_name]
output: |
line 1
line 2
line 3
]
class class[BlockStyle, parameter[]] begin[:]
pass
class class[SessionDumper, parameter[]] begin[:]
pass
def function[str_block_formatter, parameter[dumper, data]]:
return[call[name[dumper].represent_scalar, parameter[constant[tag:yaml.org,2002:str], name[data]]]]
call[name[SessionDumper].add_representer, parameter[name[BlockStyle], name[str_block_formatter]]]
variable[raw] assign[=] call[name[deepcopy], parameter[name[raw]]]
for taget[name[step]] in starred[name[raw]] begin[:]
call[name[step]][constant[output]] assign[=] call[name[BlockStyle], parameter[call[name[step]][constant[output]]]]
call[name[step]][constant[traceback]] assign[=] call[name[BlockStyle], parameter[call[name[step]][constant[traceback]]]]
return[call[name[yaml].dump, parameter[name[raw]]]] | keyword[def] identifier[dump_migration_session_state] ( identifier[raw] ):
literal[string]
keyword[class] identifier[BlockStyle] ( identifier[str] ): keyword[pass]
keyword[class] identifier[SessionDumper] ( identifier[yaml] . identifier[SafeDumper] ): keyword[pass]
keyword[def] identifier[str_block_formatter] ( identifier[dumper] , identifier[data] ):
keyword[return] identifier[dumper] . identifier[represent_scalar] ( literal[string] , identifier[data] , identifier[style] = literal[string] )
identifier[SessionDumper] . identifier[add_representer] ( identifier[BlockStyle] , identifier[str_block_formatter] )
identifier[raw] = identifier[deepcopy] ( identifier[raw] )
keyword[for] identifier[step] keyword[in] identifier[raw] :
identifier[step] [ literal[string] ]= identifier[BlockStyle] ( identifier[step] [ literal[string] ])
identifier[step] [ literal[string] ]= identifier[BlockStyle] ( identifier[step] [ literal[string] ])
keyword[return] identifier[yaml] . identifier[dump] ( identifier[raw] , identifier[Dumper] = identifier[SessionDumper] ) | def dump_migration_session_state(raw):
"""
Serialize a migration session state to yaml using nicer formatting
Args:
raw: object to serialize
Returns: string (of yaml)
Specifically, this forces the "output" member of state step dicts (e.g.
state[0]['output']) to use block formatting. For example, rather than this:
- migration: [app, migration_name]
output: "line 1
line 2
line 3"
You get this:
- migration: [app, migration_name]
output: |
line 1
line 2
line 3
"""
class BlockStyle(str):
pass
class SessionDumper(yaml.SafeDumper):
pass
def str_block_formatter(dumper, data):
return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')
SessionDumper.add_representer(BlockStyle, str_block_formatter)
raw = deepcopy(raw)
for step in raw:
step['output'] = BlockStyle(step['output'])
step['traceback'] = BlockStyle(step['traceback']) # depends on [control=['for'], data=['step']]
return yaml.dump(raw, Dumper=SessionDumper) |
def list_files(self, prefix, flat=False):
"""
List the files in the layer with the given prefix.
If flat is True, only the first level beneath the prefix is
generated; otherwise every file path carrying the prefix is
generated.
"""
layer_path = self.get_path_to_file("")
path = os.path.join(layer_path, prefix)
@retry
def s3lst(continuation_token=None):
kwargs = {
'Bucket': self._path.bucket,
'Prefix': path,
}
if continuation_token:
kwargs['ContinuationToken'] = continuation_token
return self._conn.list_objects_v2(**kwargs)
resp = s3lst()
def iterate(resp):
if 'Contents' not in resp.keys():
resp['Contents'] = []
for item in resp['Contents']:
key = item['Key']
filename = key.replace(layer_path, '')
if not flat and filename[-1] != '/':
yield filename
elif flat and '/' not in key.replace(path, ''):
yield filename
for filename in iterate(resp):
yield filename
while resp['IsTruncated'] and resp['NextContinuationToken']:
resp = s3lst(resp['NextContinuationToken'])
for filename in iterate(resp):
yield filename | def function[list_files, parameter[self, prefix, flat]]:
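# Hypothetical usage: lazily iterate over S3 keys one directory level deep.
# `layer` stands in for an instance of the storage class this method lives on.
for name in layer.list_files('build/', flat=True):
    print(name)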
constant[
List the files in the layer with the given prefix.
flat means only generate one level of a directory,
while non-flat means generate all file paths with that
prefix.
]
variable[layer_path] assign[=] call[name[self].get_path_to_file, parameter[constant[]]]
variable[path] assign[=] call[name[os].path.join, parameter[name[layer_path], name[prefix]]]
def function[s3lst, parameter[continuation_token]]:
variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da2045671c0>, <ast.Constant object at 0x7da204565e40>], [<ast.Attribute object at 0x7da2045651b0>, <ast.Name object at 0x7da204567010>]]
if name[continuation_token] begin[:]
call[name[kwargs]][constant[ContinuationToken]] assign[=] name[continuation_token]
return[call[name[self]._conn.list_objects_v2, parameter[]]]
variable[resp] assign[=] call[name[s3lst], parameter[]]
def function[iterate, parameter[resp]]:
if compare[constant[Contents] <ast.NotIn object at 0x7da2590d7190> call[name[resp].keys, parameter[]]] begin[:]
call[name[resp]][constant[Contents]] assign[=] list[[]]
for taget[name[item]] in starred[call[name[resp]][constant[Contents]]] begin[:]
variable[key] assign[=] call[name[item]][constant[Key]]
variable[filename] assign[=] call[name[key].replace, parameter[name[layer_path], constant[]]]
if <ast.BoolOp object at 0x7da204565a80> begin[:]
<ast.Yield object at 0x7da204566830>
for taget[name[filename]] in starred[call[name[iterate], parameter[name[resp]]]] begin[:]
<ast.Yield object at 0x7da2045661d0>
while <ast.BoolOp object at 0x7da204567700> begin[:]
variable[resp] assign[=] call[name[s3lst], parameter[call[name[resp]][constant[NextContinuationToken]]]]
for taget[name[filename]] in starred[call[name[iterate], parameter[name[resp]]]] begin[:]
<ast.Yield object at 0x7da204567640> | keyword[def] identifier[list_files] ( identifier[self] , identifier[prefix] , identifier[flat] = keyword[False] ):
literal[string]
identifier[layer_path] = identifier[self] . identifier[get_path_to_file] ( literal[string] )
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[layer_path] , identifier[prefix] )
@ identifier[retry]
keyword[def] identifier[s3lst] ( identifier[continuation_token] = keyword[None] ):
identifier[kwargs] ={
literal[string] : identifier[self] . identifier[_path] . identifier[bucket] ,
literal[string] : identifier[path] ,
}
keyword[if] identifier[continuation_token] :
identifier[kwargs] [ literal[string] ]= identifier[continuation_token]
keyword[return] identifier[self] . identifier[_conn] . identifier[list_objects_v2] (** identifier[kwargs] )
identifier[resp] = identifier[s3lst] ()
keyword[def] identifier[iterate] ( identifier[resp] ):
keyword[if] literal[string] keyword[not] keyword[in] identifier[resp] . identifier[keys] ():
identifier[resp] [ literal[string] ]=[]
keyword[for] identifier[item] keyword[in] identifier[resp] [ literal[string] ]:
identifier[key] = identifier[item] [ literal[string] ]
identifier[filename] = identifier[key] . identifier[replace] ( identifier[layer_path] , literal[string] )
keyword[if] keyword[not] identifier[flat] keyword[and] identifier[filename] [- literal[int] ]!= literal[string] :
keyword[yield] identifier[filename]
keyword[elif] identifier[flat] keyword[and] literal[string] keyword[not] keyword[in] identifier[key] . identifier[replace] ( identifier[path] , literal[string] ):
keyword[yield] identifier[filename]
keyword[for] identifier[filename] keyword[in] identifier[iterate] ( identifier[resp] ):
keyword[yield] identifier[filename]
keyword[while] identifier[resp] [ literal[string] ] keyword[and] identifier[resp] [ literal[string] ]:
identifier[resp] = identifier[s3lst] ( identifier[resp] [ literal[string] ])
keyword[for] identifier[filename] keyword[in] identifier[iterate] ( identifier[resp] ):
keyword[yield] identifier[filename] | def list_files(self, prefix, flat=False):
"""
List the files in the layer with the given prefix.
flat means only generate one level of a directory,
while non-flat means generate all file paths with that
prefix.
"""
layer_path = self.get_path_to_file('')
path = os.path.join(layer_path, prefix)
@retry
def s3lst(continuation_token=None):
kwargs = {'Bucket': self._path.bucket, 'Prefix': path}
if continuation_token:
kwargs['ContinuationToken'] = continuation_token # depends on [control=['if'], data=[]]
return self._conn.list_objects_v2(**kwargs)
resp = s3lst()
def iterate(resp):
if 'Contents' not in resp.keys():
resp['Contents'] = [] # depends on [control=['if'], data=[]]
for item in resp['Contents']:
key = item['Key']
filename = key.replace(layer_path, '')
if not flat and filename[-1] != '/':
yield filename # depends on [control=['if'], data=[]]
elif flat and '/' not in key.replace(path, ''):
yield filename # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
for filename in iterate(resp):
yield filename # depends on [control=['for'], data=['filename']]
while resp['IsTruncated'] and resp['NextContinuationToken']:
resp = s3lst(resp['NextContinuationToken'])
for filename in iterate(resp):
yield filename # depends on [control=['for'], data=['filename']] # depends on [control=['while'], data=[]] |
def _check_file_exists(self, cfg_file):
"""
Check that the file exists on the remote device, using its full path.
cfg_file is a full path, e.g. flash:/file_name
For example
# dir flash:/candidate_config.txt
Directory of flash:/candidate_config.txt
33 -rw- 5592 Dec 18 2015 10:50:22 -08:00 candidate_config.txt
return boolean
"""
cmd = 'dir {}'.format(cfg_file)
success_pattern = 'Directory of {}'.format(cfg_file)
output = self.device.send_command_expect(cmd)
if 'Error opening' in output:
return False
elif success_pattern in output:
return True
return False | def function[_check_file_exists, parameter[self, cfg_file]]:
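# Hypothetical usage from inside the driver class (the filename echoes the
# docstring example):
if not self._check_file_exists('flash:/candidate_config.txt'):
    raise RuntimeError('candidate config not found on device')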
constant[
Check that the file exists on remote device using full path.
cfg_file is full path i.e. flash:/file_name
For example
# dir flash:/candidate_config.txt
Directory of flash:/candidate_config.txt
33 -rw- 5592 Dec 18 2015 10:50:22 -08:00 candidate_config.txt
return boolean
]
variable[cmd] assign[=] call[constant[dir {}].format, parameter[name[cfg_file]]]
variable[success_pattern] assign[=] call[constant[Directory of {}].format, parameter[name[cfg_file]]]
variable[output] assign[=] call[name[self].device.send_command_expect, parameter[name[cmd]]]
if compare[constant[Error opening] in name[output]] begin[:]
return[constant[False]]
return[constant[False]] | keyword[def] identifier[_check_file_exists] ( identifier[self] , identifier[cfg_file] ):
literal[string]
identifier[cmd] = literal[string] . identifier[format] ( identifier[cfg_file] )
identifier[success_pattern] = literal[string] . identifier[format] ( identifier[cfg_file] )
identifier[output] = identifier[self] . identifier[device] . identifier[send_command_expect] ( identifier[cmd] )
keyword[if] literal[string] keyword[in] identifier[output] :
keyword[return] keyword[False]
keyword[elif] identifier[success_pattern] keyword[in] identifier[output] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def _check_file_exists(self, cfg_file):
"""
Check that the file exists on remote device using full path.
cfg_file is full path i.e. flash:/file_name
For example
# dir flash:/candidate_config.txt
Directory of flash:/candidate_config.txt
33 -rw- 5592 Dec 18 2015 10:50:22 -08:00 candidate_config.txt
return boolean
"""
cmd = 'dir {}'.format(cfg_file)
success_pattern = 'Directory of {}'.format(cfg_file)
output = self.device.send_command_expect(cmd)
if 'Error opening' in output:
return False # depends on [control=['if'], data=[]]
elif success_pattern in output:
return True # depends on [control=['if'], data=[]]
return False |
@contextmanager  # assumption: decorated in the original module so the generator works in a `with` block
def open_file_for_write(filepath, mode=None):
    """Writes both to the given filepath and to a tmpdir location.
    This works around an issue on some NFS mounts where a file cannot reliably
    be read immediately after it has been written. To compensate, every file we
    write is also written to /tmp, and reads of these files are redirected there.
Args:
filepath (str): File to write.
mode (int): Same mode arg as you would pass to `os.chmod`.
Yields:
File-like object.
"""
stream = StringIO()
yield stream
content = stream.getvalue()
filepath = os.path.realpath(filepath)
tmpdir = tmpdir_manager.mkdtemp()
cache_filepath = os.path.join(tmpdir, os.path.basename(filepath))
debug_print("Writing to %s (local cache of %s)", cache_filepath, filepath)
with atomic_write(filepath, overwrite=True) as f:
f.write(content)
if mode is not None:
os.chmod(filepath, mode)
with open(cache_filepath, 'w') as f:
f.write(content)
file_cache[filepath] = cache_filepath | def function[open_file_for_write, parameter[filepath, mode]]:
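# Usage sketch, relying on the contextmanager decoration noted above: the
# content is written on exit from the with-block, then chmod'ed and cached.
with open_file_for_write('/tmp/example/settings.json', mode=0o644) as f:
    f.write('{"ok": true}')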
constant[Writes both to given filepath, and tmpdir location.
This is to get around the problem with some NFS's where immediately reading
a file that has just been written is problematic. Instead, any files that we
write, we also write to /tmp, and reads of these files are redirected there.
Args:
filepath (str): File to write.
mode (int): Same mode arg as you would pass to `os.chmod`.
Yields:
File-like object.
]
variable[stream] assign[=] call[name[StringIO], parameter[]]
<ast.Yield object at 0x7da1b1883310>
variable[content] assign[=] call[name[stream].getvalue, parameter[]]
variable[filepath] assign[=] call[name[os].path.realpath, parameter[name[filepath]]]
variable[tmpdir] assign[=] call[name[tmpdir_manager].mkdtemp, parameter[]]
variable[cache_filepath] assign[=] call[name[os].path.join, parameter[name[tmpdir], call[name[os].path.basename, parameter[name[filepath]]]]]
call[name[debug_print], parameter[constant[Writing to %s (local cache of %s)], name[cache_filepath], name[filepath]]]
with call[name[atomic_write], parameter[name[filepath]]] begin[:]
call[name[f].write, parameter[name[content]]]
if compare[name[mode] is_not constant[None]] begin[:]
call[name[os].chmod, parameter[name[filepath], name[mode]]]
with call[name[open], parameter[name[cache_filepath], constant[w]]] begin[:]
call[name[f].write, parameter[name[content]]]
call[name[file_cache]][name[filepath]] assign[=] name[cache_filepath] | keyword[def] identifier[open_file_for_write] ( identifier[filepath] , identifier[mode] = keyword[None] ):
literal[string]
identifier[stream] = identifier[StringIO] ()
keyword[yield] identifier[stream]
identifier[content] = identifier[stream] . identifier[getvalue] ()
identifier[filepath] = identifier[os] . identifier[path] . identifier[realpath] ( identifier[filepath] )
identifier[tmpdir] = identifier[tmpdir_manager] . identifier[mkdtemp] ()
identifier[cache_filepath] = identifier[os] . identifier[path] . identifier[join] ( identifier[tmpdir] , identifier[os] . identifier[path] . identifier[basename] ( identifier[filepath] ))
identifier[debug_print] ( literal[string] , identifier[cache_filepath] , identifier[filepath] )
keyword[with] identifier[atomic_write] ( identifier[filepath] , identifier[overwrite] = keyword[True] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[content] )
keyword[if] identifier[mode] keyword[is] keyword[not] keyword[None] :
identifier[os] . identifier[chmod] ( identifier[filepath] , identifier[mode] )
keyword[with] identifier[open] ( identifier[cache_filepath] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[content] )
identifier[file_cache] [ identifier[filepath] ]= identifier[cache_filepath] | def open_file_for_write(filepath, mode=None):
"""Writes both to given filepath, and tmpdir location.
This is to get around the problem with some NFS's where immediately reading
a file that has just been written is problematic. Instead, any files that we
write, we also write to /tmp, and reads of these files are redirected there.
Args:
filepath (str): File to write.
mode (int): Same mode arg as you would pass to `os.chmod`.
Yields:
File-like object.
"""
stream = StringIO()
yield stream
content = stream.getvalue()
filepath = os.path.realpath(filepath)
tmpdir = tmpdir_manager.mkdtemp()
cache_filepath = os.path.join(tmpdir, os.path.basename(filepath))
debug_print('Writing to %s (local cache of %s)', cache_filepath, filepath)
with atomic_write(filepath, overwrite=True) as f:
f.write(content) # depends on [control=['with'], data=['f']]
if mode is not None:
os.chmod(filepath, mode) # depends on [control=['if'], data=['mode']]
with open(cache_filepath, 'w') as f:
f.write(content) # depends on [control=['with'], data=['f']]
file_cache[filepath] = cache_filepath |
def recursive_parse_kwargs(root_func, path_=None, verbose=None):
"""
recursive kwargs parser
TODO: rectify with others
FIXME: if docstr indentation is off, this fails
SeeAlso:
argparse_funckw
recursive_parse_kwargs
parse_kwarg_keys
parse_func_kwarg_keys
get_func_kwargs
Args:
root_func (function): live python function
path_ (None): (default = None)
Returns:
list:
CommandLine:
python -m utool.util_inspect recursive_parse_kwargs:0
python -m utool.util_inspect recursive_parse_kwargs:0 --verbinspect
python -m utool.util_inspect recursive_parse_kwargs:1
python -m utool.util_inspect recursive_parse_kwargs:2 --mod plottool --func draw_histogram
python -m utool.util_inspect recursive_parse_kwargs:2 --mod vtool --func ScoreNormalizer.visualize
python -m utool.util_inspect recursive_parse_kwargs:2 --mod ibeis.viz.viz_matches --func show_name_matches --verbinspect
python -m utool.util_inspect recursive_parse_kwargs:2 --mod ibeis.expt.experiment_drawing --func draw_rank_cmc --verbinspect
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> root_func = iter_module_doctestable
>>> path_ = None
>>> result = ut.repr2(recursive_parse_kwargs(root_func), nl=1)
>>> print(result)
[
('include_funcs', True),
('include_classes', True),
('include_methods', True),
('include_builtin', True),
('include_inherited', False),
('debug_key', None),
]
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> from ibeis.algo.hots import chip_match
>>> import utool as ut
>>> root_func = chip_match.ChipMatch.show_ranked_matches
>>> path_ = None
>>> result = ut.repr2(recursive_parse_kwargs(root_func))
>>> print(result)
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> modname = ut.get_argval('--mod', type_=str, default='plottool')
>>> funcname = ut.get_argval('--func', type_=str, default='draw_histogram')
>>> mod = ut.import_modname(modname)
>>> root_func = lookup_attribute_chain(funcname, mod.__dict__)
>>> path_ = None
>>> parsed = recursive_parse_kwargs(root_func)
>>> flags = ut.unique_flags(ut.take_column(parsed, 0))
>>> unique = ut.compress(parsed, flags)
>>> print('parsed = %s' % (ut.repr4(parsed),))
>>> print('unique = %s' % (ut.repr4(unique),))
"""
if verbose is None:
verbose = VERBOSE_INSPECT
if verbose:
print('[inspect] recursive parse kwargs root_func = %r ' % (root_func,))
import utool as ut
if path_ is None:
path_ = []
if root_func in path_:
if verbose:
print('[inspect] Encountered cycle. returning')
return []
path_.append(root_func)
spec = ut.get_func_argspec(root_func)
# ADD MORE
kwargs_list = []
found_explicit = list(ut.get_kwdefaults(root_func, parse_source=False).items())
if verbose:
print('[inspect] * Found explicit %r' % (found_explicit,))
#kwargs_list = [(kw,) for kw in ut.get_kwargs(root_func)[0]]
sourcecode = ut.get_func_sourcecode(root_func, strip_docstr=True,
stripdef=True)
sourcecode1 = ut.get_func_sourcecode(root_func, strip_docstr=True,
stripdef=False)
found_implicit = ut.parse_kwarg_keys(sourcecode1, spec.keywords,
with_vals=True)
if verbose:
print('[inspect] * Found found_implicit %r' % (found_implicit,))
kwargs_list = found_explicit + found_implicit
def hack_lookup_mod_attrs(attr):
# HACKS TODO: have find_funcs_called_with_kwargs infer whether an attribute is a
# module / function / type. In the module case, we can import it and
# look it up. Maybe args, or returns can help infer type. Maybe just
# register some known varnames. Maybe jedi has some better way to do
# this.
if attr == 'ut':
subdict = ut.__dict__
elif attr == 'pt':
import plottool as pt
subdict = pt.__dict__
else:
subdict = None
return subdict
def resolve_attr_subfunc(subfunc_name):
# look up attriute chain
#subdict = root_func.func_globals
subdict = meta_util_six.get_funcglobals(root_func)
subtup = subfunc_name.split('.')
try:
subdict = lookup_attribute_chain(subfunc_name, subdict)
if ut.is_func_or_method(subdict):
# Was subdict supposed to be named something else here?
subfunc = subdict
return subfunc
except (KeyError, TypeError):
for attr in subtup[:-1]:
try:
subdict = subdict[attr].__dict__
except (KeyError, TypeError):
# limited support for class lookup
if ut.is_method(root_func) and spec.args[0] == attr:
subdict = root_func.im_class.__dict__
else:
# FIXME TODO lookup_attribute_chain
subdict = hack_lookup_mod_attrs(attr)
if subdict is None:
print('Unable to find attribute of attr=%r' % (attr,))
if ut.SUPER_STRICT:
raise
if subdict is not None:
attr_name = subtup[-1]
subfunc = subdict[attr_name]
else:
subfunc = None
return subfunc
def check_subfunc_name(subfunc_name):
if isinstance(subfunc_name, tuple) or '.' in subfunc_name:
subfunc = resolve_attr_subfunc(subfunc_name)
else:
# try to directly take func from globals
func_globals = meta_util_six.get_funcglobals(root_func)
try:
subfunc = func_globals[subfunc_name]
except KeyError:
print('Unable to find function definition subfunc_name=%r' %
(subfunc_name,))
if ut.SUPER_STRICT:
raise
subfunc = None
if subfunc is not None:
subkw_list = recursive_parse_kwargs(subfunc, path_, verbose=verbose)
new_subkw = subkw_list
# have_keys = set(ut.take_column(kwargs_list, 0))
# new_subkw = [item for item in subkw_list
# if item[0] not in have_keys]
else:
new_subkw = []
return new_subkw
if spec.keywords is not None:
if verbose:
print('[inspect] Checking spec.keywords=%r' % (spec.keywords,))
subfunc_name_list = ut.find_funcs_called_with_kwargs(sourcecode, spec.keywords)
if verbose:
print('[inspect] Checking subfunc_name_list=%r' % (subfunc_name_list,))
for subfunc_name in subfunc_name_list:
try:
new_subkw = check_subfunc_name(subfunc_name)
if verbose:
print('[inspect] * Found %r' % (new_subkw,))
kwargs_list.extend(new_subkw)
except TypeError:
print('warning: unable to recursively parse type of: %r' % (subfunc_name,))
return kwargs_list | def function[recursive_parse_kwargs, parameter[root_func, path_, verbose]]:
constant[
recursive kwargs parser
TODO: rectify with others
FIXME: if docstr indentation is off, this fails
SeeAlso:
argparse_funckw
recursive_parse_kwargs
parse_kwarg_keys
parse_func_kwarg_keys
get_func_kwargs
Args:
root_func (function): live python function
path_ (None): (default = None)
Returns:
list:
CommandLine:
python -m utool.util_inspect recursive_parse_kwargs:0
python -m utool.util_inspect recursive_parse_kwargs:0 --verbinspect
python -m utool.util_inspect recursive_parse_kwargs:1
python -m utool.util_inspect recursive_parse_kwargs:2 --mod plottool --func draw_histogram
python -m utool.util_inspect recursive_parse_kwargs:2 --mod vtool --func ScoreNormalizer.visualize
python -m utool.util_inspect recursive_parse_kwargs:2 --mod ibeis.viz.viz_matches --func show_name_matches --verbinspect
python -m utool.util_inspect recursive_parse_kwargs:2 --mod ibeis.expt.experiment_drawing --func draw_rank_cmc --verbinspect
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> root_func = iter_module_doctestable
>>> path_ = None
>>> result = ut.repr2(recursive_parse_kwargs(root_func), nl=1)
>>> print(result)
[
('include_funcs', True),
('include_classes', True),
('include_methods', True),
('include_builtin', True),
('include_inherited', False),
('debug_key', None),
]
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> from ibeis.algo.hots import chip_match
>>> import utool as ut
>>> root_func = chip_match.ChipMatch.show_ranked_matches
>>> path_ = None
>>> result = ut.repr2(recursive_parse_kwargs(root_func))
>>> print(result)
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> modname = ut.get_argval('--mod', type_=str, default='plottool')
>>> funcname = ut.get_argval('--func', type_=str, default='draw_histogram')
>>> mod = ut.import_modname(modname)
>>> root_func = lookup_attribute_chain(funcname, mod.__dict__)
>>> path_ = None
>>> parsed = recursive_parse_kwargs(root_func)
>>> flags = ut.unique_flags(ut.take_column(parsed, 0))
>>> unique = ut.compress(parsed, flags)
>>> print('parsed = %s' % (ut.repr4(parsed),))
>>> print('unique = %s' % (ut.repr4(unique),))
]
if compare[name[verbose] is constant[None]] begin[:]
variable[verbose] assign[=] name[VERBOSE_INSPECT]
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[constant[[inspect] recursive parse kwargs root_func = %r ] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b24d3af0>]]]]]
import module[utool] as alias[ut]
if compare[name[path_] is constant[None]] begin[:]
variable[path_] assign[=] list[[]]
if compare[name[root_func] in name[path_]] begin[:]
if name[verbose] begin[:]
call[name[print], parameter[constant[[inspect] Encountered cycle. returning]]]
return[list[[]]]
call[name[path_].append, parameter[name[root_func]]]
variable[spec] assign[=] call[name[ut].get_func_argspec, parameter[name[root_func]]]
variable[kwargs_list] assign[=] list[[]]
variable[found_explicit] assign[=] call[name[list], parameter[call[call[name[ut].get_kwdefaults, parameter[name[root_func]]].items, parameter[]]]]
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[constant[[inspect] * Found explicit %r] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b24d2f80>]]]]]
variable[sourcecode] assign[=] call[name[ut].get_func_sourcecode, parameter[name[root_func]]]
variable[sourcecode1] assign[=] call[name[ut].get_func_sourcecode, parameter[name[root_func]]]
variable[found_implicit] assign[=] call[name[ut].parse_kwarg_keys, parameter[name[sourcecode1], name[spec].keywords]]
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[constant[[inspect] * Found found_implicit %r] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b24d2800>]]]]]
variable[kwargs_list] assign[=] binary_operation[name[found_explicit] + name[found_implicit]]
def function[hack_lookup_mod_attrs, parameter[attr]]:
if compare[name[attr] equal[==] constant[ut]] begin[:]
variable[subdict] assign[=] name[ut].__dict__
return[name[subdict]]
def function[resolve_attr_subfunc, parameter[subfunc_name]]:
variable[subdict] assign[=] call[name[meta_util_six].get_funcglobals, parameter[name[root_func]]]
variable[subtup] assign[=] call[name[subfunc_name].split, parameter[constant[.]]]
<ast.Try object at 0x7da1b24d1e70>
if compare[name[subdict] is_not constant[None]] begin[:]
variable[attr_name] assign[=] call[name[subtup]][<ast.UnaryOp object at 0x7da1b24d0bb0>]
variable[subfunc] assign[=] call[name[subdict]][name[attr_name]]
return[name[subfunc]]
def function[check_subfunc_name, parameter[subfunc_name]]:
if <ast.BoolOp object at 0x7da1b24d1e10> begin[:]
variable[subfunc] assign[=] call[name[resolve_attr_subfunc], parameter[name[subfunc_name]]]
if compare[name[subfunc] is_not constant[None]] begin[:]
variable[subkw_list] assign[=] call[name[recursive_parse_kwargs], parameter[name[subfunc], name[path_]]]
variable[new_subkw] assign[=] name[subkw_list]
return[name[new_subkw]]
if compare[name[spec].keywords is_not constant[None]] begin[:]
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[constant[[inspect] Checking spec.keywords=%r] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b246b760>]]]]]
variable[subfunc_name_list] assign[=] call[name[ut].find_funcs_called_with_kwargs, parameter[name[sourcecode], name[spec].keywords]]
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[constant[[inspect] Checking subfunc_name_list=%r] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b24e64a0>]]]]]
for taget[name[subfunc_name]] in starred[name[subfunc_name_list]] begin[:]
<ast.Try object at 0x7da1b24eb130>
return[name[kwargs_list]] | keyword[def] identifier[recursive_parse_kwargs] ( identifier[root_func] , identifier[path_] = keyword[None] , identifier[verbose] = keyword[None] ):
literal[string]
keyword[if] identifier[verbose] keyword[is] keyword[None] :
identifier[verbose] = identifier[VERBOSE_INSPECT]
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] %( identifier[root_func] ,))
keyword[import] identifier[utool] keyword[as] identifier[ut]
keyword[if] identifier[path_] keyword[is] keyword[None] :
identifier[path_] =[]
keyword[if] identifier[root_func] keyword[in] identifier[path_] :
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] )
keyword[return] []
identifier[path_] . identifier[append] ( identifier[root_func] )
identifier[spec] = identifier[ut] . identifier[get_func_argspec] ( identifier[root_func] )
identifier[kwargs_list] =[]
identifier[found_explicit] = identifier[list] ( identifier[ut] . identifier[get_kwdefaults] ( identifier[root_func] , identifier[parse_source] = keyword[False] ). identifier[items] ())
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] %( identifier[found_explicit] ,))
identifier[sourcecode] = identifier[ut] . identifier[get_func_sourcecode] ( identifier[root_func] , identifier[strip_docstr] = keyword[True] ,
identifier[stripdef] = keyword[True] )
identifier[sourcecode1] = identifier[ut] . identifier[get_func_sourcecode] ( identifier[root_func] , identifier[strip_docstr] = keyword[True] ,
identifier[stripdef] = keyword[False] )
identifier[found_implicit] = identifier[ut] . identifier[parse_kwarg_keys] ( identifier[sourcecode1] , identifier[spec] . identifier[keywords] ,
identifier[with_vals] = keyword[True] )
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] %( identifier[found_implicit] ,))
identifier[kwargs_list] = identifier[found_explicit] + identifier[found_implicit]
keyword[def] identifier[hack_lookup_mod_attrs] ( identifier[attr] ):
keyword[if] identifier[attr] == literal[string] :
identifier[subdict] = identifier[ut] . identifier[__dict__]
keyword[elif] identifier[attr] == literal[string] :
keyword[import] identifier[plottool] keyword[as] identifier[pt]
identifier[subdict] = identifier[pt] . identifier[__dict__]
keyword[else] :
identifier[subdict] = keyword[None]
keyword[return] identifier[subdict]
keyword[def] identifier[resolve_attr_subfunc] ( identifier[subfunc_name] ):
identifier[subdict] = identifier[meta_util_six] . identifier[get_funcglobals] ( identifier[root_func] )
identifier[subtup] = identifier[subfunc_name] . identifier[split] ( literal[string] )
keyword[try] :
identifier[subdict] = identifier[lookup_attribute_chain] ( identifier[subfunc_name] , identifier[subdict] )
keyword[if] identifier[ut] . identifier[is_func_or_method] ( identifier[subdict] ):
identifier[subfunc] = identifier[subdict]
keyword[return] identifier[subfunc]
keyword[except] ( identifier[KeyError] , identifier[TypeError] ):
keyword[for] identifier[attr] keyword[in] identifier[subtup] [:- literal[int] ]:
keyword[try] :
identifier[subdict] = identifier[subdict] [ identifier[attr] ]. identifier[__dict__]
keyword[except] ( identifier[KeyError] , identifier[TypeError] ):
keyword[if] identifier[ut] . identifier[is_method] ( identifier[root_func] ) keyword[and] identifier[spec] . identifier[args] [ literal[int] ]== identifier[attr] :
identifier[subdict] = identifier[root_func] . identifier[im_class] . identifier[__dict__]
keyword[else] :
identifier[subdict] = identifier[hack_lookup_mod_attrs] ( identifier[attr] )
keyword[if] identifier[subdict] keyword[is] keyword[None] :
identifier[print] ( literal[string] %( identifier[attr] ,))
keyword[if] identifier[ut] . identifier[SUPER_STRICT] :
keyword[raise]
keyword[if] identifier[subdict] keyword[is] keyword[not] keyword[None] :
identifier[attr_name] = identifier[subtup] [- literal[int] ]
identifier[subfunc] = identifier[subdict] [ identifier[attr_name] ]
keyword[else] :
identifier[subfunc] = keyword[None]
keyword[return] identifier[subfunc]
keyword[def] identifier[check_subfunc_name] ( identifier[subfunc_name] ):
keyword[if] identifier[isinstance] ( identifier[subfunc_name] , identifier[tuple] ) keyword[or] literal[string] keyword[in] identifier[subfunc_name] :
identifier[subfunc] = identifier[resolve_attr_subfunc] ( identifier[subfunc_name] )
keyword[else] :
identifier[func_globals] = identifier[meta_util_six] . identifier[get_funcglobals] ( identifier[root_func] )
keyword[try] :
identifier[subfunc] = identifier[func_globals] [ identifier[subfunc_name] ]
keyword[except] identifier[KeyError] :
identifier[print] ( literal[string] %
( identifier[subfunc_name] ,))
keyword[if] identifier[ut] . identifier[SUPER_STRICT] :
keyword[raise]
identifier[subfunc] = keyword[None]
keyword[if] identifier[subfunc] keyword[is] keyword[not] keyword[None] :
identifier[subkw_list] = identifier[recursive_parse_kwargs] ( identifier[subfunc] , identifier[path_] , identifier[verbose] = identifier[verbose] )
identifier[new_subkw] = identifier[subkw_list]
keyword[else] :
identifier[new_subkw] =[]
keyword[return] identifier[new_subkw]
keyword[if] identifier[spec] . identifier[keywords] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] %( identifier[spec] . identifier[keywords] ,))
identifier[subfunc_name_list] = identifier[ut] . identifier[find_funcs_called_with_kwargs] ( identifier[sourcecode] , identifier[spec] . identifier[keywords] )
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] %( identifier[subfunc_name_list] ,))
keyword[for] identifier[subfunc_name] keyword[in] identifier[subfunc_name_list] :
keyword[try] :
identifier[new_subkw] = identifier[check_subfunc_name] ( identifier[subfunc_name] )
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] %( identifier[new_subkw] ,))
identifier[kwargs_list] . identifier[extend] ( identifier[new_subkw] )
keyword[except] identifier[TypeError] :
identifier[print] ( literal[string] %( identifier[subfunc_name] ,))
keyword[return] identifier[kwargs_list] | def recursive_parse_kwargs(root_func, path_=None, verbose=None):
"""
recursive kwargs parser
TODO: rectify with others
FIXME: if docstr indentation is off, this fails
SeeAlso:
argparse_funckw
recursive_parse_kwargs
parse_kwarg_keys
parse_func_kwarg_keys
get_func_kwargs
Args:
root_func (function): live python function
path_ (None): (default = None)
Returns:
list:
CommandLine:
python -m utool.util_inspect recursive_parse_kwargs:0
python -m utool.util_inspect recursive_parse_kwargs:0 --verbinspect
python -m utool.util_inspect recursive_parse_kwargs:1
python -m utool.util_inspect recursive_parse_kwargs:2 --mod plottool --func draw_histogram
python -m utool.util_inspect recursive_parse_kwargs:2 --mod vtool --func ScoreNormalizer.visualize
python -m utool.util_inspect recursive_parse_kwargs:2 --mod ibeis.viz.viz_matches --func show_name_matches --verbinspect
python -m utool.util_inspect recursive_parse_kwargs:2 --mod ibeis.expt.experiment_drawing --func draw_rank_cmc --verbinspect
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> root_func = iter_module_doctestable
>>> path_ = None
>>> result = ut.repr2(recursive_parse_kwargs(root_func), nl=1)
>>> print(result)
[
('include_funcs', True),
('include_classes', True),
('include_methods', True),
('include_builtin', True),
('include_inherited', False),
('debug_key', None),
]
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> from ibeis.algo.hots import chip_match
>>> import utool as ut
>>> root_func = chip_match.ChipMatch.show_ranked_matches
>>> path_ = None
>>> result = ut.repr2(recursive_parse_kwargs(root_func))
>>> print(result)
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_inspect import * # NOQA
>>> import utool as ut
>>> modname = ut.get_argval('--mod', type_=str, default='plottool')
>>> funcname = ut.get_argval('--func', type_=str, default='draw_histogram')
>>> mod = ut.import_modname(modname)
>>> root_func = lookup_attribute_chain(funcname, mod.__dict__)
>>> path_ = None
>>> parsed = recursive_parse_kwargs(root_func)
>>> flags = ut.unique_flags(ut.take_column(parsed, 0))
>>> unique = ut.compress(parsed, flags)
>>> print('parsed = %s' % (ut.repr4(parsed),))
>>> print('unique = %s' % (ut.repr4(unique),))
"""
if verbose is None:
verbose = VERBOSE_INSPECT # depends on [control=['if'], data=['verbose']]
if verbose:
print('[inspect] recursive parse kwargs root_func = %r ' % (root_func,)) # depends on [control=['if'], data=[]]
import utool as ut
if path_ is None:
path_ = [] # depends on [control=['if'], data=['path_']]
if root_func in path_:
if verbose:
print('[inspect] Encountered cycle. returning') # depends on [control=['if'], data=[]]
return [] # depends on [control=['if'], data=[]]
path_.append(root_func)
spec = ut.get_func_argspec(root_func)
# ADD MORE
kwargs_list = []
found_explicit = list(ut.get_kwdefaults(root_func, parse_source=False).items())
if verbose:
print('[inspect] * Found explicit %r' % (found_explicit,)) # depends on [control=['if'], data=[]]
#kwargs_list = [(kw,) for kw in ut.get_kwargs(root_func)[0]]
sourcecode = ut.get_func_sourcecode(root_func, strip_docstr=True, stripdef=True)
sourcecode1 = ut.get_func_sourcecode(root_func, strip_docstr=True, stripdef=False)
found_implicit = ut.parse_kwarg_keys(sourcecode1, spec.keywords, with_vals=True)
if verbose:
print('[inspect] * Found found_implicit %r' % (found_implicit,)) # depends on [control=['if'], data=[]]
kwargs_list = found_explicit + found_implicit
def hack_lookup_mod_attrs(attr):
# HACKS TODO: have find_funcs_called_with_kwargs infer an attribute is a
# module / function / type. In the module case, we can import it and
# look it up. Maybe args, or returns can help infer type. Maybe just
# register some known varnames. Maybe jedi has some better way to do
# this.
if attr == 'ut':
subdict = ut.__dict__ # depends on [control=['if'], data=[]]
elif attr == 'pt':
import plottool as pt
subdict = pt.__dict__ # depends on [control=['if'], data=[]]
else:
subdict = None
return subdict
def resolve_attr_subfunc(subfunc_name):
# look up attriute chain
#subdict = root_func.func_globals
subdict = meta_util_six.get_funcglobals(root_func)
subtup = subfunc_name.split('.')
try:
subdict = lookup_attribute_chain(subfunc_name, subdict)
if ut.is_func_or_method(subdict):
# Was subdict supposed to be named something else here?
subfunc = subdict
return subfunc # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except (KeyError, TypeError):
for attr in subtup[:-1]:
try:
subdict = subdict[attr].__dict__ # depends on [control=['try'], data=[]]
except (KeyError, TypeError):
# limited support for class lookup
if ut.is_method(root_func) and spec.args[0] == attr:
subdict = root_func.im_class.__dict__ # depends on [control=['if'], data=[]]
else:
# FIXME TODO lookup_attribute_chain
subdict = hack_lookup_mod_attrs(attr)
if subdict is None:
print('Unable to find attribute of attr=%r' % (attr,))
if ut.SUPER_STRICT:
raise # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['attr']] # depends on [control=['except'], data=[]]
if subdict is not None:
attr_name = subtup[-1]
subfunc = subdict[attr_name] # depends on [control=['if'], data=['subdict']]
else:
subfunc = None
return subfunc
def check_subfunc_name(subfunc_name):
if isinstance(subfunc_name, tuple) or '.' in subfunc_name:
subfunc = resolve_attr_subfunc(subfunc_name) # depends on [control=['if'], data=[]]
else:
# try to directly take func from globals
func_globals = meta_util_six.get_funcglobals(root_func)
try:
subfunc = func_globals[subfunc_name] # depends on [control=['try'], data=[]]
except KeyError:
print('Unable to find function definition subfunc_name=%r' % (subfunc_name,))
if ut.SUPER_STRICT:
raise # depends on [control=['if'], data=[]]
subfunc = None # depends on [control=['except'], data=[]]
if subfunc is not None:
subkw_list = recursive_parse_kwargs(subfunc, path_, verbose=verbose)
new_subkw = subkw_list # depends on [control=['if'], data=['subfunc']]
else:
# have_keys = set(ut.take_column(kwargs_list, 0))
# new_subkw = [item for item in subkw_list
# if item[0] not in have_keys]
new_subkw = []
return new_subkw
if spec.keywords is not None:
if verbose:
print('[inspect] Checking spec.keywords=%r' % (spec.keywords,)) # depends on [control=['if'], data=[]]
subfunc_name_list = ut.find_funcs_called_with_kwargs(sourcecode, spec.keywords)
if verbose:
print('[inspect] Checking subfunc_name_list=%r' % (subfunc_name_list,)) # depends on [control=['if'], data=[]]
for subfunc_name in subfunc_name_list:
try:
new_subkw = check_subfunc_name(subfunc_name)
if verbose:
print('[inspect] * Found %r' % (new_subkw,)) # depends on [control=['if'], data=[]]
kwargs_list.extend(new_subkw) # depends on [control=['try'], data=[]]
except TypeError:
print('warning: unable to recursively parse type of: %r' % (subfunc_name,)) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['subfunc_name']] # depends on [control=['if'], data=[]]
return kwargs_list |
def _solve_pkg(main_globals):
'''
Find the parent python path of __main__. From there, solve the package
containing __main__, import it, and set the __package__ variable.
:param main_globals: globals dictionary in __main__
'''
# find __main__'s file directory and search path
main_dir, search_path = _try_search_paths(main_globals)
if not search_path:
_log_debug('Could not solve parent python path for %r' % main_dir)
# no candidates for search path, return
return
# solve package name from search path
pkg_str = path.relpath(main_dir, search_path).replace(path.sep, '.')
# Remove the spurious 'site-packages.' prefix from the package string
site_pkgs = 'site-packages.'
if pkg_str.startswith(site_pkgs):
pkg_str = pkg_str[len(site_pkgs):]
assert pkg_str
_log_debug('pkg_str=%r' % pkg_str)
# import the package in order to set __package__ value later
try:
if '__init__.py' in main_globals['__file__']:
_log_debug('__init__ script. This module is its own package')
# The __main__ is __init__.py => its own package
# If we treat it as a normal module it would be imported twice
# So we simply reuse it
sys.modules[pkg_str] = sys.modules['__main__']
# We need to set __path__ because its needed for
# relative importing
sys.modules[pkg_str].__path__ = [main_dir]
# We need to import parent package, something that would
# happen automatically in non-faked import
parent_pkg_str = '.'.join(pkg_str.split('.')[:-1])
if parent_pkg_str:
importlib.import_module(parent_pkg_str)
else:
_log_debug('Importing package %r' % pkg_str)
# we need to import the package to be available
importlib.import_module(pkg_str)
# finally enable relative import
main_globals['__package__'] = pkg_str
return pkg_str
except ImportError as e:
# In many situations we won't care if it fails; simply report the error.
# main will fail anyway if it finds an explicit relative import
_print_exc(e) | def function[_solve_pkg, parameter[main_globals]]:
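# Hypothetical bootstrap call: resolve and register the package of the script
# being run as __main__ so that its explicit relative imports can work.
pkg_name = _solve_pkg(sys.modules['__main__'].__dict__)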
constant[
Find parent python path of __main__. From there solve the package
containing __main__, import it and set __package__ variable.
:param main_globals: globals dictionary in __main__
]
<ast.Tuple object at 0x7da1b004b610> assign[=] call[name[_try_search_paths], parameter[name[main_globals]]]
if <ast.UnaryOp object at 0x7da1b004a110> begin[:]
call[name[_log_debug], parameter[binary_operation[constant[Could not solve parent python path for %r] <ast.Mod object at 0x7da2590d6920> name[main_dir]]]]
return[None]
variable[pkg_str] assign[=] call[call[name[path].relpath, parameter[name[main_dir], name[search_path]]].replace, parameter[name[path].sep, constant[.]]]
variable[site_pkgs] assign[=] constant[site-packages.]
if call[name[pkg_str].startswith, parameter[name[site_pkgs]]] begin[:]
variable[pkg_str] assign[=] call[name[pkg_str]][<ast.Slice object at 0x7da1b004ab30>]
assert[name[pkg_str]]
call[name[_log_debug], parameter[binary_operation[constant[pkg_str=%r] <ast.Mod object at 0x7da2590d6920> name[pkg_str]]]]
<ast.Try object at 0x7da1b004bc10> | keyword[def] identifier[_solve_pkg] ( identifier[main_globals] ):
literal[string]
identifier[main_dir] , identifier[search_path] = identifier[_try_search_paths] ( identifier[main_globals] )
keyword[if] keyword[not] identifier[search_path] :
identifier[_log_debug] ( literal[string] % identifier[main_dir] )
keyword[return]
identifier[pkg_str] = identifier[path] . identifier[relpath] ( identifier[main_dir] , identifier[search_path] ). identifier[replace] ( identifier[path] . identifier[sep] , literal[string] )
identifier[site_pkgs] = literal[string]
keyword[if] identifier[pkg_str] . identifier[startswith] ( identifier[site_pkgs] ):
identifier[pkg_str] = identifier[pkg_str] [ identifier[len] ( identifier[site_pkgs] ):]
keyword[assert] identifier[pkg_str]
identifier[_log_debug] ( literal[string] % identifier[pkg_str] )
keyword[try] :
keyword[if] literal[string] keyword[in] identifier[main_globals] [ literal[string] ]:
identifier[_log_debug] ( literal[string] )
identifier[sys] . identifier[modules] [ identifier[pkg_str] ]= identifier[sys] . identifier[modules] [ literal[string] ]
identifier[sys] . identifier[modules] [ identifier[pkg_str] ]. identifier[__path__] =[ identifier[main_dir] ]
identifier[parent_pkg_str] = literal[string] . identifier[join] ( identifier[pkg_str] . identifier[split] ( literal[string] )[:- literal[int] ])
keyword[if] identifier[parent_pkg_str] :
identifier[importlib] . identifier[import_module] ( identifier[parent_pkg_str] )
keyword[else] :
identifier[_log_debug] ( literal[string] % identifier[pkg_str] )
identifier[importlib] . identifier[import_module] ( identifier[pkg_str] )
identifier[main_globals] [ literal[string] ]= identifier[pkg_str]
keyword[return] identifier[pkg_str]
keyword[except] identifier[ImportError] keyword[as] identifier[e] :
identifier[_print_exc] ( identifier[e] ) | def _solve_pkg(main_globals):
"""
Find parent python path of __main__. From there solve the package
containing __main__, import it and set __package__ variable.
:param main_globals: globals dictionary in __main__
"""
# find __main__'s file directory and search path
(main_dir, search_path) = _try_search_paths(main_globals)
if not search_path:
_log_debug('Could not solve parent python path for %r' % main_dir)
# no candidates for search path, return
return # depends on [control=['if'], data=[]]
# solve package name from search path
pkg_str = path.relpath(main_dir, search_path).replace(path.sep, '.')
# Remove wrong starting string for site-packages
site_pkgs = 'site-packages.'
if pkg_str.startswith(site_pkgs):
pkg_str = pkg_str[len(site_pkgs):] # depends on [control=['if'], data=[]]
assert pkg_str
_log_debug('pkg_str=%r' % pkg_str)
# import the package in order to set __package__ value later
try:
if '__init__.py' in main_globals['__file__']:
_log_debug('__init__ script. This module is its own package')
# The __main__ is __init__.py => its own package
# If we treat it as a normal module it would be imported twice
# So we simply reuse it
sys.modules[pkg_str] = sys.modules['__main__']
# We need to set __path__ because its needed for
# relative importing
sys.modules[pkg_str].__path__ = [main_dir]
# We need to import parent package, something that would
# happen automatically in non-faked import
parent_pkg_str = '.'.join(pkg_str.split('.')[:-1])
if parent_pkg_str:
importlib.import_module(parent_pkg_str) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
_log_debug('Importing package %r' % pkg_str)
# we need to import the package to be available
importlib.import_module(pkg_str)
# finally enable relative import
main_globals['__package__'] = pkg_str
return pkg_str # depends on [control=['try'], data=[]]
except ImportError as e:
# In many situations we won't care if it fails; simply report the error.
# main will fail anyway if it finds an explicit relative import
_print_exc(e) # depends on [control=['except'], data=['e']] |
def storm_relative_helicity(u, v, heights, depth, bottom=0 * units.m,
storm_u=0 * units('m/s'), storm_v=0 * units('m/s')):
# Partially adapted from similar SharpPy code
r"""Calculate storm relative helicity.
Calculates storm relative helicity following [Markowski2010] 230-231.
.. math:: \int\limits_0^d (\bar v - c) \cdot \bar\omega_{h} \,dz
This is applied to the data from a hodograph with the following summation:
.. math:: \sum_{n = 1}^{N-1} [(u_{n+1} - c_{x})(v_{n} - c_{y}) -
(u_{n} - c_{x})(v_{n+1} - c_{y})]
Parameters
----------
u : array-like
u component winds
v : array-like
v component winds
heights : array-like
atmospheric heights, will be converted to AGL
depth : number
depth of the layer
bottom : number
height of layer bottom AGL (default is surface)
storm_u : number
u component of storm motion (default is 0 m/s)
storm_v : number
v component of storm motion (default is 0 m/s)
Returns
-------
`pint.Quantity, pint.Quantity, pint.Quantity`
positive, negative, total storm-relative helicity
"""
_, u, v = get_layer_heights(heights, depth, u, v, with_agl=True, bottom=bottom)
storm_relative_u = u - storm_u
storm_relative_v = v - storm_v
int_layers = (storm_relative_u[1:] * storm_relative_v[:-1]
- storm_relative_u[:-1] * storm_relative_v[1:])
# Need to manually check for masked value because sum() on masked array with non-default
# mask will return a masked value rather than 0. See numpy/numpy#11736
positive_srh = int_layers[int_layers.magnitude > 0.].sum()
if np.ma.is_masked(positive_srh):
positive_srh = 0.0 * units('meter**2 / second**2')
negative_srh = int_layers[int_layers.magnitude < 0.].sum()
if np.ma.is_masked(negative_srh):
negative_srh = 0.0 * units('meter**2 / second**2')
return (positive_srh.to('meter ** 2 / second ** 2'),
negative_srh.to('meter ** 2 / second ** 2'),
(positive_srh + negative_srh).to('meter ** 2 / second ** 2')) | def function[storm_relative_helicity, parameter[u, v, heights, depth, bottom, storm_u, storm_v]]:
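# Usage sketch with made-up wind-profile data; assumes a pint-style `units`
# registry like the one used in the function signature.
import numpy as np
u = np.array([5.0, 10.0, 15.0]) * units('m/s')
v = np.array([2.0, 4.0, 6.0]) * units('m/s')
heights = np.array([0.0, 500.0, 1000.0]) * units.m
pos, neg, total = storm_relative_helicity(u, v, heights, depth=1000 * units.m)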
constant[Calculate storm relative helicity.
Calculates storm relative helicity following [Markowski2010] 230-231.
.. math:: \int\limits_0^d (\bar v - c) \cdot \bar\omega_{h} \,dz
This is applied to the data from a hodograph with the following summation:
.. math:: \sum_{n = 1}^{N-1} [(u_{n+1} - c_{x})(v_{n} - c_{y}) -
(u_{n} - c_{x})(v_{n+1} - c_{y})]
Parameters
----------
u : array-like
u component winds
v : array-like
v component winds
heights : array-like
atmospheric heights, will be converted to AGL
depth : number
depth of the layer
bottom : number
height of layer bottom AGL (default is surface)
storm_u : number
u component of storm motion (default is 0 m/s)
storm_v : number
v component of storm motion (default is 0 m/s)
Returns
-------
`pint.Quantity, pint.Quantity, pint.Quantity`
positive, negative, total storm-relative helicity
]
<ast.Tuple object at 0x7da1b1d04070> assign[=] call[name[get_layer_heights], parameter[name[heights], name[depth], name[u], name[v]]]
variable[storm_relative_u] assign[=] binary_operation[name[u] - name[storm_u]]
variable[storm_relative_v] assign[=] binary_operation[name[v] - name[storm_v]]
variable[int_layers] assign[=] binary_operation[binary_operation[call[name[storm_relative_u]][<ast.Slice object at 0x7da1b1d06f20>] * call[name[storm_relative_v]][<ast.Slice object at 0x7da1b1d044f0>]] - binary_operation[call[name[storm_relative_u]][<ast.Slice object at 0x7da1b1d34910>] * call[name[storm_relative_v]][<ast.Slice object at 0x7da1b2297730>]]]
variable[positive_srh] assign[=] call[call[name[int_layers]][compare[name[int_layers].magnitude greater[>] constant[0.0]]].sum, parameter[]]
if call[name[np].ma.is_masked, parameter[name[positive_srh]]] begin[:]
variable[positive_srh] assign[=] binary_operation[constant[0.0] * call[name[units], parameter[constant[meter**2 / second**2]]]]
variable[negative_srh] assign[=] call[call[name[int_layers]][compare[name[int_layers].magnitude less[<] constant[0.0]]].sum, parameter[]]
if call[name[np].ma.is_masked, parameter[name[negative_srh]]] begin[:]
variable[negative_srh] assign[=] binary_operation[constant[0.0] * call[name[units], parameter[constant[meter**2 / second**2]]]]
return[tuple[[<ast.Call object at 0x7da1b1d36a40>, <ast.Call object at 0x7da1b1d376d0>, <ast.Call object at 0x7da1b1d346d0>]]] | keyword[def] identifier[storm_relative_helicity] ( identifier[u] , identifier[v] , identifier[heights] , identifier[depth] , identifier[bottom] = literal[int] * identifier[units] . identifier[m] ,
identifier[storm_u] = literal[int] * identifier[units] ( literal[string] ), identifier[storm_v] = literal[int] * identifier[units] ( literal[string] )):
literal[string]
identifier[_] , identifier[u] , identifier[v] = identifier[get_layer_heights] ( identifier[heights] , identifier[depth] , identifier[u] , identifier[v] , identifier[with_agl] = keyword[True] , identifier[bottom] = identifier[bottom] )
identifier[storm_relative_u] = identifier[u] - identifier[storm_u]
identifier[storm_relative_v] = identifier[v] - identifier[storm_v]
identifier[int_layers] =( identifier[storm_relative_u] [ literal[int] :]* identifier[storm_relative_v] [:- literal[int] ]
- identifier[storm_relative_u] [:- literal[int] ]* identifier[storm_relative_v] [ literal[int] :])
identifier[positive_srh] = identifier[int_layers] [ identifier[int_layers] . identifier[magnitude] > literal[int] ]. identifier[sum] ()
keyword[if] identifier[np] . identifier[ma] . identifier[is_masked] ( identifier[positive_srh] ):
identifier[positive_srh] = literal[int] * identifier[units] ( literal[string] )
identifier[negative_srh] = identifier[int_layers] [ identifier[int_layers] . identifier[magnitude] < literal[int] ]. identifier[sum] ()
keyword[if] identifier[np] . identifier[ma] . identifier[is_masked] ( identifier[negative_srh] ):
identifier[negative_srh] = literal[int] * identifier[units] ( literal[string] )
keyword[return] ( identifier[positive_srh] . identifier[to] ( literal[string] ),
identifier[negative_srh] . identifier[to] ( literal[string] ),
( identifier[positive_srh] + identifier[negative_srh] ). identifier[to] ( literal[string] )) | def storm_relative_helicity(u, v, heights, depth, bottom=0 * units.m, storm_u=0 * units('m/s'), storm_v=0 * units('m/s')):
# Partially adapted from similar SharpPy code
'Calculate storm relative helicity.\n\n Calculates storm relatively helicity following [Markowski2010] 230-231.\n\n .. math:: \\int\\limits_0^d (\\bar v - c) \\cdot \\bar\\omega_{h} \\,dz\n\n This is applied to the data from a hodograph with the following summation:\n\n .. math:: \\sum_{n = 1}^{N-1} [(u_{n+1} - c_{x})(v_{n} - c_{y}) -\n (u_{n} - c_{x})(v_{n+1} - c_{y})]\n\n Parameters\n ----------\n u : array-like\n u component winds\n v : array-like\n v component winds\n heights : array-like\n atmospheric heights, will be converted to AGL\n depth : number\n depth of the layer\n bottom : number\n height of layer bottom AGL (default is surface)\n storm_u : number\n u component of storm motion (default is 0 m/s)\n storm_v : number\n v component of storm motion (default is 0 m/s)\n\n Returns\n -------\n `pint.Quantity, pint.Quantity, pint.Quantity`\n positive, negative, total storm-relative helicity\n\n '
(_, u, v) = get_layer_heights(heights, depth, u, v, with_agl=True, bottom=bottom)
storm_relative_u = u - storm_u
storm_relative_v = v - storm_v
int_layers = storm_relative_u[1:] * storm_relative_v[:-1] - storm_relative_u[:-1] * storm_relative_v[1:]
# Need to manually check for masked value because sum() on masked array with non-default
# mask will return a masked value rather than 0. See numpy/numpy#11736
positive_srh = int_layers[int_layers.magnitude > 0.0].sum()
if np.ma.is_masked(positive_srh):
positive_srh = 0.0 * units('meter**2 / second**2') # depends on [control=['if'], data=[]]
negative_srh = int_layers[int_layers.magnitude < 0.0].sum()
if np.ma.is_masked(negative_srh):
negative_srh = 0.0 * units('meter**2 / second**2') # depends on [control=['if'], data=[]]
return (positive_srh.to('meter ** 2 / second ** 2'), negative_srh.to('meter ** 2 / second ** 2'), (positive_srh + negative_srh).to('meter ** 2 / second ** 2')) |
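# Hedged usage sketch for storm_relative_helicity() above. The hodograph values
# are illustrative, not from the source; `units` is MetPy's pint registry, which
# the source module is assumed to import the same way.
import numpy as np
from metpy.units import units
u = np.array([0.0, 5.0, 10.0, 15.0]) * units('m/s')
v = np.array([0.0, 2.0, 4.0, 6.0]) * units('m/s')
heights = np.array([0.0, 1000.0, 2000.0, 3000.0]) * units.m
pos_srh, neg_srh, total_srh = storm_relative_helicity(
    u, v, heights, depth=3000 * units.m)
# each result carries units of meter**2 / second**2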
def _show_final_overflow_message(self, row_overflow, col_overflow):
"""Displays overflow message after import in statusbar"""
if row_overflow and col_overflow:
overflow_cause = _("rows and columns")
elif row_overflow:
overflow_cause = _("rows")
elif col_overflow:
overflow_cause = _("columns")
else:
raise AssertionError(_("Import cell overflow missing"))
statustext = \
_("The imported data did not fit into the grid {cause}. "
"It has been truncated. Use a larger grid for full import.").\
format(cause=overflow_cause)
post_command_event(self.main_window, self.StatusBarMsg,
text=statustext) | def function[_show_final_overflow_message, parameter[self, row_overflow, col_overflow]]:
constant[Displays overflow message after import in statusbar]
if <ast.BoolOp object at 0x7da1b16a55a0> begin[:]
variable[overflow_cause] assign[=] call[name[_], parameter[constant[rows and columns]]]
variable[statustext] assign[=] call[call[name[_], parameter[constant[The imported data did not fit into the grid {cause}. It has been truncated. Use a larger grid for full import.]]].format, parameter[]]
call[name[post_command_event], parameter[name[self].main_window, name[self].StatusBarMsg]] | keyword[def] identifier[_show_final_overflow_message] ( identifier[self] , identifier[row_overflow] , identifier[col_overflow] ):
literal[string]
keyword[if] identifier[row_overflow] keyword[and] identifier[col_overflow] :
identifier[overflow_cause] = identifier[_] ( literal[string] )
keyword[elif] identifier[row_overflow] :
identifier[overflow_cause] = identifier[_] ( literal[string] )
keyword[elif] identifier[col_overflow] :
identifier[overflow_cause] = identifier[_] ( literal[string] )
keyword[else] :
keyword[raise] identifier[AssertionError] ( identifier[_] ( literal[string] ))
identifier[statustext] = identifier[_] ( literal[string]
literal[string] ). identifier[format] ( identifier[cause] = identifier[overflow_cause] )
identifier[post_command_event] ( identifier[self] . identifier[main_window] , identifier[self] . identifier[StatusBarMsg] ,
identifier[text] = identifier[statustext] ) | def _show_final_overflow_message(self, row_overflow, col_overflow):
"""Displays overflow message after import in statusbar"""
if row_overflow and col_overflow:
overflow_cause = _('rows and columns') # depends on [control=['if'], data=[]]
elif row_overflow:
overflow_cause = _('rows') # depends on [control=['if'], data=[]]
elif col_overflow:
overflow_cause = _('columns') # depends on [control=['if'], data=[]]
else:
raise AssertionError(_('Import cell overflow missing'))
statustext = _('The imported data did not fit into the grid {cause}. It has been truncated. Use a larger grid for full import.').format(cause=overflow_cause)
post_command_event(self.main_window, self.StatusBarMsg, text=statustext) |
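# Hedged illustration of the three valid overflow messages built above,
# detached from the wx event machinery. _() is a gettext-style translator in
# the source; here it is stubbed as the identity function.
_ = lambda s: s
for row_overflow, col_overflow in [(True, True), (True, False), (False, True)]:
    if row_overflow and col_overflow:
        cause = _("rows and columns")
    elif row_overflow:
        cause = _("rows")
    else:
        cause = _("columns")
    print(_("The imported data did not fit into the grid {cause}. "
            "It has been truncated. Use a larger grid for full import.")
          .format(cause=cause))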
def convolve(image, pixel_filter, channels=3, name=None):
"""Perform a 2D pixel convolution on the given image.
Arguments:
image: A 3D `float32` `Tensor` of shape `[height, width, channels]`,
where `channels` is the third argument to this function and the
first two dimensions are arbitrary.
pixel_filter: A 2D `Tensor`, representing pixel weightings for the
kernel. This will be used to create a 4D kernel---the extra two
dimensions are for channels (see `tf.nn.conv2d` documentation),
and the kernel will be constructed so that the channels are
independent: each channel only observes the data from neighboring
pixels of the same channel.
channels: An integer representing the number of channels in the
image (e.g., 3 for RGB).
name: Optional name for the operation, forwarded to `tf.name_scope`.
Returns:
A 3D `float32` `Tensor` of the same shape as the input.
"""
with tf.name_scope(name, 'convolve'):
tf.compat.v1.assert_type(image, tf.float32)
channel_filter = tf.eye(channels)
filter_ = (tf.expand_dims(tf.expand_dims(pixel_filter, -1), -1) *
tf.expand_dims(tf.expand_dims(channel_filter, 0), 0))
result_batch = tf.nn.conv2d(tf.stack([image]), # batch
filter=filter_,
strides=[1, 1, 1, 1],
padding='SAME')
return result_batch[0] | def function[convolve, parameter[image, pixel_filter, channels, name]]:
constant[Perform a 2D pixel convolution on the given image.
Arguments:
image: A 3D `float32` `Tensor` of shape `[height, width, channels]`,
where `channels` is the third argument to this function and the
first two dimensions are arbitrary.
pixel_filter: A 2D `Tensor`, representing pixel weightings for the
kernel. This will be used to create a 4D kernel---the extra two
dimensions are for channels (see `tf.nn.conv2d` documentation),
and the kernel will be constructed so that the channels are
independent: each channel only observes the data from neighboring
pixels of the same channel.
channels: An integer representing the number of channels in the
image (e.g., 3 for RGB).
Returns:
A 3D `float32` `Tensor` of the same shape as the input.
]
with call[name[tf].name_scope, parameter[name[name], constant[convolve]]] begin[:]
call[name[tf].compat.v1.assert_type, parameter[name[image], name[tf].float32]]
variable[channel_filter] assign[=] call[name[tf].eye, parameter[name[channels]]]
variable[filter_] assign[=] binary_operation[call[name[tf].expand_dims, parameter[call[name[tf].expand_dims, parameter[name[pixel_filter], <ast.UnaryOp object at 0x7da1b1f9b730>]], <ast.UnaryOp object at 0x7da1b1f98a90>]] * call[name[tf].expand_dims, parameter[call[name[tf].expand_dims, parameter[name[channel_filter], constant[0]]], constant[0]]]]
variable[result_batch] assign[=] call[name[tf].nn.conv2d, parameter[call[name[tf].stack, parameter[list[[<ast.Name object at 0x7da1b21e1450>]]]]]]
return[call[name[result_batch]][constant[0]]] | keyword[def] identifier[convolve] ( identifier[image] , identifier[pixel_filter] , identifier[channels] = literal[int] , identifier[name] = keyword[None] ):
literal[string]
keyword[with] identifier[tf] . identifier[name_scope] ( identifier[name] , literal[string] ):
identifier[tf] . identifier[compat] . identifier[v1] . identifier[assert_type] ( identifier[image] , identifier[tf] . identifier[float32] )
identifier[channel_filter] = identifier[tf] . identifier[eye] ( identifier[channels] )
identifier[filter_] =( identifier[tf] . identifier[expand_dims] ( identifier[tf] . identifier[expand_dims] ( identifier[pixel_filter] ,- literal[int] ),- literal[int] )*
identifier[tf] . identifier[expand_dims] ( identifier[tf] . identifier[expand_dims] ( identifier[channel_filter] , literal[int] ), literal[int] ))
identifier[result_batch] = identifier[tf] . identifier[nn] . identifier[conv2d] ( identifier[tf] . identifier[stack] ([ identifier[image] ]),
identifier[filter] = identifier[filter_] ,
identifier[strides] =[ literal[int] , literal[int] , literal[int] , literal[int] ],
identifier[padding] = literal[string] )
keyword[return] identifier[result_batch] [ literal[int] ] | def convolve(image, pixel_filter, channels=3, name=None):
"""Perform a 2D pixel convolution on the given image.
Arguments:
image: A 3D `float32` `Tensor` of shape `[height, width, channels]`,
where `channels` is the third argument to this function and the
first two dimensions are arbitrary.
pixel_filter: A 2D `Tensor`, representing pixel weightings for the
kernel. This will be used to create a 4D kernel---the extra two
dimensions are for channels (see `tf.nn.conv2d` documentation),
and the kernel will be constructed so that the channels are
independent: each channel only observes the data from neighboring
pixels of the same channel.
channels: An integer representing the number of channels in the
image (e.g., 3 for RGB).
Returns:
A 3D `float32` `Tensor` of the same shape as the input.
"""
with tf.name_scope(name, 'convolve'):
tf.compat.v1.assert_type(image, tf.float32)
channel_filter = tf.eye(channels)
filter_ = tf.expand_dims(tf.expand_dims(pixel_filter, -1), -1) * tf.expand_dims(tf.expand_dims(channel_filter, 0), 0) # batch
result_batch = tf.nn.conv2d(tf.stack([image]), filter=filter_, strides=[1, 1, 1, 1], padding='SAME')
return result_batch[0] # depends on [control=['with'], data=[]] |
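# Hedged usage sketch: blur a random RGB image with convolve() above using a
# normalized 3x3 box kernel. Assumes a TensorFlow version where the
# two-argument tf.name_scope(name, default) form and tf.compat.v1 are
# available (TF 1.13+ graph/compat style); the tensors are illustrative.
import tensorflow as tf
image = tf.random.uniform([32, 32, 3], dtype=tf.float32)
box_kernel = tf.fill([3, 3], 1.0 / 9.0)  # 3x3 averaging kernel, sums to 1
blurred = convolve(image, box_kernel, channels=3)  # same shape as `image`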
def same_values(l1,l2):
'''
from elist.elist import *
l1 = [1,2,3,5]
l2 = [0,2,3,4]
same_values(l1,l2)
# -> [2, 3] (the positions where the two equal-length lists agree)
'''
rslt = []
for i in range(0,l1.__len__()):
if(l1[i]==l2[i]):
rslt.append(l1[i])
return(rslt) | def function[same_values, parameter[l1, l2]]:
constant[
from elist.elist import *
l1 = [1,2,3,5]
l2 = [0,2,3,4]
same_values(l1,l2)
]
variable[rslt] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[l1].__len__, parameter[]]]]] begin[:]
if compare[call[name[l1]][name[i]] equal[==] call[name[l2]][name[i]]] begin[:]
call[name[rslt].append, parameter[call[name[l1]][name[i]]]]
return[name[rslt]] | keyword[def] identifier[same_values] ( identifier[l1] , identifier[l2] ):
literal[string]
identifier[rslt] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[l1] . identifier[__len__] ()):
keyword[if] ( identifier[l1] [ identifier[i] ]== identifier[l2] [ identifier[i] ]):
identifier[rslt] . identifier[append] ( identifier[l1] [ identifier[i] ])
keyword[return] ( identifier[rslt] ) | def same_values(l1, l2):
"""
from elist.elist import *
l1 = [1,2,3,5]
l2 = [0,2,3,4]
same_values(l1,l2)
"""
rslt = []
for i in range(0, l1.__len__()):
if l1[i] == l2[i]:
rslt.append(l1[i]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return rslt |
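# Hedged usage example for same_values() above: the lists agree at indices
# 1 and 2, so those values are kept.
print(same_values([1, 2, 3, 5], [0, 2, 3, 4]))  # -> [2, 3]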
def _forward(self):
"""Advance to the next token.
Internal method; updates:
- self.current_token
- self.current_pos
Raises:
MissingTokensError: when trying to advance beyond the end of the
token flow.
"""
try:
self.current_token = next(self.tokens)
except StopIteration:
raise MissingTokensError("Unexpected end of token stream at %d." %
self.current_pos)
self.current_pos += 1 | def function[_forward, parameter[self]]:
constant[Advance to the next token.
Internal methods, updates:
- self.current_token
- self.current_pos
Raises:
MissingTokensError: when trying to advance beyond the end of the
token flow.
]
<ast.Try object at 0x7da20c990520>
<ast.AugAssign object at 0x7da20c991930> | keyword[def] identifier[_forward] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[self] . identifier[current_token] = identifier[next] ( identifier[self] . identifier[tokens] )
keyword[except] identifier[StopIteration] :
keyword[raise] identifier[MissingTokensError] ( literal[string] %
identifier[self] . identifier[current_pos] )
identifier[self] . identifier[current_pos] += literal[int] | def _forward(self):
"""Advance to the next token.
Internal methods, updates:
- self.current_token
- self.current_pos
Raises:
MissingTokensError: when trying to advance beyond the end of the
token flow.
"""
try:
self.current_token = next(self.tokens) # depends on [control=['try'], data=[]]
except StopIteration:
raise MissingTokensError('Unexpected end of token stream at %d.' % self.current_pos) # depends on [control=['except'], data=[]]
self.current_pos += 1 |
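# Hedged harness for _forward() above: a minimal wrapper class that reuses
# the function as a method. MissingTokensError is stubbed here; in the source
# it comes from the surrounding module. The wrapper names are assumptions.
class MissingTokensError(Exception):
    pass

class TokenStream:
    _forward = _forward  # reuse the function defined above as a method
    def __init__(self, tokens):
        self.tokens = iter(tokens)
        self.current_token = None
        self.current_pos = 0

stream = TokenStream(['a', 'b'])
stream._forward()  # current_token == 'a', current_pos == 1
stream._forward()  # current_token == 'b', current_pos == 2
# a third call would raise MissingTokensError ("Unexpected end ... at 2.")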
def population_chart_extractor(impact_report, component_metadata):
"""Creating population donut chart.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
context = {}
extra_args = component_metadata.extra_args
analysis_layer = impact_report.analysis
analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
provenance = impact_report.impact_function.provenance
hazard_keywords = provenance['hazard_keywords']
exposure_keywords = provenance['exposure_keywords']
"""Generate Donut chart for affected population."""
# create context for the donut chart
# retrieve hazard classification from hazard layer
hazard_classification = definition(
active_classification(hazard_keywords, exposure_keywords['exposure']))
if not hazard_classification:
return context
data = []
labels = []
colors = []
for hazard_class in hazard_classification['classes']:
# Skip if it is not an affected hazard class
if not hazard_class['affected']:
continue
# hazard_count_field is a dynamic field with hazard class
# as parameter
field_key_name = hazard_count_field['key'] % (
hazard_class['key'],)
try:
# retrieve dynamic field name from analysis_fields keywords
# will cause key error if no hazard count for that particular
# class
field_name = analysis_layer_fields[field_key_name]
# Hazard label taken from translated hazard count field
# label, string-formatted with translated hazard class label
hazard_value = value_from_field_name(field_name, analysis_layer)
hazard_value = round_affected_number(
hazard_value,
use_rounding=True,
use_population_rounding=True)
except KeyError:
# in case the field was not found
continue
data.append(hazard_value)
labels.append(hazard_class['name'])
colors.append(hazard_class['color'].name())
# add total not affected
try:
field_name = analysis_layer_fields[total_not_affected_field['key']]
hazard_value = value_from_field_name(field_name, analysis_layer)
hazard_value = round_affected_number(
hazard_value,
use_rounding=True,
use_population_rounding=True)
data.append(hazard_value)
labels.append(total_not_affected_field['name'])
colors.append(green.name())
except KeyError:
# in case the field is not there
pass
# add number for total not affected
chart_title = resolve_from_dictionary(extra_args, 'chart_title')
total_header = resolve_from_dictionary(extra_args, 'total_header')
donut_context = DonutChartContext(
data=data,
labels=labels,
colors=colors,
inner_radius_ratio=0.5,
stroke_color='#fff',
title=chart_title,
total_header=total_header,
as_file=True)
context['context'] = donut_context
return context | def function[population_chart_extractor, parameter[impact_report, component_metadata]]:
constant[Creating population donut chart.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
]
variable[context] assign[=] dictionary[[], []]
variable[extra_args] assign[=] name[component_metadata].extra_args
variable[analysis_layer] assign[=] name[impact_report].analysis
variable[analysis_layer_fields] assign[=] call[name[analysis_layer].keywords][constant[inasafe_fields]]
variable[provenance] assign[=] name[impact_report].impact_function.provenance
variable[hazard_keywords] assign[=] call[name[provenance]][constant[hazard_keywords]]
variable[exposure_keywords] assign[=] call[name[provenance]][constant[exposure_keywords]]
constant[Generate Donut chart for affected population.]
variable[hazard_classification] assign[=] call[name[definition], parameter[call[name[active_classification], parameter[name[hazard_keywords], call[name[exposure_keywords]][constant[exposure]]]]]]
if <ast.UnaryOp object at 0x7da1b0c3ec50> begin[:]
return[name[context]]
variable[data] assign[=] list[[]]
variable[labels] assign[=] list[[]]
variable[colors] assign[=] list[[]]
for taget[name[hazard_class]] in starred[call[name[hazard_classification]][constant[classes]]] begin[:]
if <ast.UnaryOp object at 0x7da1b0c3d3f0> begin[:]
continue
variable[field_key_name] assign[=] binary_operation[call[name[hazard_count_field]][constant[key]] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b0c3d9f0>]]]
<ast.Try object at 0x7da1b0c3e2c0>
call[name[data].append, parameter[name[hazard_value]]]
call[name[labels].append, parameter[call[name[hazard_class]][constant[name]]]]
call[name[colors].append, parameter[call[call[name[hazard_class]][constant[color]].name, parameter[]]]]
<ast.Try object at 0x7da1b0c3e500>
variable[chart_title] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[chart_title]]]
variable[total_header] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[total_header]]]
variable[donut_context] assign[=] call[name[DonutChartContext], parameter[]]
call[name[context]][constant[context]] assign[=] name[donut_context]
return[name[context]] | keyword[def] identifier[population_chart_extractor] ( identifier[impact_report] , identifier[component_metadata] ):
literal[string]
identifier[context] ={}
identifier[extra_args] = identifier[component_metadata] . identifier[extra_args]
identifier[analysis_layer] = identifier[impact_report] . identifier[analysis]
identifier[analysis_layer_fields] = identifier[analysis_layer] . identifier[keywords] [ literal[string] ]
identifier[provenance] = identifier[impact_report] . identifier[impact_function] . identifier[provenance]
identifier[hazard_keywords] = identifier[provenance] [ literal[string] ]
identifier[exposure_keywords] = identifier[provenance] [ literal[string] ]
literal[string]
identifier[hazard_classification] = identifier[definition] (
identifier[active_classification] ( identifier[hazard_keywords] , identifier[exposure_keywords] [ literal[string] ]))
keyword[if] keyword[not] identifier[hazard_classification] :
keyword[return] identifier[context]
identifier[data] =[]
identifier[labels] =[]
identifier[colors] =[]
keyword[for] identifier[hazard_class] keyword[in] identifier[hazard_classification] [ literal[string] ]:
keyword[if] keyword[not] identifier[hazard_class] [ literal[string] ]:
keyword[continue]
identifier[field_key_name] = identifier[hazard_count_field] [ literal[string] ]%(
identifier[hazard_class] [ literal[string] ],)
keyword[try] :
identifier[field_name] = identifier[analysis_layer_fields] [ identifier[field_key_name] ]
identifier[hazard_value] = identifier[value_from_field_name] ( identifier[field_name] , identifier[analysis_layer] )
identifier[hazard_value] = identifier[round_affected_number] (
identifier[hazard_value] ,
identifier[use_rounding] = keyword[True] ,
identifier[use_population_rounding] = keyword[True] )
keyword[except] identifier[KeyError] :
keyword[continue]
identifier[data] . identifier[append] ( identifier[hazard_value] )
identifier[labels] . identifier[append] ( identifier[hazard_class] [ literal[string] ])
identifier[colors] . identifier[append] ( identifier[hazard_class] [ literal[string] ]. identifier[name] ())
keyword[try] :
identifier[field_name] = identifier[analysis_layer_fields] [ identifier[total_not_affected_field] [ literal[string] ]]
identifier[hazard_value] = identifier[value_from_field_name] ( identifier[field_name] , identifier[analysis_layer] )
identifier[hazard_value] = identifier[round_affected_number] (
identifier[hazard_value] ,
identifier[use_rounding] = keyword[True] ,
identifier[use_population_rounding] = keyword[True] )
identifier[data] . identifier[append] ( identifier[hazard_value] )
identifier[labels] . identifier[append] ( identifier[total_not_affected_field] [ literal[string] ])
identifier[colors] . identifier[append] ( identifier[green] . identifier[name] ())
keyword[except] identifier[KeyError] :
keyword[pass]
identifier[chart_title] = identifier[resolve_from_dictionary] ( identifier[extra_args] , literal[string] )
identifier[total_header] = identifier[resolve_from_dictionary] ( identifier[extra_args] , literal[string] )
identifier[donut_context] = identifier[DonutChartContext] (
identifier[data] = identifier[data] ,
identifier[labels] = identifier[labels] ,
identifier[colors] = identifier[colors] ,
identifier[inner_radius_ratio] = literal[int] ,
identifier[stroke_color] = literal[string] ,
identifier[title] = identifier[chart_title] ,
identifier[total_header] = identifier[total_header] ,
identifier[as_file] = keyword[True] )
identifier[context] [ literal[string] ]= identifier[donut_context]
keyword[return] identifier[context] | def population_chart_extractor(impact_report, component_metadata):
"""Creating population donut chart.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
context = {}
extra_args = component_metadata.extra_args
analysis_layer = impact_report.analysis
analysis_layer_fields = analysis_layer.keywords['inasafe_fields']
provenance = impact_report.impact_function.provenance
hazard_keywords = provenance['hazard_keywords']
exposure_keywords = provenance['exposure_keywords']
'Generate Donut chart for affected population.'
# create context for the donut chart
# retrieve hazard classification from hazard layer
hazard_classification = definition(active_classification(hazard_keywords, exposure_keywords['exposure']))
if not hazard_classification:
return context # depends on [control=['if'], data=[]]
data = []
labels = []
colors = []
for hazard_class in hazard_classification['classes']:
# Skip if it is not an affected hazard class
if not hazard_class['affected']:
continue # depends on [control=['if'], data=[]]
# hazard_count_field is a dynamic field with hazard class
# as parameter
field_key_name = hazard_count_field['key'] % (hazard_class['key'],)
try:
# retrieve dynamic field name from analysis_fields keywords
# will cause key error if no hazard count for that particular
# class
field_name = analysis_layer_fields[field_key_name]
# Hazard label taken from translated hazard count field
# label, string-formatted with translated hazard class label
hazard_value = value_from_field_name(field_name, analysis_layer)
hazard_value = round_affected_number(hazard_value, use_rounding=True, use_population_rounding=True) # depends on [control=['try'], data=[]]
except KeyError:
# in case the field was not found
continue # depends on [control=['except'], data=[]]
data.append(hazard_value)
labels.append(hazard_class['name'])
colors.append(hazard_class['color'].name()) # depends on [control=['for'], data=['hazard_class']]
# add total not affected
try:
field_name = analysis_layer_fields[total_not_affected_field['key']]
hazard_value = value_from_field_name(field_name, analysis_layer)
hazard_value = round_affected_number(hazard_value, use_rounding=True, use_population_rounding=True)
data.append(hazard_value)
labels.append(total_not_affected_field['name'])
colors.append(green.name()) # depends on [control=['try'], data=[]]
except KeyError:
# in case the field is not there
pass # depends on [control=['except'], data=[]]
# add number for total not affected
chart_title = resolve_from_dictionary(extra_args, 'chart_title')
total_header = resolve_from_dictionary(extra_args, 'total_header')
donut_context = DonutChartContext(data=data, labels=labels, colors=colors, inner_radius_ratio=0.5, stroke_color='#fff', title=chart_title, total_header=total_header, as_file=True)
context['context'] = donut_context
return context |
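# Hedged, framework-free illustration of the accumulation pattern in
# population_chart_extractor(): skip non-affected classes, look up a
# per-class count field, and tolerate missing fields. Every name below is a
# stand-in for illustration, not the InaSAFE API.
classes = [
    {'key': 'high', 'name': 'High', 'affected': True, 'color': '#d63e3e'},
    {'key': 'low', 'name': 'Low', 'affected': False, 'color': '#59c129'},
]
counts = {'population_count_high': 120}  # no entry for 'low'
data, labels, colors = [], [], []
for hazard_class in classes:
    if not hazard_class['affected']:
        continue
    try:
        value = counts['population_count_%s' % hazard_class['key']]
    except KeyError:
        continue
    data.append(value)
    labels.append(hazard_class['name'])
    colors.append(hazard_class['color'])
# data == [120], labels == ['High'], colors == ['#d63e3e']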
def get_el_sp(obj):
"""
Utility method to get an Element or Specie from an input obj.
If obj is in itself an element or a specie, it is returned automatically.
If obj is an int or a string representing an integer, the Element
with the atomic number obj is returned.
If obj is a string, Specie parsing will be attempted (e.g., Mn2+), failing
which Element parsing will be attempted (e.g., Mn), failing which
DummySpecie parsing will be attempted.
Args:
obj (Element/Specie/str/int): An arbitrary object. Supported objects
are actual Element/Specie objects, integers (representing atomic
numbers) or strings (element symbols or species strings).
Returns:
Specie or Element, with a bias for the maximum number of properties
that can be determined.
Raises:
ValueError if obj cannot be converted into an Element or Specie.
"""
if isinstance(obj, (Element, Specie, DummySpecie)):
return obj
if isinstance(obj, (list, tuple)):
return [get_el_sp(o) for o in obj]
try:
c = float(obj)
i = int(c)
i = i if i == c else None
except (ValueError, TypeError):
i = None
if i is not None:
return Element.from_Z(i)
try:
return Specie.from_string(obj)
except (ValueError, KeyError):
try:
return Element(obj)
except (ValueError, KeyError):
try:
return DummySpecie.from_string(obj)
except:
raise ValueError("Can't parse Element or String from type"
" %s: %s." % (type(obj), obj)) | def function[get_el_sp, parameter[obj]]:
constant[
Utility method to get an Element or Specie from an input obj.
If obj is in itself an element or a specie, it is returned automatically.
If obj is an int or a string representing an integer, the Element
with the atomic number obj is returned.
If obj is a string, Specie parsing will be attempted (e.g., Mn2+), failing
which Element parsing will be attempted (e.g., Mn), failing which
DummySpecie parsing will be attempted.
Args:
obj (Element/Specie/str/int): An arbitrary object. Supported objects
are actual Element/Specie objects, integers (representing atomic
numbers) or strings (element symbols or species strings).
Returns:
Specie or Element, with a bias for the maximum number of properties
that can be determined.
Raises:
ValueError if obj cannot be converted into an Element or Specie.
]
if call[name[isinstance], parameter[name[obj], tuple[[<ast.Name object at 0x7da204567040>, <ast.Name object at 0x7da204564070>, <ast.Name object at 0x7da204566c20>]]]] begin[:]
return[name[obj]]
if call[name[isinstance], parameter[name[obj], tuple[[<ast.Name object at 0x7da204567a00>, <ast.Name object at 0x7da204564ca0>]]]] begin[:]
return[<ast.ListComp object at 0x7da204564fa0>]
<ast.Try object at 0x7da2045679d0>
if compare[name[i] is_not constant[None]] begin[:]
return[call[name[Element].from_Z, parameter[name[i]]]]
<ast.Try object at 0x7da204565900> | keyword[def] identifier[get_el_sp] ( identifier[obj] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] ,( identifier[Element] , identifier[Specie] , identifier[DummySpecie] )):
keyword[return] identifier[obj]
keyword[if] identifier[isinstance] ( identifier[obj] ,( identifier[list] , identifier[tuple] )):
keyword[return] [ identifier[get_el_sp] ( identifier[o] ) keyword[for] identifier[o] keyword[in] identifier[obj] ]
keyword[try] :
identifier[c] = identifier[float] ( identifier[obj] )
identifier[i] = identifier[int] ( identifier[c] )
identifier[i] = identifier[i] keyword[if] identifier[i] == identifier[c] keyword[else] keyword[None]
keyword[except] ( identifier[ValueError] , identifier[TypeError] ):
identifier[i] = keyword[None]
keyword[if] identifier[i] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[Element] . identifier[from_Z] ( identifier[i] )
keyword[try] :
keyword[return] identifier[Specie] . identifier[from_string] ( identifier[obj] )
keyword[except] ( identifier[ValueError] , identifier[KeyError] ):
keyword[try] :
keyword[return] identifier[Element] ( identifier[obj] )
keyword[except] ( identifier[ValueError] , identifier[KeyError] ):
keyword[try] :
keyword[return] identifier[DummySpecie] . identifier[from_string] ( identifier[obj] )
keyword[except] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] %( identifier[type] ( identifier[obj] ), identifier[obj] )) | def get_el_sp(obj):
"""
Utility method to get an Element or Specie from an input obj.
If obj is in itself an element or a specie, it is returned automatically.
If obj is an int or a string representing an integer, the Element
with the atomic number obj is returned.
If obj is a string, Specie parsing will be attempted (e.g., Mn2+), failing
which Element parsing will be attempted (e.g., Mn), failing which
DummySpecie parsing will be attempted.
Args:
obj (Element/Specie/str/int): An arbitrary object. Supported objects
are actual Element/Specie objects, integers (representing atomic
numbers) or strings (element symbols or species strings).
Returns:
Specie or Element, with a bias for the maximum number of properties
that can be determined.
Raises:
ValueError if obj cannot be converted into an Element or Specie.
"""
if isinstance(obj, (Element, Specie, DummySpecie)):
return obj # depends on [control=['if'], data=[]]
if isinstance(obj, (list, tuple)):
return [get_el_sp(o) for o in obj] # depends on [control=['if'], data=[]]
try:
c = float(obj)
i = int(c)
i = i if i == c else None # depends on [control=['try'], data=[]]
except (ValueError, TypeError):
i = None # depends on [control=['except'], data=[]]
if i is not None:
return Element.from_Z(i) # depends on [control=['if'], data=['i']]
try:
return Specie.from_string(obj) # depends on [control=['try'], data=[]]
except (ValueError, KeyError):
try:
return Element(obj) # depends on [control=['try'], data=[]]
except (ValueError, KeyError):
try:
return DummySpecie.from_string(obj) # depends on [control=['try'], data=[]]
except:
raise ValueError("Can't parse Element or String from type %s: %s." % (type(obj), obj)) # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] |
def bond_sample_states(
perc_graph, num_nodes, num_edges, seed, spanning_cluster=True,
auxiliary_node_attributes=None, auxiliary_edge_attributes=None,
spanning_sides=None,
**kwargs
):
r'''
Generate successive sample states of the bond percolation model
This is a :ref:`generator function <python:tut-generators>` to successively
add one edge at a time from the graph to the percolation model.
At each iteration, it calculates and returns the cluster statistics.
CAUTION: it returns a reference to the internal array, not a copy.
Parameters
----------
perc_graph : networkx.Graph
The substrate graph on which percolation is to take place
num_nodes : int
Number ``N`` of sites in the graph
num_edges : int
Number ``M`` of bonds in the graph
seed : {None, int, array_like}
Random seed initializing the pseudo-random number generator.
Piped through to `numpy.random.RandomState` constructor.
spanning_cluster : bool, optional
Whether to detect a spanning cluster or not.
Defaults to ``True``.
auxiliary_node_attributes : optional
Return value of ``networkx.get_node_attributes(graph, 'span')``
auxiliary_edge_attributes : optional
Return value of ``networkx.get_edge_attributes(graph, 'span')``
spanning_sides : list, optional
List of keys (attribute values) of the two sides of the auxiliary
nodes.
Return value of ``list(set(auxiliary_node_attributes.values()))``
Yields
------
ret : ndarray
Structured array with dtype ``dtype=[('has_spanning_cluster', 'bool'),
('max_cluster_size', 'uint32'), ('moments', 'int64', 5)]``
ret['n'] : ndarray of int
The number of bonds added at the particular iteration
ret['edge'] : ndarray of int
The index of the edge added at the particular iteration
Note that in the first step, when ``ret['n'] == 0``, this value is
undefined!
ret['has_spanning_cluster'] : ndarray of bool
``True`` if there is a spanning cluster, ``False`` otherwise.
Only exists if `spanning_cluster` argument is set to ``True``.
ret['max_cluster_size'] : int
Size of the largest cluster (absolute number of sites)
ret['moments'] : 1-D :py:class:`numpy.ndarray` of int
Array of size ``5``.
The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster
size distribution, with ``k`` ranging from ``0`` to ``4``.
Raises
------
ValueError
If `spanning_cluster` is ``True``, but `graph` does not contain any
auxiliary nodes to detect spanning clusters.
See also
--------
numpy.random.RandomState
microcanonical_statistics_dtype
Notes
-----
Iterating through this generator is a single run of the Newman-Ziff
algorithm. [12]_
The first iteration yields the trivial state with :math:`n = 0` occupied
bonds.
Spanning cluster
In order to detect a spanning cluster, `graph` needs to contain
auxiliary nodes and edges, cf. Reference [12]_, Figure 6.
The auxiliary nodes and edges have the ``'span'`` `attribute
<http://networkx.github.io/documentation/latest/tutorial/tutorial.html#node-attributes>`_.
The value is either ``0`` or ``1``, distinguishing the two sides of the
graph to span.
Raw moments of the cluster size distribution
The :math:`k`-th raw moment of the (absolute) cluster size distribution
is :math:`\sum_s' s^k N_s`, where :math:`s` is the cluster size and
:math:`N_s` is the number of clusters of size :math:`s`. [13]_
The primed sum :math:`\sum'` signifies that the largest cluster is
excluded from the sum. [14]_
References
----------
.. [12] Newman, M. E. J. & Ziff, R. M. Fast Monte Carlo algorithm for site
or bond percolation. Physical Review E 64, 016706+ (2001),
`doi:10.1103/physreve.64.016706 <http://dx.doi.org/10.1103/physreve.64.016706>`_.
.. [13] Stauffer, D. & Aharony, A. Introduction to Percolation Theory (Taylor &
Francis, London, 1994), second edn.
.. [14] Binder, K. & Heermann, D. W. Monte Carlo Simulation in Statistical
Physics (Springer, Berlin, Heidelberg, 2010),
`doi:10.1007/978-3-642-03163-2 <http://dx.doi.org/10.1007/978-3-642-03163-2>`_.
'''
# construct random number generator
rng = np.random.RandomState(seed=seed)
if spanning_cluster:
if len(spanning_sides) != 2:
raise ValueError(
'Spanning cluster is to be detected, but auxiliary nodes '
'of less or more than 2 types (sides) given.'
)
# get a list of edges for easy access in later iterations
perc_edges = perc_graph.edges()
perm_edges = rng.permutation(num_edges)
# initial iteration: no edges added yet (n == 0)
ret = np.empty(
1, dtype=microcanonical_statistics_dtype(spanning_cluster)
)
ret['n'] = 0
ret['max_cluster_size'] = 1
ret['moments'] = np.ones(5, dtype='uint64') * (num_nodes - 1)
if spanning_cluster:
ret['has_spanning_cluster'] = False
# yield cluster statistics for n == 0
yield ret
# set up disjoint set (union-find) data structure
ds = nx.utils.union_find.UnionFind()
if spanning_cluster:
ds_spanning = nx.utils.union_find.UnionFind()
# merge all auxiliary nodes for each side
side_roots = dict()
for side in spanning_sides:
nodes = [
node for (node, node_side) in auxiliary_node_attributes.items()
if node_side is side
]
ds_spanning.union(*nodes)
side_roots[side] = ds_spanning[nodes[0]]
for (edge, edge_side) in auxiliary_edge_attributes.items():
ds_spanning.union(side_roots[edge_side], *edge)
side_roots = [
ds_spanning[side_root] for side_root in side_roots.values()
]
# get first node
max_cluster_root = next(perc_graph.nodes_iter())
# loop over all edges (n == 1..M)
for n in range(num_edges):
ret['n'] += 1
# draw new edge from permutation
edge_index = perm_edges[n]
edge = perc_edges[edge_index]
ret['edge'] = edge_index
# find roots and weights
roots = [
ds[node] for node in edge
]
weights = [
ds.weights[root] for root in roots
]
if roots[0] is not roots[1]:
# not same cluster: union!
ds.union(*roots)
if spanning_cluster:
ds_spanning.union(*roots)
ret['has_spanning_cluster'] = (
ds_spanning[side_roots[0]] == ds_spanning[side_roots[1]]
)
# find new root and weight
root = ds[edge[0]]
weight = ds.weights[root]
# moments and maximum cluster size
# deduct the previous sub-maximum clusters from moments
for i in [0, 1]:
if roots[i] is max_cluster_root:
continue
ret['moments'] -= weights[i] ** np.arange(5, dtype='uint64')
if max_cluster_root in roots:
# merged with maximum cluster
max_cluster_root = root
ret['max_cluster_size'] = weight
else:
# merged previously sub-maximum clusters
if ret['max_cluster_size'] >= weight:
# previously largest cluster remains largest cluster
# add merged cluster to moments
ret['moments'] += weight ** np.arange(5, dtype='uint64')
else:
# merged cluster overtook previously largest cluster
# add previously largest cluster to moments
max_cluster_root = root
ret['moments'] += ret['max_cluster_size'] ** np.arange(
5, dtype='uint64'
)
ret['max_cluster_size'] = weight
yield ret | def function[bond_sample_states, parameter[perc_graph, num_nodes, num_edges, seed, spanning_cluster, auxiliary_node_attributes, auxiliary_edge_attributes, spanning_sides]]:
constant[
Generate successive sample states of the bond percolation model
This is a :ref:`generator function <python:tut-generators>` to successively
add one edge at a time from the graph to the percolation model.
At each iteration, it calculates and returns the cluster statistics.
CAUTION: it returns a reference to the internal array, not a copy.
Parameters
----------
perc_graph : networkx.Graph
The substrate graph on which percolation is to take place
num_nodes : int
Number ``N`` of sites in the graph
num_edges : int
Number ``M`` of bonds in the graph
seed : {None, int, array_like}
Random seed initializing the pseudo-random number generator.
Piped through to `numpy.random.RandomState` constructor.
spanning_cluster : bool, optional
Whether to detect a spanning cluster or not.
Defaults to ``True``.
auxiliary_node_attributes : optional
Return value of ``networkx.get_node_attributes(graph, 'span')``
auxiliary_edge_attributes : optional
Return value of ``networkx.get_edge_attributes(graph, 'span')``
spanning_sides : list, optional
List of keys (attribute values) of the two sides of the auxiliary
nodes.
Return value of ``list(set(auxiliary_node_attributes.values()))``
Yields
------
ret : ndarray
Structured array with dtype ``dtype=[('has_spanning_cluster', 'bool'),
('max_cluster_size', 'uint32'), ('moments', 'int64', 5)]``
ret['n'] : ndarray of int
The number of bonds added at the particular iteration
ret['edge'] : ndarray of int
The index of the edge added at the particular iteration
Note that in the first step, when ``ret['n'] == 0``, this value is
undefined!
ret['has_spanning_cluster'] : ndarray of bool
``True`` if there is a spanning cluster, ``False`` otherwise.
Only exists if `spanning_cluster` argument is set to ``True``.
ret['max_cluster_size'] : int
Size of the largest cluster (absolute number of sites)
ret['moments'] : 1-D :py:class:`numpy.ndarray` of int
Array of size ``5``.
The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster
size distribution, with ``k`` ranging from ``0`` to ``4``.
Raises
------
ValueError
If `spanning_cluster` is ``True``, but `graph` does not contain any
auxiliary nodes to detect spanning clusters.
See also
--------
numpy.random.RandomState
microcanonical_statistics_dtype
Notes
-----
Iterating through this generator is a single run of the Newman-Ziff
algorithm. [12]_
The first iteration yields the trivial state with :math:`n = 0` occupied
bonds.
Spanning cluster
In order to detect a spanning cluster, `graph` needs to contain
auxiliary nodes and edges, cf. Reference [12]_, Figure 6.
The auxiliary nodes and edges have the ``'span'`` `attribute
<http://networkx.github.io/documentation/latest/tutorial/tutorial.html#node-attributes>`_.
The value is either ``0`` or ``1``, distinguishing the two sides of the
graph to span.
Raw moments of the cluster size distribution
The :math:`k`-th raw moment of the (absolute) cluster size distribution
is :math:`\sum_s' s^k N_s`, where :math:`s` is the cluster size and
:math:`N_s` is the number of clusters of size :math:`s`. [13]_
The primed sum :math:`\sum'` signifies that the largest cluster is
excluded from the sum. [14]_
References
----------
.. [12] Newman, M. E. J. & Ziff, R. M. Fast Monte Carlo algorithm for site
or bond percolation. Physical Review E 64, 016706+ (2001),
`doi:10.1103/physreve.64.016706 <http://dx.doi.org/10.1103/physreve.64.016706>`_.
.. [13] Stauffer, D. & Aharony, A. Introduction to Percolation Theory (Taylor &
Francis, London, 1994), second edn.
.. [14] Binder, K. & Heermann, D. W. Monte Carlo Simulation in Statistical
Physics (Springer, Berlin, Heidelberg, 2010),
`doi:10.1007/978-3-642-03163-2 <http://dx.doi.org/10.1007/978-3-642-03163-2>`_.
]
variable[rng] assign[=] call[name[np].random.RandomState, parameter[]]
if name[spanning_cluster] begin[:]
if compare[call[name[len], parameter[name[spanning_sides]]] not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da1aff75ea0>
variable[perc_edges] assign[=] call[name[perc_graph].edges, parameter[]]
variable[perm_edges] assign[=] call[name[rng].permutation, parameter[name[num_edges]]]
variable[ret] assign[=] call[name[np].empty, parameter[constant[1]]]
call[name[ret]][constant[n]] assign[=] constant[0]
call[name[ret]][constant[max_cluster_size]] assign[=] constant[1]
call[name[ret]][constant[moments]] assign[=] binary_operation[call[name[np].ones, parameter[constant[5]]] * binary_operation[name[num_nodes] - constant[1]]]
if name[spanning_cluster] begin[:]
call[name[ret]][constant[has_spanning_cluster]] assign[=] constant[False]
<ast.Yield object at 0x7da1b00f5570>
variable[ds] assign[=] call[name[nx].utils.union_find.UnionFind, parameter[]]
if name[spanning_cluster] begin[:]
variable[ds_spanning] assign[=] call[name[nx].utils.union_find.UnionFind, parameter[]]
variable[side_roots] assign[=] call[name[dict], parameter[]]
for taget[name[side]] in starred[name[spanning_sides]] begin[:]
variable[nodes] assign[=] <ast.ListComp object at 0x7da1b00f6260>
call[name[ds_spanning].union, parameter[<ast.Starred object at 0x7da1b00f7880>]]
call[name[side_roots]][name[side]] assign[=] call[name[ds_spanning]][call[name[nodes]][constant[0]]]
for taget[tuple[[<ast.Name object at 0x7da20c796320>, <ast.Name object at 0x7da20c795030>]]] in starred[call[name[auxiliary_edge_attributes].items, parameter[]]] begin[:]
call[name[ds_spanning].union, parameter[call[name[side_roots]][name[edge_side]], <ast.Starred object at 0x7da20c797730>]]
variable[side_roots] assign[=] <ast.ListComp object at 0x7da20c795330>
variable[max_cluster_root] assign[=] call[name[next], parameter[call[name[perc_graph].nodes_iter, parameter[]]]]
for taget[name[n]] in starred[call[name[range], parameter[name[num_edges]]]] begin[:]
<ast.AugAssign object at 0x7da1aff77d00>
variable[edge_index] assign[=] call[name[perm_edges]][name[n]]
variable[edge] assign[=] call[name[perc_edges]][name[edge_index]]
call[name[ret]][constant[edge]] assign[=] name[edge_index]
variable[roots] assign[=] <ast.ListComp object at 0x7da1aff74e50>
variable[weights] assign[=] <ast.ListComp object at 0x7da1aff773d0>
if compare[call[name[roots]][constant[0]] is_not call[name[roots]][constant[1]]] begin[:]
call[name[ds].union, parameter[<ast.Starred object at 0x7da1aff74670>]]
if name[spanning_cluster] begin[:]
call[name[ds_spanning].union, parameter[<ast.Starred object at 0x7da1aff76560>]]
call[name[ret]][constant[has_spanning_cluster]] assign[=] compare[call[name[ds_spanning]][call[name[side_roots]][constant[0]]] equal[==] call[name[ds_spanning]][call[name[side_roots]][constant[1]]]]
variable[root] assign[=] call[name[ds]][call[name[edge]][constant[0]]]
variable[weight] assign[=] call[name[ds].weights][name[root]]
for taget[name[i]] in starred[list[[<ast.Constant object at 0x7da1aff774c0>, <ast.Constant object at 0x7da1aff76d70>]]] begin[:]
if compare[call[name[roots]][name[i]] is name[max_cluster_root]] begin[:]
continue
<ast.AugAssign object at 0x7da1aff762f0>
if compare[name[max_cluster_root] in name[roots]] begin[:]
variable[max_cluster_root] assign[=] name[root]
call[name[ret]][constant[max_cluster_size]] assign[=] name[weight]
<ast.Yield object at 0x7da1aff76410> | keyword[def] identifier[bond_sample_states] (
identifier[perc_graph] , identifier[num_nodes] , identifier[num_edges] , identifier[seed] , identifier[spanning_cluster] = keyword[True] ,
identifier[auxiliary_node_attributes] = keyword[None] , identifier[auxiliary_edge_attributes] = keyword[None] ,
identifier[spanning_sides] = keyword[None] ,
** identifier[kwargs]
):
literal[string]
identifier[rng] = identifier[np] . identifier[random] . identifier[RandomState] ( identifier[seed] = identifier[seed] )
keyword[if] identifier[spanning_cluster] :
keyword[if] identifier[len] ( identifier[spanning_sides] )!= literal[int] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string]
)
identifier[perc_edges] = identifier[perc_graph] . identifier[edges] ()
identifier[perm_edges] = identifier[rng] . identifier[permutation] ( identifier[num_edges] )
identifier[ret] = identifier[np] . identifier[empty] (
literal[int] , identifier[dtype] = identifier[microcanonical_statistics_dtype] ( identifier[spanning_cluster] )
)
identifier[ret] [ literal[string] ]= literal[int]
identifier[ret] [ literal[string] ]= literal[int]
identifier[ret] [ literal[string] ]= identifier[np] . identifier[ones] ( literal[int] , identifier[dtype] = literal[string] )*( identifier[num_nodes] - literal[int] )
keyword[if] identifier[spanning_cluster] :
identifier[ret] [ literal[string] ]= keyword[False]
keyword[yield] identifier[ret]
identifier[ds] = identifier[nx] . identifier[utils] . identifier[union_find] . identifier[UnionFind] ()
keyword[if] identifier[spanning_cluster] :
identifier[ds_spanning] = identifier[nx] . identifier[utils] . identifier[union_find] . identifier[UnionFind] ()
identifier[side_roots] = identifier[dict] ()
keyword[for] identifier[side] keyword[in] identifier[spanning_sides] :
identifier[nodes] =[
identifier[node] keyword[for] ( identifier[node] , identifier[node_side] ) keyword[in] identifier[auxiliary_node_attributes] . identifier[items] ()
keyword[if] identifier[node_side] keyword[is] identifier[side]
]
identifier[ds_spanning] . identifier[union] (* identifier[nodes] )
identifier[side_roots] [ identifier[side] ]= identifier[ds_spanning] [ identifier[nodes] [ literal[int] ]]
keyword[for] ( identifier[edge] , identifier[edge_side] ) keyword[in] identifier[auxiliary_edge_attributes] . identifier[items] ():
identifier[ds_spanning] . identifier[union] ( identifier[side_roots] [ identifier[edge_side] ],* identifier[edge] )
identifier[side_roots] =[
identifier[ds_spanning] [ identifier[side_root] ] keyword[for] identifier[side_root] keyword[in] identifier[side_roots] . identifier[values] ()
]
identifier[max_cluster_root] = identifier[next] ( identifier[perc_graph] . identifier[nodes_iter] ())
keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[num_edges] ):
identifier[ret] [ literal[string] ]+= literal[int]
identifier[edge_index] = identifier[perm_edges] [ identifier[n] ]
identifier[edge] = identifier[perc_edges] [ identifier[edge_index] ]
identifier[ret] [ literal[string] ]= identifier[edge_index]
identifier[roots] =[
identifier[ds] [ identifier[node] ] keyword[for] identifier[node] keyword[in] identifier[edge]
]
identifier[weights] =[
identifier[ds] . identifier[weights] [ identifier[root] ] keyword[for] identifier[root] keyword[in] identifier[roots]
]
keyword[if] identifier[roots] [ literal[int] ] keyword[is] keyword[not] identifier[roots] [ literal[int] ]:
identifier[ds] . identifier[union] (* identifier[roots] )
keyword[if] identifier[spanning_cluster] :
identifier[ds_spanning] . identifier[union] (* identifier[roots] )
identifier[ret] [ literal[string] ]=(
identifier[ds_spanning] [ identifier[side_roots] [ literal[int] ]]== identifier[ds_spanning] [ identifier[side_roots] [ literal[int] ]]
)
identifier[root] = identifier[ds] [ identifier[edge] [ literal[int] ]]
identifier[weight] = identifier[ds] . identifier[weights] [ identifier[root] ]
keyword[for] identifier[i] keyword[in] [ literal[int] , literal[int] ]:
keyword[if] identifier[roots] [ identifier[i] ] keyword[is] identifier[max_cluster_root] :
keyword[continue]
identifier[ret] [ literal[string] ]-= identifier[weights] [ identifier[i] ]** identifier[np] . identifier[arange] ( literal[int] , identifier[dtype] = literal[string] )
keyword[if] identifier[max_cluster_root] keyword[in] identifier[roots] :
identifier[max_cluster_root] = identifier[root]
identifier[ret] [ literal[string] ]= identifier[weight]
keyword[else] :
keyword[if] identifier[ret] [ literal[string] ]>= identifier[weight] :
identifier[ret] [ literal[string] ]+= identifier[weight] ** identifier[np] . identifier[arange] ( literal[int] , identifier[dtype] = literal[string] )
keyword[else] :
identifier[max_cluster_root] = identifier[root]
identifier[ret] [ literal[string] ]+= identifier[ret] [ literal[string] ]** identifier[np] . identifier[arange] (
literal[int] , identifier[dtype] = literal[string]
)
identifier[ret] [ literal[string] ]= identifier[weight]
keyword[yield] identifier[ret] | def bond_sample_states(perc_graph, num_nodes, num_edges, seed, spanning_cluster=True, auxiliary_node_attributes=None, auxiliary_edge_attributes=None, spanning_sides=None, **kwargs):
"""
Generate successive sample states of the bond percolation model
This is a :ref:`generator function <python:tut-generators>` to successively
add one edge at a time from the graph to the percolation model.
At each iteration, it calculates and returns the cluster statistics.
CAUTION: it returns a reference to the internal array, not a copy.
Parameters
----------
perc_graph : networkx.Graph
The substrate graph on which percolation is to take place
num_nodes : int
Number ``N`` of sites in the graph
num_edges : int
Number ``M`` of bonds in the graph
seed : {None, int, array_like}
Random seed initializing the pseudo-random number generator.
Piped through to `numpy.random.RandomState` constructor.
spanning_cluster : bool, optional
Whether to detect a spanning cluster or not.
Defaults to ``True``.
auxiliary_node_attributes : optional
Return value of ``networkx.get_node_attributes(graph, 'span')``
auxiliary_edge_attributes : optional
Return value of ``networkx.get_edge_attributes(graph, 'span')``
spanning_sides : list, optional
List of keys (attribute values) of the two sides of the auxiliary
nodes.
Return value of ``list(set(auxiliary_node_attributes.values()))``
Yields
------
ret : ndarray
Structured array with dtype ``dtype=[('has_spanning_cluster', 'bool'),
('max_cluster_size', 'uint32'), ('moments', 'int64', 5)]``
ret['n'] : ndarray of int
The number of bonds added at the particular iteration
ret['edge'] : ndarray of int
The index of the edge added at the particular iteration
Note that in the first step, when ``ret['n'] == 0``, this value is
undefined!
ret['has_spanning_cluster'] : ndarray of bool
``True`` if there is a spanning cluster, ``False`` otherwise.
Only exists if `spanning_cluster` argument is set to ``True``.
ret['max_cluster_size'] : int
Size of the largest cluster (absolute number of sites)
ret['moments'] : 1-D :py:class:`numpy.ndarray` of int
Array of size ``5``.
The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster
size distribution, with ``k`` ranging from ``0`` to ``4``.
Raises
------
ValueError
If `spanning_cluster` is ``True``, but `graph` does not contain any
auxiliary nodes to detect spanning clusters.
See also
--------
numpy.random.RandomState
microcanonical_statistics_dtype
Notes
-----
Iterating through this generator is a single run of the Newman-Ziff
algorithm. [12]_
The first iteration yields the trivial state with :math:`n = 0` occupied
bonds.
Spanning cluster
In order to detect a spanning cluster, `graph` needs to contain
auxiliary nodes and edges, cf. Reference [12]_, Figure 6.
The auxiliary nodes and edges have the ``'span'`` `attribute
<http://networkx.github.io/documentation/latest/tutorial/tutorial.html#node-attributes>`_.
The value is either ``0`` or ``1``, distinguishing the two sides of the
graph to span.
Raw moments of the cluster size distribution
The :math:`k`-th raw moment of the (absolute) cluster size distribution
is :math:`\\sum_s' s^k N_s`, where :math:`s` is the cluster size and
:math:`N_s` is the number of clusters of size :math:`s`. [13]_
The primed sum :math:`\\sum'` signifies that the largest cluster is
excluded from the sum. [14]_
References
----------
.. [12] Newman, M. E. J. & Ziff, R. M. Fast Monte Carlo algorithm for site
or bond percolation. Physical Review E 64, 016706+ (2001),
`doi:10.1103/physreve.64.016706 <http://dx.doi.org/10.1103/physreve.64.016706>`_.
.. [13] Stauffer, D. & Aharony, A. Introduction to Percolation Theory (Taylor &
Francis, London, 1994), second edn.
.. [14] Binder, K. & Heermann, D. W. Monte Carlo Simulation in Statistical
Physics (Springer, Berlin, Heidelberg, 2010),
`doi:10.1007/978-3-642-03163-2 <http://dx.doi.org/10.1007/978-3-642-03163-2>`_.
"""
# construct random number generator
rng = np.random.RandomState(seed=seed)
if spanning_cluster:
if len(spanning_sides) != 2:
raise ValueError('Spanning cluster is to be detected, but auxiliary nodes of fewer or more than 2 types (sides) were given.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# get a list of edges for easy access in later iterations
perc_edges = perc_graph.edges()
perm_edges = rng.permutation(num_edges)
# initial iteration: no edges added yet (n == 0)
ret = np.empty(1, dtype=microcanonical_statistics_dtype(spanning_cluster))
ret['n'] = 0
ret['max_cluster_size'] = 1
ret['moments'] = np.ones(5, dtype='uint64') * (num_nodes - 1)
if spanning_cluster:
ret['has_spanning_cluster'] = False # depends on [control=['if'], data=[]]
# yield cluster statistics for n == 0
yield ret
# set up disjoint set (union-find) data structure
ds = nx.utils.union_find.UnionFind()
if spanning_cluster:
ds_spanning = nx.utils.union_find.UnionFind()
# merge all auxiliary nodes for each side
side_roots = dict()
for side in spanning_sides:
nodes = [node for (node, node_side) in auxiliary_node_attributes.items() if node_side is side]
ds_spanning.union(*nodes)
side_roots[side] = ds_spanning[nodes[0]] # depends on [control=['for'], data=['side']]
for (edge, edge_side) in auxiliary_edge_attributes.items():
ds_spanning.union(side_roots[edge_side], *edge) # depends on [control=['for'], data=[]]
side_roots = [ds_spanning[side_root] for side_root in side_roots.values()] # depends on [control=['if'], data=[]]
# get first node
max_cluster_root = next(perc_graph.nodes_iter())
# loop over all edges (n == 1..M)
for n in range(num_edges):
ret['n'] += 1
# draw new edge from permutation
edge_index = perm_edges[n]
edge = perc_edges[edge_index]
ret['edge'] = edge_index
# find roots and weights
roots = [ds[node] for node in edge]
weights = [ds.weights[root] for root in roots]
if roots[0] is not roots[1]:
# not same cluster: union!
ds.union(*roots)
if spanning_cluster:
ds_spanning.union(*roots)
ret['has_spanning_cluster'] = ds_spanning[side_roots[0]] == ds_spanning[side_roots[1]] # depends on [control=['if'], data=[]]
# find new root and weight
root = ds[edge[0]]
weight = ds.weights[root]
# moments and maximum cluster size
# deduct the previous sub-maximum clusters from moments
for i in [0, 1]:
if roots[i] is max_cluster_root:
continue # depends on [control=['if'], data=[]]
ret['moments'] -= weights[i] ** np.arange(5, dtype='uint64') # depends on [control=['for'], data=['i']]
if max_cluster_root in roots:
# merged with maximum cluster
max_cluster_root = root
ret['max_cluster_size'] = weight # depends on [control=['if'], data=['max_cluster_root']]
# merged previously sub-maximum clusters
elif ret['max_cluster_size'] >= weight:
# previously largest cluster remains largest cluster
# add merged cluster to moments
ret['moments'] += weight ** np.arange(5, dtype='uint64') # depends on [control=['if'], data=['weight']]
else:
# merged cluster overtook previously largest cluster
# add previously largest cluster to moments
max_cluster_root = root
ret['moments'] += ret['max_cluster_size'] ** np.arange(5, dtype='uint64')
ret['max_cluster_size'] = weight # depends on [control=['if'], data=[]]
yield ret # depends on [control=['for'], data=['n']] |
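A minimal, self-contained sketch of the same Newman-Ziff sweep, stripped down to the running largest-cluster size; the 4x4 grid graph and the seed are arbitrary illustration choices, not part of the sample above.

import networkx as nx
import numpy as np

def max_cluster_sizes(graph, seed=0):
    # Newman-Ziff: occupy bonds in a random permutation, merging clusters
    # with a union-find structure; record the running maximum cluster size.
    rng = np.random.RandomState(seed)
    edges = list(graph.edges())
    ds = nx.utils.union_find.UnionFind()
    sizes = [1]                              # n == 0: every site is alone
    for idx in rng.permutation(len(edges)):
        u, v = edges[idx]
        ds.union(u, v)                       # add one more bond
        sizes.append(max(sizes[-1], ds.weights[ds[u]]))
    return sizes

print(max_cluster_sizes(nx.grid_2d_graph(4, 4))[-1])   # 16: one spanning cluster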
def complain(distribution_name):
"""Issue a warning if `distribution_name` is installed.
In a future release, this method will be updated to raise ImportError
rather than just send a warning.
Args:
distribution_name (str): The name of the obsolete distribution.
"""
try:
pkg_resources.get_distribution(distribution_name)
warnings.warn(
"The {pkg} distribution is now obsolete. "
"Please `pip uninstall {pkg}`. "
"In the future, this warning will become an ImportError.".format(
pkg=distribution_name
),
DeprecationWarning,
)
except pkg_resources.DistributionNotFound:
pass | def function[complain, parameter[distribution_name]]:
constant[Issue a warning if `distribution_name` is installed.
In a future release, this method will be updated to raise ImportError
rather than just send a warning.
Args:
distribution_name (str): The name of the obsolete distribution.
]
<ast.Try object at 0x7da20c6e4430> | keyword[def] identifier[complain] ( identifier[distribution_name] ):
literal[string]
keyword[try] :
identifier[pkg_resources] . identifier[get_distribution] ( identifier[distribution_name] )
identifier[warnings] . identifier[warn] (
literal[string]
literal[string]
literal[string] . identifier[format] (
identifier[pkg] = identifier[distribution_name]
),
identifier[DeprecationWarning] ,
)
keyword[except] identifier[pkg_resources] . identifier[DistributionNotFound] :
keyword[pass] | def complain(distribution_name):
"""Issue a warning if `distribution_name` is installed.
In a future release, this method will be updated to raise ImportError
rather than just send a warning.
Args:
distribution_name (str): The name of the obsolete distribution.
"""
try:
pkg_resources.get_distribution(distribution_name)
warnings.warn('The {pkg} distribution is now obsolete. Please `pip uninstall {pkg}`. In the future, this warning will become an ImportError.'.format(pkg=distribution_name), DeprecationWarning) # depends on [control=['try'], data=[]]
except pkg_resources.DistributionNotFound:
pass # depends on [control=['except'], data=[]] |
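A hypothetical call pattern for the helper above; the distribution name is a placeholder, and `complain` is assumed to be importable from the module that defines it.

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")          # surface DeprecationWarning too
    complain("google-cloud")                 # silently passes when absent
    for w in caught:
        print(w.category.__name__, ":", w.message)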
def get_abilities():
"""Visit Bulbapedia and pull names and descriptions from the table, 'list of Abilities.' Save as JSON."""
page = requests.get('http://bulbapedia.bulbagarden.net/wiki/Ability')
soup = bs4.BeautifulSoup(page.text)
table = soup.find("table", {"class": "sortable"})
tablerows = [tr for tr in table.children if tr != '\n'][1:]
abilities = {}
for tr in tablerows:
cells = tr.find_all('td')
ability_name = cells[1].get_text().strip().replace(' ', '-').lower()
ability_desc = unicode(cells[2].get_text().strip())
abilities[ability_name] = ability_desc
srcpath = path.dirname(__file__)
with io.open(path.join(srcpath, 'abilities.json'), 'w', encoding='utf-8') as f:
f.write(json.dumps(abilities, ensure_ascii=False)) | def function[get_abilities, parameter[]]:
constant[Visit Bulbapedia and pull names and descriptions from the table, 'list of Abilities.' Save as JSON.]
variable[page] assign[=] call[name[requests].get, parameter[constant[http://bulbapedia.bulbagarden.net/wiki/Ability]]]
variable[soup] assign[=] call[name[bs4].BeautifulSoup, parameter[name[page].text]]
variable[table] assign[=] call[name[soup].find, parameter[constant[table], dictionary[[<ast.Constant object at 0x7da1b1628b20>], [<ast.Constant object at 0x7da1b162bc40>]]]]
variable[tablerows] assign[=] call[<ast.ListComp object at 0x7da1b162b220>][<ast.Slice object at 0x7da1b1629150>]
variable[abilities] assign[=] dictionary[[], []]
for taget[name[tr]] in starred[name[tablerows]] begin[:]
variable[cells] assign[=] call[name[tr].find_all, parameter[constant[td]]]
variable[ability_name] assign[=] call[call[call[call[call[name[cells]][constant[1]].get_text, parameter[]].strip, parameter[]].replace, parameter[constant[ ], constant[-]]].lower, parameter[]]
variable[ability_desc] assign[=] call[name[unicode], parameter[call[call[call[name[cells]][constant[2]].get_text, parameter[]].strip, parameter[]]]]
call[name[abilities]][name[ability_name]] assign[=] name[ability_desc]
variable[srcpath] assign[=] call[name[path].dirname, parameter[name[__file__]]]
with call[name[io].open, parameter[call[name[path].join, parameter[name[srcpath], constant[abilities.json]]], constant[w]]] begin[:]
call[name[f].write, parameter[call[name[json].dumps, parameter[name[abilities]]]]] | keyword[def] identifier[get_abilities] ():
literal[string]
identifier[page] = identifier[requests] . identifier[get] ( literal[string] )
identifier[soup] = identifier[bs4] . identifier[BeautifulSoup] ( identifier[page] . identifier[text] )
identifier[table] = identifier[soup] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] })
identifier[tablerows] =[ identifier[tr] keyword[for] identifier[tr] keyword[in] identifier[table] . identifier[children] keyword[if] identifier[tr] != literal[string] ][ literal[int] :]
identifier[abilities] ={}
keyword[for] identifier[tr] keyword[in] identifier[tablerows] :
identifier[cells] = identifier[tr] . identifier[find_all] ( literal[string] )
identifier[ability_name] = identifier[cells] [ literal[int] ]. identifier[get_text] (). identifier[strip] (). identifier[replace] ( literal[string] , literal[string] ). identifier[lower] ()
identifier[ability_desc] = identifier[unicode] ( identifier[cells] [ literal[int] ]. identifier[get_text] (). identifier[strip] ())
identifier[abilities] [ identifier[ability_name] ]= identifier[ability_desc]
identifier[srcpath] = identifier[path] . identifier[dirname] ( identifier[__file__] )
keyword[with] identifier[io] . identifier[open] ( identifier[path] . identifier[join] ( identifier[srcpath] , literal[string] ), literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[json] . identifier[dumps] ( identifier[abilities] , identifier[ensure_ascii] = keyword[False] )) | def get_abilities():
"""Visit Bulbapedia and pull names and descriptions from the table, 'list of Abilities.' Save as JSON."""
page = requests.get('http://bulbapedia.bulbagarden.net/wiki/Ability')
soup = bs4.BeautifulSoup(page.text)
table = soup.find('table', {'class': 'sortable'})
tablerows = [tr for tr in table.children if tr != '\n'][1:]
abilities = {}
for tr in tablerows:
cells = tr.find_all('td')
ability_name = cells[1].get_text().strip().replace(' ', '-').lower()
ability_desc = unicode(cells[2].get_text().strip())
abilities[ability_name] = ability_desc # depends on [control=['for'], data=['tr']]
srcpath = path.dirname(__file__)
with io.open(path.join(srcpath, 'abilities.json'), 'w', encoding='utf-8') as f:
f.write(json.dumps(abilities, ensure_ascii=False)) # depends on [control=['with'], data=['f']] |
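The sample above is Python 2 flavored (`unicode`, no explicit parser). A Python 3 re-sketch of the same scrape, assuming the Bulbapedia table layout is unchanged:

import bs4
import requests

def scrape_abilities(url="http://bulbapedia.bulbagarden.net/wiki/Ability"):
    soup = bs4.BeautifulSoup(requests.get(url).text, "html.parser")
    table = soup.find("table", {"class": "sortable"})
    abilities = {}
    for tr in table.find_all("tr")[1:]:          # skip the header row
        cells = tr.find_all("td")
        if len(cells) < 3:
            continue                             # spacer or malformed rows
        name = cells[1].get_text().strip().replace(" ", "-").lower()
        abilities[name] = cells[2].get_text().strip()
    return abilities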
def add_wic(self, old_wic, wic):
"""
Convert the old style WIC slot to a new style WIC slot and add the WIC
to the node properties
:param str old_wic: Old WIC slot
:param str wic: WIC name
"""
new_wic = 'wic' + old_wic[-1]
self.node['properties'][new_wic] = wic | def function[add_wic, parameter[self, old_wic, wic]]:
constant[
Convert the old style WIC slot to a new style WIC slot and add the WIC
to the node properties
:param str old_wic: Old WIC slot
:param str wic: WIC name
]
variable[new_wic] assign[=] binary_operation[constant[wic] + call[name[old_wic]][<ast.UnaryOp object at 0x7da1b28bf160>]]
call[call[name[self].node][constant[properties]]][name[new_wic]] assign[=] name[wic] | keyword[def] identifier[add_wic] ( identifier[self] , identifier[old_wic] , identifier[wic] ):
literal[string]
identifier[new_wic] = literal[string] + identifier[old_wic] [- literal[int] ]
identifier[self] . identifier[node] [ literal[string] ][ identifier[new_wic] ]= identifier[wic] | def add_wic(self, old_wic, wic):
"""
Convert the old style WIC slot to a new style WIC slot and add the WIC
to the node properties
:param str old_wic: Old WIC slot
:param str wic: WIC name
"""
new_wic = 'wic' + old_wic[-1]
self.node['properties'][new_wic] = wic |
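A tiny exercise of the conversion; the `Node` wrapper is invented for the demo and assumes the bare `add_wic` above is available at module scope.

class Node:
    # minimal stand-in exposing the .node dict the method expects
    def __init__(self):
        self.node = {"properties": {}}
    add_wic = add_wic            # reuse the function above as a method

n = Node()
n.add_wic("WIC0/1", "WIC-1T")
print(n.node["properties"])      # {'wic1': 'WIC-1T'}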
def _refit(self):
"""Update the map :math:`\pi` keeping the output :math:`g` fixed
Use Eq. (7) and below in [GR04]_
"""
# temporary variables for manipulation
mu_diff = np.empty_like(self.f.components[0].mu)
sigma = np.empty_like(self.f.components[0].sigma)
mean = np.empty_like(mu_diff)
cov = np.empty_like(sigma)
for j, c in enumerate(self.g.components):
# stop if inv_map is empty for j-th comp.
if not self.inv_map[j]:
self.g.weights[j] = 0.
continue
# (re-)initialize new mean/cov to zero
mean[:] = 0.0
cov[:] = 0.0
# compute total weight and mean
self.g.weights[j] = self.f.weights[self.inv_map[j]].sum()
for i in self.inv_map[j]:
mean += self.f.weights[i] * self.f.components[i].mu
# rescale by total weight
mean /= self.g.weights[j]
# update covariance
for i in self.inv_map[j]:
# mu_diff = mu'_j - mu_i
mu_diff[:] = mean
mu_diff -= self.f.components[i].mu
# sigma = (mu'_j - mu_i) (mu'_j - mu_i)^T
sigma[:] = np.outer(mu_diff, mu_diff)
# sigma += sigma_i
sigma += self.f.components[i].sigma
# multiply with alpha_i
sigma *= self.f.weights[i]
                # sigma_j += alpha_i * (sigma_i + (mu'_j - mu_i) (mu'_j - mu_i)^T)
cov += sigma
# 1 / beta_j
cov /= self.g.weights[j]
# update the Mixture
c.update(mean, cov) | def function[_refit, parameter[self]]:
constant[Update the map :math:`\pi` keeping the output :math:`g` fixed
Use Eq. (7) and below in [GR04]_
]
variable[mu_diff] assign[=] call[name[np].empty_like, parameter[call[name[self].f.components][constant[0]].mu]]
variable[sigma] assign[=] call[name[np].empty_like, parameter[call[name[self].f.components][constant[0]].sigma]]
variable[mean] assign[=] call[name[np].empty_like, parameter[name[mu_diff]]]
variable[cov] assign[=] call[name[np].empty_like, parameter[name[sigma]]]
for taget[tuple[[<ast.Name object at 0x7da20c6c7760>, <ast.Name object at 0x7da20c6c5480>]]] in starred[call[name[enumerate], parameter[name[self].g.components]]] begin[:]
if <ast.UnaryOp object at 0x7da20c6c7850> begin[:]
call[name[self].g.weights][name[j]] assign[=] constant[0.0]
continue
call[name[mean]][<ast.Slice object at 0x7da20c6c4100>] assign[=] constant[0.0]
call[name[cov]][<ast.Slice object at 0x7da20c6c5f90>] assign[=] constant[0.0]
call[name[self].g.weights][name[j]] assign[=] call[call[name[self].f.weights][call[name[self].inv_map][name[j]]].sum, parameter[]]
for taget[name[i]] in starred[call[name[self].inv_map][name[j]]] begin[:]
<ast.AugAssign object at 0x7da20c6c6320>
<ast.AugAssign object at 0x7da20c6c70a0>
for taget[name[i]] in starred[call[name[self].inv_map][name[j]]] begin[:]
call[name[mu_diff]][<ast.Slice object at 0x7da20c6c7df0>] assign[=] name[mean]
<ast.AugAssign object at 0x7da20c6c5300>
call[name[sigma]][<ast.Slice object at 0x7da20c6c49a0>] assign[=] call[name[np].outer, parameter[name[mu_diff], name[mu_diff]]]
<ast.AugAssign object at 0x7da20c6c5510>
<ast.AugAssign object at 0x7da20c6c6b30>
<ast.AugAssign object at 0x7da20c6c5bd0>
<ast.AugAssign object at 0x7da20c6c44c0>
call[name[c].update, parameter[name[mean], name[cov]]] | keyword[def] identifier[_refit] ( identifier[self] ):
literal[string]
identifier[mu_diff] = identifier[np] . identifier[empty_like] ( identifier[self] . identifier[f] . identifier[components] [ literal[int] ]. identifier[mu] )
identifier[sigma] = identifier[np] . identifier[empty_like] ( identifier[self] . identifier[f] . identifier[components] [ literal[int] ]. identifier[sigma] )
identifier[mean] = identifier[np] . identifier[empty_like] ( identifier[mu_diff] )
identifier[cov] = identifier[np] . identifier[empty_like] ( identifier[sigma] )
keyword[for] identifier[j] , identifier[c] keyword[in] identifier[enumerate] ( identifier[self] . identifier[g] . identifier[components] ):
keyword[if] keyword[not] identifier[self] . identifier[inv_map] [ identifier[j] ]:
identifier[self] . identifier[g] . identifier[weights] [ identifier[j] ]= literal[int]
keyword[continue]
identifier[mean] [:]= literal[int]
identifier[cov] [:]= literal[int]
identifier[self] . identifier[g] . identifier[weights] [ identifier[j] ]= identifier[self] . identifier[f] . identifier[weights] [ identifier[self] . identifier[inv_map] [ identifier[j] ]]. identifier[sum] ()
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[inv_map] [ identifier[j] ]:
identifier[mean] += identifier[self] . identifier[f] . identifier[weights] [ identifier[i] ]* identifier[self] . identifier[f] . identifier[components] [ identifier[i] ]. identifier[mu]
identifier[mean] /= identifier[self] . identifier[g] . identifier[weights] [ identifier[j] ]
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[inv_map] [ identifier[j] ]:
identifier[mu_diff] [:]= identifier[mean]
identifier[mu_diff] -= identifier[self] . identifier[f] . identifier[components] [ identifier[i] ]. identifier[mu]
identifier[sigma] [:]= identifier[np] . identifier[outer] ( identifier[mu_diff] , identifier[mu_diff] )
identifier[sigma] += identifier[self] . identifier[f] . identifier[components] [ identifier[i] ]. identifier[sigma]
identifier[sigma] *= identifier[self] . identifier[f] . identifier[weights] [ identifier[i] ]
identifier[cov] += identifier[sigma]
identifier[cov] /= identifier[self] . identifier[g] . identifier[weights] [ identifier[j] ]
identifier[c] . identifier[update] ( identifier[mean] , identifier[cov] ) | def _refit(self):
"""Update the map :math:`\\pi` keeping the output :math:`g` fixed
Use Eq. (7) and below in [GR04]_
"""
# temporary variables for manipulation
mu_diff = np.empty_like(self.f.components[0].mu)
sigma = np.empty_like(self.f.components[0].sigma)
mean = np.empty_like(mu_diff)
cov = np.empty_like(sigma)
for (j, c) in enumerate(self.g.components):
# stop if inv_map is empty for j-th comp.
if not self.inv_map[j]:
self.g.weights[j] = 0.0
continue # depends on [control=['if'], data=[]]
# (re-)initialize new mean/cov to zero
mean[:] = 0.0
cov[:] = 0.0
# compute total weight and mean
self.g.weights[j] = self.f.weights[self.inv_map[j]].sum()
for i in self.inv_map[j]:
mean += self.f.weights[i] * self.f.components[i].mu # depends on [control=['for'], data=['i']]
# rescale by total weight
mean /= self.g.weights[j]
# update covariance
for i in self.inv_map[j]:
# mu_diff = mu'_j - mu_i
mu_diff[:] = mean
mu_diff -= self.f.components[i].mu
# sigma = (mu'_j - mu_i) (mu'_j - mu_i)^T
sigma[:] = np.outer(mu_diff, mu_diff)
# sigma += sigma_i
sigma += self.f.components[i].sigma
# multiply with alpha_i
sigma *= self.f.weights[i]
# sigma_j += alpha_i * (sigma_i + (mu'_j - mu_i) (mu'_j - mu_i)^T)
cov += sigma # depends on [control=['for'], data=['i']]
# 1 / beta_j
cov /= self.g.weights[j]
# update the Mixture
c.update(mean, cov) # depends on [control=['for'], data=[]] |
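The covariance update above is plain moment matching (Eq. (7) in [GR04]). A stand-alone numeric check for merging two weighted Gaussian components, with made-up weights and moments:

import numpy as np

alphas = np.array([0.3, 0.7])                     # component weights
mus    = [np.zeros(2), np.array([2.0, 0.0])]      # component means
sigmas = [np.eye(2), 2.0 * np.eye(2)]             # component covariances

beta = alphas.sum()                               # merged weight
mu = sum(a * m for a, m in zip(alphas, mus)) / beta
cov = sum(a * (s + np.outer(mu - m, mu - m))      # add spread of the means
          for a, m, s in zip(alphas, mus, sigmas)) / beta
print(mu)     # [1.4 0. ]
print(cov)    # x-variance inflated to 2.54 by the separation of the means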
def deletescript(self, name):
"""Delete a script from the server
See MANAGESIEVE specifications, section 2.10
:param name: script's name
:rtype: boolean
"""
code, data = self.__send_command(
"DELETESCRIPT", [name.encode("utf-8")])
if code == "OK":
return True
return False | def function[deletescript, parameter[self, name]]:
constant[Delete a script from the server
See MANAGESIEVE specifications, section 2.10
:param name: script's name
:rtype: boolean
]
<ast.Tuple object at 0x7da20c9928f0> assign[=] call[name[self].__send_command, parameter[constant[DELETESCRIPT], list[[<ast.Call object at 0x7da20c992380>]]]]
if compare[name[code] equal[==] constant[OK]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[deletescript] ( identifier[self] , identifier[name] ):
literal[string]
identifier[code] , identifier[data] = identifier[self] . identifier[__send_command] (
literal[string] ,[ identifier[name] . identifier[encode] ( literal[string] )])
keyword[if] identifier[code] == literal[string] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def deletescript(self, name):
"""Delete a script from the server
See MANAGESIEVE specifications, section 2.10
:param name: script's name
:rtype: boolean
"""
(code, data) = self.__send_command('DELETESCRIPT', [name.encode('utf-8')])
if code == 'OK':
return True # depends on [control=['if'], data=[]]
return False |
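A hypothetical session around the method above; the `Client` constructor and the `connect` login call are placeholders for whatever the surrounding MANAGESIEVE client actually exposes.

client = Client("sieve.example.com", 4190)   # placeholder constructor
client.connect("user", "secret")             # placeholder login call
if client.deletescript("old-vacation"):
    print("script removed")
else:
    print("server refused the DELETESCRIPT command")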
def connect(self):
"""
        Connects to a Modbus-TCP server or a Modbus-RTU slave with the given parameters
"""
if (self.__ser is not None):
serial = importlib.import_module("serial")
if self.__stopbits == 0:
self.__ser.stopbits = serial.STOPBITS_ONE
elif self.__stopbits == 1:
self.__ser.stopbits = serial.STOPBITS_TWO
elif self.__stopbits == 2:
self.__ser.stopbits = serial.STOPBITS_ONE_POINT_FIVE
if self.__parity == 0:
self.__ser.parity = serial.PARITY_EVEN
elif self.__parity == 1:
self.__ser.parity = serial.PARITY_ODD
elif self.__parity == 2:
self.__ser.parity = serial.PARITY_NONE
self.__ser = serial.Serial(self.serialPort, self.__baudrate, timeout=self.__timeout, parity=self.__ser.parity, stopbits=self.__ser.stopbits, xonxoff=0, rtscts=0)
self.__ser.writeTimeout = self.__timeout
#print (self.ser)
if (self.__tcpClientSocket is not None):
self.__tcpClientSocket.settimeout(5)
self.__tcpClientSocket.connect((self.__ipAddress, self.__port))
self.__connected = True
self.__thread = threading.Thread(target=self.__listen, args=())
self.__thread.start() | def function[connect, parameter[self]]:
constant[
Connects to a Modbus-TCP Server or a Modbus-RTU Slave with the given Parameters
]
if compare[name[self].__ser is_not constant[None]] begin[:]
variable[serial] assign[=] call[name[importlib].import_module, parameter[constant[serial]]]
if compare[name[self].__stopbits equal[==] constant[0]] begin[:]
name[self].__ser.stopbits assign[=] name[serial].STOPBITS_ONE
if compare[name[self].__parity equal[==] constant[0]] begin[:]
name[self].__ser.parity assign[=] name[serial].PARITY_EVEN
name[self].__ser assign[=] call[name[serial].Serial, parameter[name[self].serialPort, name[self].__baudrate]]
name[self].__ser.writeTimeout assign[=] name[self].__timeout
if compare[name[self].__tcpClientSocket is_not constant[None]] begin[:]
call[name[self].__tcpClientSocket.settimeout, parameter[constant[5]]]
call[name[self].__tcpClientSocket.connect, parameter[tuple[[<ast.Attribute object at 0x7da1b1b01450>, <ast.Attribute object at 0x7da1b1b03d00>]]]]
name[self].__connected assign[=] constant[True]
name[self].__thread assign[=] call[name[threading].Thread, parameter[]]
call[name[self].__thread.start, parameter[]] | keyword[def] identifier[connect] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[__ser] keyword[is] keyword[not] keyword[None] ):
identifier[serial] = identifier[importlib] . identifier[import_module] ( literal[string] )
keyword[if] identifier[self] . identifier[__stopbits] == literal[int] :
identifier[self] . identifier[__ser] . identifier[stopbits] = identifier[serial] . identifier[STOPBITS_ONE]
keyword[elif] identifier[self] . identifier[__stopbits] == literal[int] :
identifier[self] . identifier[__ser] . identifier[stopbits] = identifier[serial] . identifier[STOPBITS_TWO]
keyword[elif] identifier[self] . identifier[__stopbits] == literal[int] :
identifier[self] . identifier[__ser] . identifier[stopbits] = identifier[serial] . identifier[STOPBITS_ONE_POINT_FIVE]
keyword[if] identifier[self] . identifier[__parity] == literal[int] :
identifier[self] . identifier[__ser] . identifier[parity] = identifier[serial] . identifier[PARITY_EVEN]
keyword[elif] identifier[self] . identifier[__parity] == literal[int] :
identifier[self] . identifier[__ser] . identifier[parity] = identifier[serial] . identifier[PARITY_ODD]
keyword[elif] identifier[self] . identifier[__parity] == literal[int] :
identifier[self] . identifier[__ser] . identifier[parity] = identifier[serial] . identifier[PARITY_NONE]
identifier[self] . identifier[__ser] = identifier[serial] . identifier[Serial] ( identifier[self] . identifier[serialPort] , identifier[self] . identifier[__baudrate] , identifier[timeout] = identifier[self] . identifier[__timeout] , identifier[parity] = identifier[self] . identifier[__ser] . identifier[parity] , identifier[stopbits] = identifier[self] . identifier[__ser] . identifier[stopbits] , identifier[xonxoff] = literal[int] , identifier[rtscts] = literal[int] )
identifier[self] . identifier[__ser] . identifier[writeTimeout] = identifier[self] . identifier[__timeout]
keyword[if] ( identifier[self] . identifier[__tcpClientSocket] keyword[is] keyword[not] keyword[None] ):
identifier[self] . identifier[__tcpClientSocket] . identifier[settimeout] ( literal[int] )
identifier[self] . identifier[__tcpClientSocket] . identifier[connect] (( identifier[self] . identifier[__ipAddress] , identifier[self] . identifier[__port] ))
identifier[self] . identifier[__connected] = keyword[True]
identifier[self] . identifier[__thread] = identifier[threading] . identifier[Thread] ( identifier[target] = identifier[self] . identifier[__listen] , identifier[args] =())
identifier[self] . identifier[__thread] . identifier[start] () | def connect(self):
"""
        Connects to a Modbus-TCP server or a Modbus-RTU slave with the given parameters
"""
if self.__ser is not None:
serial = importlib.import_module('serial')
if self.__stopbits == 0:
self.__ser.stopbits = serial.STOPBITS_ONE # depends on [control=['if'], data=[]]
elif self.__stopbits == 1:
self.__ser.stopbits = serial.STOPBITS_TWO # depends on [control=['if'], data=[]]
elif self.__stopbits == 2:
self.__ser.stopbits = serial.STOPBITS_ONE_POINT_FIVE # depends on [control=['if'], data=[]]
if self.__parity == 0:
self.__ser.parity = serial.PARITY_EVEN # depends on [control=['if'], data=[]]
elif self.__parity == 1:
self.__ser.parity = serial.PARITY_ODD # depends on [control=['if'], data=[]]
elif self.__parity == 2:
self.__ser.parity = serial.PARITY_NONE # depends on [control=['if'], data=[]]
self.__ser = serial.Serial(self.serialPort, self.__baudrate, timeout=self.__timeout, parity=self.__ser.parity, stopbits=self.__ser.stopbits, xonxoff=0, rtscts=0)
self.__ser.writeTimeout = self.__timeout # depends on [control=['if'], data=[]]
#print (self.ser)
if self.__tcpClientSocket is not None:
self.__tcpClientSocket.settimeout(5)
self.__tcpClientSocket.connect((self.__ipAddress, self.__port))
self.__connected = True
self.__thread = threading.Thread(target=self.__listen, args=())
self.__thread.start() # depends on [control=['if'], data=[]] |
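Hedged usage of the connector above, TCP path only; the `ModbusClient` name, its `(ip, port)` constructor and the `disconnect` teardown are placeholders.

client = ModbusClient("192.168.0.10", 502)   # placeholder (ip, port) ctor
client.connect()                             # opens the socket, starts __listen
# ... issue register reads/writes here ...
client.disconnect()                          # placeholder teardown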
def getfragment(self, default=None, encoding='utf-8', errors='strict'):
"""Return the decoded fragment identifier, or `default` if the
original URI did not contain a fragment component.
"""
fragment = self.fragment
if fragment is not None:
return uridecode(fragment, encoding, errors)
else:
return default | def function[getfragment, parameter[self, default, encoding, errors]]:
constant[Return the decoded fragment identifier, or `default` if the
original URI did not contain a fragment component.
]
variable[fragment] assign[=] name[self].fragment
if compare[name[fragment] is_not constant[None]] begin[:]
return[call[name[uridecode], parameter[name[fragment], name[encoding], name[errors]]]] | keyword[def] identifier[getfragment] ( identifier[self] , identifier[default] = keyword[None] , identifier[encoding] = literal[string] , identifier[errors] = literal[string] ):
literal[string]
identifier[fragment] = identifier[self] . identifier[fragment]
keyword[if] identifier[fragment] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[uridecode] ( identifier[fragment] , identifier[encoding] , identifier[errors] )
keyword[else] :
keyword[return] identifier[default] | def getfragment(self, default=None, encoding='utf-8', errors='strict'):
"""Return the decoded fragment identifier, or `default` if the
original URI did not contain a fragment component.
"""
fragment = self.fragment
if fragment is not None:
return uridecode(fragment, encoding, errors) # depends on [control=['if'], data=['fragment']]
else:
return default |
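Assuming a uritools-style split result (where this accessor appears to live), the decode-or-default behaviour looks like:

from uritools import urisplit    # assumed import path

print(urisplit("http://example.com/#a%20b").getfragment())       # a b
print(urisplit("http://example.com/").getfragment("no-frag"))    # no-frag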
def _makeplot(self, ax, fig, data, ymin=None, ymax=None, height=6,
width=6, dos=None, color=None):
"""Utility method to tidy phonon band structure diagrams. """
# Define colours
if color is None:
color = 'C0' # Default to first colour in matplotlib series
# set x and y limits
tymax = ymax if (ymax is not None) else max(flatten(data['frequency']))
tymin = ymin if (ymin is not None) else min(flatten(data['frequency']))
pad = (tymax - tymin) * 0.05
if ymin is None:
ymin = 0 if tymin >= self.imag_tol else tymin - pad
ymax = ymax if ymax else tymax + pad
ax.set_ylim(ymin, ymax)
ax.set_xlim(0, data['distances'][-1][-1])
if ymin < 0:
            dashline = True
            # `dashes` is assumed to be defined at module scope
            ax.axhline(0, color=rcParams['grid.color'], linestyle='--',
dashes=dashes,
zorder=0,
linewidth=rcParams['ytick.major.width'])
else:
dashline = False
if dos is not None:
self._plot_phonon_dos(dos, ax=fig.axes[1], color=color,
dashline=dashline)
else:
# keep correct aspect ratio; match axis to canvas
x0, x1 = ax.get_xlim()
y0, y1 = ax.get_ylim()
if width is None:
width = rcParams['figure.figsize'][0]
if height is None:
height = rcParams['figure.figsize'][1]
ax.set_aspect((height/width) * ((x1-x0)/(y1-y0))) | def function[_makeplot, parameter[self, ax, fig, data, ymin, ymax, height, width, dos, color]]:
constant[Utility method to tidy phonon band structure diagrams. ]
if compare[name[color] is constant[None]] begin[:]
variable[color] assign[=] constant[C0]
variable[tymax] assign[=] <ast.IfExp object at 0x7da1b24b3fa0>
variable[tymin] assign[=] <ast.IfExp object at 0x7da1b24b0ac0>
variable[pad] assign[=] binary_operation[binary_operation[name[tymax] - name[tymin]] * constant[0.05]]
if compare[name[ymin] is constant[None]] begin[:]
variable[ymin] assign[=] <ast.IfExp object at 0x7da1b24b1f00>
variable[ymax] assign[=] <ast.IfExp object at 0x7da1b24b0e80>
call[name[ax].set_ylim, parameter[name[ymin], name[ymax]]]
call[name[ax].set_xlim, parameter[constant[0], call[call[call[name[data]][constant[distances]]][<ast.UnaryOp object at 0x7da1b24b1b40>]][<ast.UnaryOp object at 0x7da1b24b1ab0>]]]
if compare[name[ymin] less[<] constant[0]] begin[:]
variable[dashline] assign[=] constant[True]
call[name[ax].axhline, parameter[constant[0]]]
if compare[name[dos] is_not constant[None]] begin[:]
call[name[self]._plot_phonon_dos, parameter[name[dos]]] | keyword[def] identifier[_makeplot] ( identifier[self] , identifier[ax] , identifier[fig] , identifier[data] , identifier[ymin] = keyword[None] , identifier[ymax] = keyword[None] , identifier[height] = literal[int] ,
identifier[width] = literal[int] , identifier[dos] = keyword[None] , identifier[color] = keyword[None] ):
literal[string]
keyword[if] identifier[color] keyword[is] keyword[None] :
identifier[color] = literal[string]
identifier[tymax] = identifier[ymax] keyword[if] ( identifier[ymax] keyword[is] keyword[not] keyword[None] ) keyword[else] identifier[max] ( identifier[flatten] ( identifier[data] [ literal[string] ]))
identifier[tymin] = identifier[ymin] keyword[if] ( identifier[ymin] keyword[is] keyword[not] keyword[None] ) keyword[else] identifier[min] ( identifier[flatten] ( identifier[data] [ literal[string] ]))
identifier[pad] =( identifier[tymax] - identifier[tymin] )* literal[int]
keyword[if] identifier[ymin] keyword[is] keyword[None] :
identifier[ymin] = literal[int] keyword[if] identifier[tymin] >= identifier[self] . identifier[imag_tol] keyword[else] identifier[tymin] - identifier[pad]
identifier[ymax] = identifier[ymax] keyword[if] identifier[ymax] keyword[else] identifier[tymax] + identifier[pad]
identifier[ax] . identifier[set_ylim] ( identifier[ymin] , identifier[ymax] )
identifier[ax] . identifier[set_xlim] ( literal[int] , identifier[data] [ literal[string] ][- literal[int] ][- literal[int] ])
keyword[if] identifier[ymin] < literal[int] :
identifier[dashline] = keyword[True]
identifier[ax] . identifier[axhline] ( literal[int] , identifier[color] = identifier[rcParams] [ literal[string] ], identifier[linestyle] = literal[string] ,
identifier[dashes] = identifier[dashes] ,
identifier[zorder] = literal[int] ,
identifier[linewidth] = identifier[rcParams] [ literal[string] ])
keyword[else] :
identifier[dashline] = keyword[False]
keyword[if] identifier[dos] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_plot_phonon_dos] ( identifier[dos] , identifier[ax] = identifier[fig] . identifier[axes] [ literal[int] ], identifier[color] = identifier[color] ,
identifier[dashline] = identifier[dashline] )
keyword[else] :
identifier[x0] , identifier[x1] = identifier[ax] . identifier[get_xlim] ()
identifier[y0] , identifier[y1] = identifier[ax] . identifier[get_ylim] ()
keyword[if] identifier[width] keyword[is] keyword[None] :
identifier[width] = identifier[rcParams] [ literal[string] ][ literal[int] ]
keyword[if] identifier[height] keyword[is] keyword[None] :
identifier[height] = identifier[rcParams] [ literal[string] ][ literal[int] ]
identifier[ax] . identifier[set_aspect] (( identifier[height] / identifier[width] )*(( identifier[x1] - identifier[x0] )/( identifier[y1] - identifier[y0] ))) | def _makeplot(self, ax, fig, data, ymin=None, ymax=None, height=6, width=6, dos=None, color=None):
"""Utility method to tidy phonon band structure diagrams. """
# Define colours
if color is None:
color = 'C0' # Default to first colour in matplotlib series # depends on [control=['if'], data=['color']]
# set x and y limits
tymax = ymax if ymax is not None else max(flatten(data['frequency']))
tymin = ymin if ymin is not None else min(flatten(data['frequency']))
pad = (tymax - tymin) * 0.05
if ymin is None:
ymin = 0 if tymin >= self.imag_tol else tymin - pad # depends on [control=['if'], data=['ymin']]
ymax = ymax if ymax else tymax + pad
ax.set_ylim(ymin, ymax)
ax.set_xlim(0, data['distances'][-1][-1])
if ymin < 0:
dashline = True
ax.axhline(0, color=rcParams['grid.color'], linestyle='--', dashes=dashes, zorder=0, linewidth=rcParams['ytick.major.width']) # depends on [control=['if'], data=[]]
else:
dashline = False
if dos is not None:
self._plot_phonon_dos(dos, ax=fig.axes[1], color=color, dashline=dashline) # depends on [control=['if'], data=['dos']]
else:
# keep correct aspect ratio; match axis to canvas
(x0, x1) = ax.get_xlim()
(y0, y1) = ax.get_ylim()
if width is None:
width = rcParams['figure.figsize'][0] # depends on [control=['if'], data=['width']]
if height is None:
height = rcParams['figure.figsize'][1] # depends on [control=['if'], data=['height']]
ax.set_aspect(height / width * ((x1 - x0) / (y1 - y0))) |
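The closing aspect trick isolated: force the axes box to a target height/width ratio regardless of the data limits. A minimal sketch:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 10], [0, 2])
height, width = 6.0, 6.0
x0, x1 = ax.get_xlim()
y0, y1 = ax.get_ylim()
ax.set_aspect((height / width) * ((x1 - x0) / (y1 - y0)))   # square box
fig.savefig("aspect_demo.png")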
def clearLocalServices(self):
'send Bye messages for the services and remove them'
for service in list(self._localServices.values()):
self._sendBye(service)
self._localServices.clear() | def function[clearLocalServices, parameter[self]]:
constant[send Bye messages for the services and remove them]
for taget[name[service]] in starred[call[name[list], parameter[call[name[self]._localServices.values, parameter[]]]]] begin[:]
call[name[self]._sendBye, parameter[name[service]]]
call[name[self]._localServices.clear, parameter[]] | keyword[def] identifier[clearLocalServices] ( identifier[self] ):
literal[string]
keyword[for] identifier[service] keyword[in] identifier[list] ( identifier[self] . identifier[_localServices] . identifier[values] ()):
identifier[self] . identifier[_sendBye] ( identifier[service] )
identifier[self] . identifier[_localServices] . identifier[clear] () | def clearLocalServices(self):
"""send Bye messages for the services and remove them"""
for service in list(self._localServices.values()):
self._sendBye(service) # depends on [control=['for'], data=['service']]
self._localServices.clear() |
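The one subtlety above is iterating over a snapshot (`list(...)`) so the dict can be cleared afterwards without invalidating the loop; the same pattern in isolation:

services = {"svc-1": object(), "svc-2": object()}

for service in list(services.values()):   # snapshot before mutating
    print("bye", service)
services.clear()
print(services)                           # {}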
def to_agraph(
self,
indicators: bool = False,
indicator_values: bool = False,
nodes_to_highlight=None,
*args,
**kwargs,
):
""" Exports the CAG as a pygraphviz AGraph for visualization.
Args:
indicators: Whether to display indicators in the AGraph
indicator_values: Whether to display indicator values in the AGraph
nodes_to_highlight: Nodes to highlight in the AGraph.
Returns:
A PyGraphviz AGraph representation of the AnalysisGraph.
"""
from delphi.utils.misc import choose_font
FONT = choose_font()
A = nx.nx_agraph.to_agraph(self)
A.graph_attr.update(
{
"dpi": 227,
"fontsize": 20,
"rankdir": kwargs.get("rankdir", "TB"),
"fontname": FONT,
"overlap": "scale",
"splines": True,
}
)
A.node_attr.update(
{
"shape": "rectangle",
"color": "black",
# "color": "#650021",
"style": "rounded",
"fontname": FONT,
}
)
nodes_with_indicators = [
n
for n in self.nodes(data=True)
if n[1].get("indicators") is not None
]
n_max = max(
[
sum([len(s.evidence) for s in e[2]["InfluenceStatements"]])
for e in self.edges(data=True)
]
)
nodeset = {n.split("/")[-1] for n in self.nodes}
simplified_labels = len(nodeset) == len(self)
color_str = "#650021"
for n in self.nodes(data=True):
if kwargs.get("values"):
node_label = (
n[0].capitalize().replace("_", " ")
+ " ("
+ str(np.mean(n[1]["rv"].dataset))
+ ")"
)
else:
node_label = (
n[0].split("/")[-1].replace("_", " ").capitalize()
if simplified_labels
else n[0]
)
A.add_node(n[0], label=node_label)
if list(self.edges(data=True))[0][2].get("βs") is not None:
max_median_betas = max(
[abs(np.median(e[2]["βs"])) for e in self.edges(data=True)]
)
for e in self.edges(data=True):
# Calculate reinforcement (ad-hoc!)
sts = e[2]["InfluenceStatements"]
total_evidence_pieces = sum([len(s.evidence) for s in sts])
reinforcement = (
sum(
[
stmt.overall_polarity() * len(stmt.evidence)
for stmt in sts
]
)
/ total_evidence_pieces
)
opacity = total_evidence_pieces / n_max
h = (opacity * 255).hex()
if list(self.edges(data=True))[0][2].get("βs") is not None:
penwidth = 3 * abs(np.median(e[2]["βs"]) / max_median_betas)
cmap = cm.Greens if reinforcement > 0 else cm.Reds
c_str = (
matplotlib.colors.rgb2hex(cmap(abs(reinforcement)))
+ h[4:6]
)
else:
penwidth = 1
c_str = "black"
A.add_edge(e[0], e[1], color=c_str, penwidth=penwidth)
# Drawing indicator variables
if indicators:
for n in nodes_with_indicators:
for indicator_name, ind in n[1]["indicators"].items():
node_label = _insert_line_breaks(
ind.name.replace("_", " "), 30
)
if indicator_values:
if ind.unit is not None:
units = f" {ind.unit}"
else:
units = ""
if ind.mean is not None:
                            ind_value = "{:.2f}".format(ind.mean)
                            # use the precomputed units string, which is
                            # empty when ind.unit is None
                            node_label = (
                                f"{node_label}\n{ind_value}{units}"
f"\nSource: {ind.source}"
f"\nAggregation axes: {ind.aggaxes}"
f"\nAggregation method: {ind.aggregation_method}"
)
A.add_node(
indicator_name,
style="rounded, filled",
fillcolor="lightblue",
label=node_label,
)
A.add_edge(n[0], indicator_name, color="royalblue4")
        if nodes_to_highlight is not None:
            # use the named parameter directly; it never appears in kwargs
            nodes = nodes_to_highlight
            if isinstance(nodes, list):
                for n in nodes:
                    if n in A.nodes():
                        A.add_node(n, fontcolor="royalblue")
            elif isinstance(nodes, str):
                # check the string itself, not a stale loop variable
                if nodes in A.nodes():
                    A.add_node(nodes, fontcolor="royalblue")
if kwargs.get("graph_label") is not None:
A.graph_attr["label"] = kwargs["graph_label"]
return A | def function[to_agraph, parameter[self, indicators, indicator_values, nodes_to_highlight]]:
constant[ Exports the CAG as a pygraphviz AGraph for visualization.
Args:
indicators: Whether to display indicators in the AGraph
indicator_values: Whether to display indicator values in the AGraph
nodes_to_highlight: Nodes to highlight in the AGraph.
Returns:
A PyGraphviz AGraph representation of the AnalysisGraph.
]
from relative_module[delphi.utils.misc] import module[choose_font]
variable[FONT] assign[=] call[name[choose_font], parameter[]]
variable[A] assign[=] call[name[nx].nx_agraph.to_agraph, parameter[name[self]]]
call[name[A].graph_attr.update, parameter[dictionary[[<ast.Constant object at 0x7da1b044f070>, <ast.Constant object at 0x7da1b044f010>, <ast.Constant object at 0x7da1b044ef80>, <ast.Constant object at 0x7da1b044efe0>, <ast.Constant object at 0x7da1b044efb0>, <ast.Constant object at 0x7da1b044f040>], [<ast.Constant object at 0x7da1b044dc00>, <ast.Constant object at 0x7da1b044dbd0>, <ast.Call object at 0x7da1b044dba0>, <ast.Name object at 0x7da1b044c580>, <ast.Constant object at 0x7da1b044c550>, <ast.Constant object at 0x7da1b044c520>]]]]
call[name[A].node_attr.update, parameter[dictionary[[<ast.Constant object at 0x7da1b044c4f0>, <ast.Constant object at 0x7da1b044d210>, <ast.Constant object at 0x7da1b044d240>, <ast.Constant object at 0x7da1b044da20>], [<ast.Constant object at 0x7da1b044d9f0>, <ast.Constant object at 0x7da1b044d9c0>, <ast.Constant object at 0x7da1b044d990>, <ast.Name object at 0x7da1b044d960>]]]]
variable[nodes_with_indicators] assign[=] <ast.ListComp object at 0x7da1b044d4b0>
variable[n_max] assign[=] call[name[max], parameter[<ast.ListComp object at 0x7da1b044ee00>]]
variable[nodeset] assign[=] <ast.SetComp object at 0x7da1b044eaa0>
variable[simplified_labels] assign[=] compare[call[name[len], parameter[name[nodeset]]] equal[==] call[name[len], parameter[name[self]]]]
variable[color_str] assign[=] constant[#650021]
for taget[name[n]] in starred[call[name[self].nodes, parameter[]]] begin[:]
if call[name[kwargs].get, parameter[constant[values]]] begin[:]
variable[node_label] assign[=] binary_operation[binary_operation[binary_operation[call[call[call[name[n]][constant[0]].capitalize, parameter[]].replace, parameter[constant[_], constant[ ]]] + constant[ (]] + call[name[str], parameter[call[name[np].mean, parameter[call[call[name[n]][constant[1]]][constant[rv]].dataset]]]]] + constant[)]]
call[name[A].add_node, parameter[call[name[n]][constant[0]]]]
if compare[call[call[call[call[name[list], parameter[call[name[self].edges, parameter[]]]]][constant[0]]][constant[2]].get, parameter[constant[βs]]] is_not constant[None]] begin[:]
variable[max_median_betas] assign[=] call[name[max], parameter[<ast.ListComp object at 0x7da1b044e2f0>]]
for taget[name[e]] in starred[call[name[self].edges, parameter[]]] begin[:]
variable[sts] assign[=] call[call[name[e]][constant[2]]][constant[InfluenceStatements]]
variable[total_evidence_pieces] assign[=] call[name[sum], parameter[<ast.ListComp object at 0x7da1b044c4c0>]]
variable[reinforcement] assign[=] binary_operation[call[name[sum], parameter[<ast.ListComp object at 0x7da1b044e590>]] / name[total_evidence_pieces]]
variable[opacity] assign[=] binary_operation[name[total_evidence_pieces] / name[n_max]]
variable[h] assign[=] call[binary_operation[name[opacity] * constant[255]].hex, parameter[]]
if compare[call[call[call[call[name[list], parameter[call[name[self].edges, parameter[]]]]][constant[0]]][constant[2]].get, parameter[constant[βs]]] is_not constant[None]] begin[:]
variable[penwidth] assign[=] binary_operation[constant[3] * call[name[abs], parameter[binary_operation[call[name[np].median, parameter[call[call[name[e]][constant[2]]][constant[βs]]]] / name[max_median_betas]]]]]
variable[cmap] assign[=] <ast.IfExp object at 0x7da1b049a050>
variable[c_str] assign[=] binary_operation[call[name[matplotlib].colors.rgb2hex, parameter[call[name[cmap], parameter[call[name[abs], parameter[name[reinforcement]]]]]]] + call[name[h]][<ast.Slice object at 0x7da1b04987f0>]]
call[name[A].add_edge, parameter[call[name[e]][constant[0]], call[name[e]][constant[1]]]]
if name[indicators] begin[:]
for taget[name[n]] in starred[name[nodes_with_indicators]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b04989a0>, <ast.Name object at 0x7da1b04995d0>]]] in starred[call[call[call[name[n]][constant[1]]][constant[indicators]].items, parameter[]]] begin[:]
variable[node_label] assign[=] call[name[_insert_line_breaks], parameter[call[name[ind].name.replace, parameter[constant[_], constant[ ]]], constant[30]]]
if name[indicator_values] begin[:]
if compare[name[ind].unit is_not constant[None]] begin[:]
variable[units] assign[=] <ast.JoinedStr object at 0x7da1b0498100>
if compare[name[ind].mean is_not constant[None]] begin[:]
variable[ind_value] assign[=] call[constant[{:.2f}].format, parameter[name[ind].mean]]
variable[node_label] assign[=] <ast.JoinedStr object at 0x7da1b0499270>
call[name[A].add_node, parameter[name[indicator_name]]]
call[name[A].add_edge, parameter[call[name[n]][constant[0]], name[indicator_name]]]
if compare[name[nodes_to_highlight] is_not constant[None]] begin[:]
variable[nodes] assign[=] call[name[kwargs].pop, parameter[constant[nodes_to_highlight]]]
if call[name[isinstance], parameter[name[nodes], name[list]]] begin[:]
for taget[name[n]] in starred[name[nodes]] begin[:]
if compare[name[n] in call[name[A].nodes, parameter[]]] begin[:]
call[name[A].add_node, parameter[name[n]]]
if compare[call[name[kwargs].get, parameter[constant[graph_label]]] is_not constant[None]] begin[:]
call[name[A].graph_attr][constant[label]] assign[=] call[name[kwargs]][constant[graph_label]]
return[name[A]] | keyword[def] identifier[to_agraph] (
identifier[self] ,
identifier[indicators] : identifier[bool] = keyword[False] ,
identifier[indicator_values] : identifier[bool] = keyword[False] ,
identifier[nodes_to_highlight] = keyword[None] ,
* identifier[args] ,
** identifier[kwargs] ,
):
literal[string]
keyword[from] identifier[delphi] . identifier[utils] . identifier[misc] keyword[import] identifier[choose_font]
identifier[FONT] = identifier[choose_font] ()
identifier[A] = identifier[nx] . identifier[nx_agraph] . identifier[to_agraph] ( identifier[self] )
identifier[A] . identifier[graph_attr] . identifier[update] (
{
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ),
literal[string] : identifier[FONT] ,
literal[string] : literal[string] ,
literal[string] : keyword[True] ,
}
)
identifier[A] . identifier[node_attr] . identifier[update] (
{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[FONT] ,
}
)
identifier[nodes_with_indicators] =[
identifier[n]
keyword[for] identifier[n] keyword[in] identifier[self] . identifier[nodes] ( identifier[data] = keyword[True] )
keyword[if] identifier[n] [ literal[int] ]. identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None]
]
identifier[n_max] = identifier[max] (
[
identifier[sum] ([ identifier[len] ( identifier[s] . identifier[evidence] ) keyword[for] identifier[s] keyword[in] identifier[e] [ literal[int] ][ literal[string] ]])
keyword[for] identifier[e] keyword[in] identifier[self] . identifier[edges] ( identifier[data] = keyword[True] )
]
)
identifier[nodeset] ={ identifier[n] . identifier[split] ( literal[string] )[- literal[int] ] keyword[for] identifier[n] keyword[in] identifier[self] . identifier[nodes] }
identifier[simplified_labels] = identifier[len] ( identifier[nodeset] )== identifier[len] ( identifier[self] )
identifier[color_str] = literal[string]
keyword[for] identifier[n] keyword[in] identifier[self] . identifier[nodes] ( identifier[data] = keyword[True] ):
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
identifier[node_label] =(
identifier[n] [ literal[int] ]. identifier[capitalize] (). identifier[replace] ( literal[string] , literal[string] )
+ literal[string]
+ identifier[str] ( identifier[np] . identifier[mean] ( identifier[n] [ literal[int] ][ literal[string] ]. identifier[dataset] ))
+ literal[string]
)
keyword[else] :
identifier[node_label] =(
identifier[n] [ literal[int] ]. identifier[split] ( literal[string] )[- literal[int] ]. identifier[replace] ( literal[string] , literal[string] ). identifier[capitalize] ()
keyword[if] identifier[simplified_labels]
keyword[else] identifier[n] [ literal[int] ]
)
identifier[A] . identifier[add_node] ( identifier[n] [ literal[int] ], identifier[label] = identifier[node_label] )
keyword[if] identifier[list] ( identifier[self] . identifier[edges] ( identifier[data] = keyword[True] ))[ literal[int] ][ literal[int] ]. identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[max_median_betas] = identifier[max] (
[ identifier[abs] ( identifier[np] . identifier[median] ( identifier[e] [ literal[int] ][ literal[string] ])) keyword[for] identifier[e] keyword[in] identifier[self] . identifier[edges] ( identifier[data] = keyword[True] )]
)
keyword[for] identifier[e] keyword[in] identifier[self] . identifier[edges] ( identifier[data] = keyword[True] ):
identifier[sts] = identifier[e] [ literal[int] ][ literal[string] ]
identifier[total_evidence_pieces] = identifier[sum] ([ identifier[len] ( identifier[s] . identifier[evidence] ) keyword[for] identifier[s] keyword[in] identifier[sts] ])
identifier[reinforcement] =(
identifier[sum] (
[
identifier[stmt] . identifier[overall_polarity] ()* identifier[len] ( identifier[stmt] . identifier[evidence] )
keyword[for] identifier[stmt] keyword[in] identifier[sts]
]
)
/ identifier[total_evidence_pieces]
)
identifier[opacity] = identifier[total_evidence_pieces] / identifier[n_max]
identifier[h] =( identifier[opacity] * literal[int] ). identifier[hex] ()
keyword[if] identifier[list] ( identifier[self] . identifier[edges] ( identifier[data] = keyword[True] ))[ literal[int] ][ literal[int] ]. identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[penwidth] = literal[int] * identifier[abs] ( identifier[np] . identifier[median] ( identifier[e] [ literal[int] ][ literal[string] ])/ identifier[max_median_betas] )
identifier[cmap] = identifier[cm] . identifier[Greens] keyword[if] identifier[reinforcement] > literal[int] keyword[else] identifier[cm] . identifier[Reds]
identifier[c_str] =(
identifier[matplotlib] . identifier[colors] . identifier[rgb2hex] ( identifier[cmap] ( identifier[abs] ( identifier[reinforcement] )))
+ identifier[h] [ literal[int] : literal[int] ]
)
keyword[else] :
identifier[penwidth] = literal[int]
identifier[c_str] = literal[string]
identifier[A] . identifier[add_edge] ( identifier[e] [ literal[int] ], identifier[e] [ literal[int] ], identifier[color] = identifier[c_str] , identifier[penwidth] = identifier[penwidth] )
keyword[if] identifier[indicators] :
keyword[for] identifier[n] keyword[in] identifier[nodes_with_indicators] :
keyword[for] identifier[indicator_name] , identifier[ind] keyword[in] identifier[n] [ literal[int] ][ literal[string] ]. identifier[items] ():
identifier[node_label] = identifier[_insert_line_breaks] (
identifier[ind] . identifier[name] . identifier[replace] ( literal[string] , literal[string] ), literal[int]
)
keyword[if] identifier[indicator_values] :
keyword[if] identifier[ind] . identifier[unit] keyword[is] keyword[not] keyword[None] :
identifier[units] = literal[string]
keyword[else] :
identifier[units] = literal[string]
keyword[if] identifier[ind] . identifier[mean] keyword[is] keyword[not] keyword[None] :
identifier[ind_value] = literal[string] . identifier[format] ( identifier[ind] . identifier[mean] )
identifier[node_label] =(
literal[string]
literal[string]
literal[string]
literal[string]
)
identifier[A] . identifier[add_node] (
identifier[indicator_name] ,
identifier[style] = literal[string] ,
identifier[fillcolor] = literal[string] ,
identifier[label] = identifier[node_label] ,
)
identifier[A] . identifier[add_edge] ( identifier[n] [ literal[int] ], identifier[indicator_name] , identifier[color] = literal[string] )
keyword[if] identifier[nodes_to_highlight] keyword[is] keyword[not] keyword[None] :
identifier[nodes] = identifier[kwargs] . identifier[pop] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[nodes] , identifier[list] ):
keyword[for] identifier[n] keyword[in] identifier[nodes] :
keyword[if] identifier[n] keyword[in] identifier[A] . identifier[nodes] ():
identifier[A] . identifier[add_node] ( identifier[n] , identifier[fontcolor] = literal[string] )
keyword[elif] identifier[isinstance] ( identifier[nodes] , identifier[str] ):
keyword[if] identifier[n] keyword[in] identifier[A] . identifier[nodes] ():
identifier[A] . identifier[add_node] ( identifier[nodes] , identifier[fontcolor] = literal[string] )
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[A] . identifier[graph_attr] [ literal[string] ]= identifier[kwargs] [ literal[string] ]
keyword[return] identifier[A] | def to_agraph(self, indicators: bool=False, indicator_values: bool=False, nodes_to_highlight=None, *args, **kwargs):
""" Exports the CAG as a pygraphviz AGraph for visualization.
Args:
indicators: Whether to display indicators in the AGraph
indicator_values: Whether to display indicator values in the AGraph
nodes_to_highlight: Nodes to highlight in the AGraph.
Returns:
A PyGraphviz AGraph representation of the AnalysisGraph.
"""
from delphi.utils.misc import choose_font
FONT = choose_font()
A = nx.nx_agraph.to_agraph(self)
A.graph_attr.update({'dpi': 227, 'fontsize': 20, 'rankdir': kwargs.get('rankdir', 'TB'), 'fontname': FONT, 'overlap': 'scale', 'splines': True})
# "color": "#650021",
A.node_attr.update({'shape': 'rectangle', 'color': 'black', 'style': 'rounded', 'fontname': FONT})
nodes_with_indicators = [n for n in self.nodes(data=True) if n[1].get('indicators') is not None]
n_max = max([sum([len(s.evidence) for s in e[2]['InfluenceStatements']]) for e in self.edges(data=True)])
nodeset = {n.split('/')[-1] for n in self.nodes}
simplified_labels = len(nodeset) == len(self)
color_str = '#650021'
for n in self.nodes(data=True):
if kwargs.get('values'):
node_label = n[0].capitalize().replace('_', ' ') + ' (' + str(np.mean(n[1]['rv'].dataset)) + ')' # depends on [control=['if'], data=[]]
else:
node_label = n[0].split('/')[-1].replace('_', ' ').capitalize() if simplified_labels else n[0]
A.add_node(n[0], label=node_label) # depends on [control=['for'], data=['n']]
if list(self.edges(data=True))[0][2].get('βs') is not None:
max_median_betas = max([abs(np.median(e[2]['βs'])) for e in self.edges(data=True)]) # depends on [control=['if'], data=[]]
for e in self.edges(data=True):
# Calculate reinforcement (ad-hoc!)
sts = e[2]['InfluenceStatements']
total_evidence_pieces = sum([len(s.evidence) for s in sts])
reinforcement = sum([stmt.overall_polarity() * len(stmt.evidence) for stmt in sts]) / total_evidence_pieces
opacity = total_evidence_pieces / n_max
h = (opacity * 255).hex()
if list(self.edges(data=True))[0][2].get('βs') is not None:
penwidth = 3 * abs(np.median(e[2]['βs']) / max_median_betas)
cmap = cm.Greens if reinforcement > 0 else cm.Reds
c_str = matplotlib.colors.rgb2hex(cmap(abs(reinforcement))) + h[4:6] # depends on [control=['if'], data=[]]
else:
penwidth = 1
c_str = 'black'
A.add_edge(e[0], e[1], color=c_str, penwidth=penwidth) # depends on [control=['for'], data=['e']]
# Drawing indicator variables
if indicators:
for n in nodes_with_indicators:
for (indicator_name, ind) in n[1]['indicators'].items():
node_label = _insert_line_breaks(ind.name.replace('_', ' '), 30)
if indicator_values:
if ind.unit is not None:
units = f' {ind.unit}' # depends on [control=['if'], data=[]]
else:
units = ''
if ind.mean is not None:
ind_value = '{:.2f}'.format(ind.mean)
node_label = f'{node_label}\n{ind_value} {ind.unit}\nSource: {ind.source}\nAggregation axes: {ind.aggaxes}\nAggregation method: {ind.aggregation_method}' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
A.add_node(indicator_name, style='rounded, filled', fillcolor='lightblue', label=node_label)
A.add_edge(n[0], indicator_name, color='royalblue4') # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['n']] # depends on [control=['if'], data=[]]
if nodes_to_highlight is not None:
nodes = kwargs.pop('nodes_to_highlight')
if isinstance(nodes, list):
for n in nodes:
if n in A.nodes():
A.add_node(n, fontcolor='royalblue') # depends on [control=['if'], data=['n']] # depends on [control=['for'], data=['n']] # depends on [control=['if'], data=[]]
elif isinstance(nodes, str):
if n in A.nodes():
A.add_node(nodes, fontcolor='royalblue') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if kwargs.get('graph_label') is not None:
A.graph_attr['label'] = kwargs['graph_label'] # depends on [control=['if'], data=[]]
return A |
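The export skeleton reduced to its essentials; assumes pygraphviz and Graphviz are installed, and the node names are invented:

import networkx as nx

G = nx.DiGraph()
G.add_edge("rainfall", "crop_yield")
A = nx.nx_agraph.to_agraph(G)                  # networkx -> pygraphviz
A.graph_attr.update({"rankdir": "LR", "fontsize": 20})
A.node_attr.update({"shape": "rectangle", "style": "rounded"})
A.draw("cag.png", prog="dot")                  # render with Graphviz dot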
def get_subnets(context, limit=None, page_reverse=False, sorts=['id'],
marker=None, filters=None, fields=None):
"""Retrieve a list of subnets.
    The contents of the list depend on the identity of the user
making the request (as indicated by the context) as well as any
filters.
: param context: neutron api request context
: param filters: a dictionary with keys that are valid keys for
a subnet as listed in the RESOURCE_ATTRIBUTE_MAP object
    in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
: param fields: a list of strings that are valid keys in a
subnet dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
"""
LOG.info("get_subnets for tenant %s with filters %s fields %s" %
(context.tenant_id, filters, fields))
filters = filters or {}
subnets = db_api.subnet_find(context, limit=limit,
page_reverse=page_reverse, sorts=sorts,
marker_obj=marker, join_dns=True,
join_routes=True, join_pool=True, **filters)
for subnet in subnets:
cache = subnet.get("_allocation_pool_cache")
if not cache:
db_api.subnet_update_set_alloc_pool_cache(
context, subnet, subnet.allocation_pools)
return v._make_subnets_list(subnets, fields=fields) | def function[get_subnets, parameter[context, limit, page_reverse, sorts, marker, filters, fields]]:
constant[Retrieve a list of subnets.
The contents of the list depend on the identity of the user
making the request (as indicated by the context) as well as any
filters.
: param context: neutron api request context
: param filters: a dictionary with keys that are valid keys for
a subnet as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
: param fields: a list of strings that are valid keys in a
subnet dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
]
call[name[LOG].info, parameter[binary_operation[constant[get_subnets for tenant %s with filters %s fields %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b00cab90>, <ast.Name object at 0x7da1b00cb7c0>, <ast.Name object at 0x7da1b00cb790>]]]]]
variable[filters] assign[=] <ast.BoolOp object at 0x7da1b00c8b80>
variable[subnets] assign[=] call[name[db_api].subnet_find, parameter[name[context]]]
for taget[name[subnet]] in starred[name[subnets]] begin[:]
variable[cache] assign[=] call[name[subnet].get, parameter[constant[_allocation_pool_cache]]]
if <ast.UnaryOp object at 0x7da1b00ca3b0> begin[:]
call[name[db_api].subnet_update_set_alloc_pool_cache, parameter[name[context], name[subnet], name[subnet].allocation_pools]]
return[call[name[v]._make_subnets_list, parameter[name[subnets]]]] | keyword[def] identifier[get_subnets] ( identifier[context] , identifier[limit] = keyword[None] , identifier[page_reverse] = keyword[False] , identifier[sorts] =[ literal[string] ],
identifier[marker] = keyword[None] , identifier[filters] = keyword[None] , identifier[fields] = keyword[None] ):
literal[string]
identifier[LOG] . identifier[info] ( literal[string] %
( identifier[context] . identifier[tenant_id] , identifier[filters] , identifier[fields] ))
identifier[filters] = identifier[filters] keyword[or] {}
identifier[subnets] = identifier[db_api] . identifier[subnet_find] ( identifier[context] , identifier[limit] = identifier[limit] ,
identifier[page_reverse] = identifier[page_reverse] , identifier[sorts] = identifier[sorts] ,
identifier[marker_obj] = identifier[marker] , identifier[join_dns] = keyword[True] ,
identifier[join_routes] = keyword[True] , identifier[join_pool] = keyword[True] ,** identifier[filters] )
keyword[for] identifier[subnet] keyword[in] identifier[subnets] :
identifier[cache] = identifier[subnet] . identifier[get] ( literal[string] )
keyword[if] keyword[not] identifier[cache] :
identifier[db_api] . identifier[subnet_update_set_alloc_pool_cache] (
identifier[context] , identifier[subnet] , identifier[subnet] . identifier[allocation_pools] )
keyword[return] identifier[v] . identifier[_make_subnets_list] ( identifier[subnets] , identifier[fields] = identifier[fields] ) | def get_subnets(context, limit=None, page_reverse=False, sorts=['id'], marker=None, filters=None, fields=None):
"""Retrieve a list of subnets.
The contents of the list depend on the identity of the user
making the request (as indicated by the context) as well as any
filters.
: param context: neutron api request context
: param filters: a dictionary with keys that are valid keys for
a subnet as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
: param fields: a list of strings that are valid keys in a
subnet dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
"""
LOG.info('get_subnets for tenant %s with filters %s fields %s' % (context.tenant_id, filters, fields))
filters = filters or {}
subnets = db_api.subnet_find(context, limit=limit, page_reverse=page_reverse, sorts=sorts, marker_obj=marker, join_dns=True, join_routes=True, join_pool=True, **filters)
for subnet in subnets:
cache = subnet.get('_allocation_pool_cache')
if not cache:
db_api.subnet_update_set_alloc_pool_cache(context, subnet, subnet.allocation_pools) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['subnet']]
return v._make_subnets_list(subnets, fields=fields) |
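A minimal usage sketch for the subnet listing above; the request context `ctx` and the filter key are assumptions drawn from the docstring, not from a live Neutron deployment:
# Hypothetical call; 'ctx' stands in for a neutron api request context.
filters = {"network_id": ["net-uuid"]}
for subnet in get_subnets(ctx, limit=10, filters=filters, fields=["id", "cidr"]):
    print(subnet)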
def update_my_account(self, body, **kwargs): # noqa: E501
"""Updates attributes of the account. # noqa: E501
An endpoint for updating the account. **Example usage:** `curl -X PUT https://api.us-east-1.mbedcloud.com/v3/accounts/me -d '{\"phone_number\": \"12345678\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.update_my_account(body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param AccountUpdateReq body: Details of the account to be updated. (required)
:return: AccountInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.update_my_account_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.update_my_account_with_http_info(body, **kwargs) # noqa: E501
return data | def function[update_my_account, parameter[self, body]]:
constant[Updates attributes of the account. # noqa: E501
An endpoint for updating the account. **Example usage:** `curl -X PUT https://api.us-east-1.mbedcloud.com/v3/accounts/me -d '{"phone_number": "12345678"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.update_my_account(body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param AccountUpdateReq body: Details of the account to be updated. (required)
:return: AccountInfo
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[asynchronous]]] begin[:]
return[call[name[self].update_my_account_with_http_info, parameter[name[body]]]] | keyword[def] identifier[update_my_account] ( identifier[self] , identifier[body] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[update_my_account_with_http_info] ( identifier[body] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[update_my_account_with_http_info] ( identifier[body] ,** identifier[kwargs] )
keyword[return] identifier[data] | def update_my_account(self, body, **kwargs): # noqa: E501
'Updates attributes of the account. # noqa: E501\n\n An endpoint for updating the account. **Example usage:** `curl -X PUT https://api.us-east-1.mbedcloud.com/v3/accounts/me -d \'{"phone_number": "12345678"}\' -H \'content-type: application/json\' -H \'Authorization: Bearer API_KEY\'` # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass asynchronous=True\n >>> thread = api.update_my_account(body, asynchronous=True)\n >>> result = thread.get()\n\n :param asynchronous bool\n :param AccountUpdateReq body: Details of the account to be updated. (required)\n :return: AccountInfo\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.update_my_account_with_http_info(body, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.update_my_account_with_http_info(body, **kwargs) # noqa: E501
return data |
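A short sketch of both calling modes the docstring describes; the configured client `api` and the `AccountUpdateReq` keyword are assumptions, not verified against the generated SDK:
body = AccountUpdateReq(phone_number="12345678")         # hypothetical model ctor
account = api.update_my_account(body)                    # synchronous call
thread = api.update_my_account(body, asynchronous=True)  # returns a thread
account = thread.get()                                   # resolve the result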
def Enable(self, value):
"enable or disable all menu items"
for i in range(self.GetMenuItemCount()):
it = self.FindItemByPosition(i)
it.Enable(value) | def function[Enable, parameter[self, value]]:
constant[enable or disable all menu items]
for taget[name[i]] in starred[call[name[range], parameter[call[name[self].GetMenuItemCount, parameter[]]]]] begin[:]
variable[it] assign[=] call[name[self].FindItemByPosition, parameter[name[i]]]
call[name[it].Enable, parameter[name[value]]] | keyword[def] identifier[Enable] ( identifier[self] , identifier[value] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[GetMenuItemCount] ()):
identifier[it] = identifier[self] . identifier[FindItemByPosition] ( identifier[i] )
identifier[it] . identifier[Enable] ( identifier[value] ) | def Enable(self, value):
"""enable or disable all menu items"""
for i in range(self.GetMenuItemCount()):
it = self.FindItemByPosition(i)
it.Enable(value) # depends on [control=['for'], data=['i']] |
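Usage is one call per state; `menu` is assumed to be an instance of the wx.Menu subclass that defines the Enable() above:
menu.Enable(False)  # grey out every item in the menu
menu.Enable(True)   # re-enable them all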
def fatigue_eval_med_freq(data, sample_rate, time_units=True, raw_to_mv=True,
device="biosignalsplux", resolution=16, show_plot=False):
"""
-----
Brief
-----
Returns the evolution time series of EMG median frequency along the acquisition, based on a sliding window
mechanism.
-----------
Description
-----------
The median frequency of activation events in EMG signal is particularly important in fatigue evaluation methods.
This function calculates the median frequency of each activation period and allows plotting those values in order to
see the temporal evolution of this particular feature.
----------
Parameters
----------
data : list
EMG signal.
sample_rate : int
Sampling frequency.
time_units : boolean
If True, this function will return the x-axis samples in seconds.
raw_to_mv : boolean
If True then it is assumed that the input samples are in a raw format and the output
results will be in mV. When True "device" and "resolution" inputs become mandatory.
device : str
Plux device label:
- "bioplux"
- "bioplux_exp"
- "biosignalsplux"
- "rachimeter"
- "channeller"
- "swifter"
- "ddme_openbanplux"
resolution : int
Resolution selected during acquisition.
show_plot : boolean
If True, then a figure with the median frequency evolution will be shown.
Returns
-------
out : pandas.DataFrame
DataFrame with the time and the sequence of median frequency evolution.
"""
# Conversion of data samples to mV if requested by raw_to_mv input.
if raw_to_mv is True:
data = raw_to_phy("EMG", device, data, resolution, option="mV")
# Definition of the time axis.
if time_units is True:
time = numpy.linspace(0, len(data) / sample_rate, len(data))
else:
time = numpy.linspace(0, len(data) - 1, len(data))
# Detection of muscular activation periods.
burst_begin, burst_end = detect_emg_activations(data, sample_rate, smooth_level=20,
threshold_level=10, time_units=False,
volts=True, resolution=resolution,
device=device, plot_result=False)[:2]
# Iteration along bursts.
median_freq_data = []
median_freq_time = []
for burst in range(0, len(burst_begin)):
processing_window = data[burst_begin[burst]:burst_end[burst]]
central_point = (burst_begin[burst] + burst_end[burst]) / 2
median_freq_time.append(central_point / sample_rate)
# Generation of the processing window power spectrum.
freqs, power = scisignal.welch(processing_window, fs=sample_rate, window='hanning',
noverlap=0, nfft=int(256.))
# Determination of median power frequency.
area_freq = integr.cumtrapz(power, freqs, initial=0)
total_power = area_freq[-1]
median_freq_data.append(freqs[numpy.where(area_freq >= total_power / 2)[0][0]])
# Graphical Representation step.
if show_plot is True:
list_figures_1 = plot([list(time), list(median_freq_time)],
[list(data), list(median_freq_data)],
title=["EMG Acquisition highlighting bursts",
"Median Frequency Evolution"], gridPlot=True,
gridLines=2, gridColumns=1, openSignalsStyle=True,
x_axis_label="Time (s)",
yAxisLabel=["Raw Data", "Median Frequency (Hz)"],
x_range=[0, 125], show_plot=False)
# Highlighting processing window.
for burst in range(0, len(burst_begin)):
color = opensignals_color_pallet()
box_annotation = BoxAnnotation(left=burst_begin[burst] / sample_rate,
right=burst_end[burst] / sample_rate, fill_color=color,
fill_alpha=0.1)
box_annotation_copy = BoxAnnotation(left=burst_begin[burst] / sample_rate,
right=burst_end[burst] / sample_rate,
fill_color=color, fill_alpha=0.1)
list_figures_1[0].add_layout(box_annotation)
list_figures_1[1].add_layout(box_annotation_copy)
gridplot_1 = gridplot([[list_figures_1[0]], [list_figures_1[1]]],
**opensignals_kwargs("gridplot"))
show(gridplot_1)
# pandas.DataFrame(a, columns=a.keys())
# pandas.DataFrame([a], columns=a.keys())
return pandas.DataFrame({"Time (s)": median_freq_time,
"Median Frequency (Hz)": median_freq_data},
columns=["Time (s)", "Median Frequency (Hz)"]) | def function[fatigue_eval_med_freq, parameter[data, sample_rate, time_units, raw_to_mv, device, resolution, show_plot]]:
constant[
-----
Brief
-----
Returns the evolution time series of EMG median frequency along the acquisition, based on a sliding window
mechanism.
-----------
Description
-----------
The median frequency of activation events in EMG signal is particularly important in fatigue evaluation methods.
This function calculates the median frequency of each activation period and allows plotting those values in order to
see the temporal evolution of this particular feature.
----------
Parameters
----------
data : list
EMG signal.
sample_rate : int
Sampling frequency.
time_units : boolean
If True, this function will return the x-axis samples in seconds.
raw_to_mv : boolean
If True then it is assumed that the input samples are in a raw format and the output
results will be in mV. When True "device" and "resolution" inputs become mandatory.
device : str
Plux device label:
- "bioplux"
- "bioplux_exp"
- "biosignalsplux"
- "rachimeter"
- "channeller"
- "swifter"
- "ddme_openbanplux"
resolution : int
Resolution selected during acquisition.
show_plot : boolean
If True, then a figure with the median frequency evolution will be shown.
Returns
-------
out : pandas.DataFrame
DataFrame with the time and the sequence of median frequency evolution.
]
if compare[name[raw_to_mv] is constant[True]] begin[:]
variable[data] assign[=] call[name[raw_to_phy], parameter[constant[EMG], name[device], name[data], name[resolution]]]
if compare[name[time_units] is constant[True]] begin[:]
variable[time] assign[=] call[name[numpy].linspace, parameter[constant[0], binary_operation[call[name[len], parameter[name[data]]] / name[sample_rate]], call[name[len], parameter[name[data]]]]]
<ast.Tuple object at 0x7da1b24ac8b0> assign[=] call[call[name[detect_emg_activations], parameter[name[data], name[sample_rate]]]][<ast.Slice object at 0x7da1b24ad4e0>]
variable[median_freq_data] assign[=] list[[]]
variable[median_freq_time] assign[=] list[[]]
for taget[name[burst]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[burst_begin]]]]]] begin[:]
variable[processing_window] assign[=] call[name[data]][<ast.Slice object at 0x7da1b24ac790>]
variable[central_point] assign[=] binary_operation[binary_operation[call[name[burst_begin]][name[burst]] + call[name[burst_end]][name[burst]]] / constant[2]]
call[name[median_freq_time].append, parameter[binary_operation[name[central_point] / name[sample_rate]]]]
<ast.Tuple object at 0x7da1b24ae8f0> assign[=] call[name[scisignal].welch, parameter[name[processing_window]]]
variable[area_freq] assign[=] call[name[integr].cumtrapz, parameter[name[power], name[freqs]]]
variable[total_power] assign[=] call[name[area_freq]][<ast.UnaryOp object at 0x7da1b25d3c40>]
call[name[median_freq_data].append, parameter[call[name[freqs]][call[call[call[name[numpy].where, parameter[compare[name[area_freq] greater_or_equal[>=] binary_operation[name[total_power] / constant[2]]]]]][constant[0]]][constant[0]]]]]
if compare[name[show_plot] is constant[True]] begin[:]
variable[list_figures_1] assign[=] call[name[plot], parameter[list[[<ast.Call object at 0x7da1b25d1120>, <ast.Call object at 0x7da1b25d1a50>]], list[[<ast.Call object at 0x7da1b25d1e70>, <ast.Call object at 0x7da1b25d3460>]]]]
for taget[name[burst]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[burst_begin]]]]]] begin[:]
variable[color] assign[=] call[name[opensignals_color_pallet], parameter[]]
variable[box_annotation] assign[=] call[name[BoxAnnotation], parameter[]]
variable[box_annotation_copy] assign[=] call[name[BoxAnnotation], parameter[]]
call[call[name[list_figures_1]][constant[0]].add_layout, parameter[name[box_annotation]]]
call[call[name[list_figures_1]][constant[1]].add_layout, parameter[name[box_annotation_copy]]]
variable[gridplot_1] assign[=] call[name[gridplot], parameter[list[[<ast.List object at 0x7da20c6a82e0>, <ast.List object at 0x7da20c6ab7c0>]]]]
call[name[show], parameter[name[gridplot_1]]]
return[call[name[pandas].DataFrame, parameter[dictionary[[<ast.Constant object at 0x7da20c6a8af0>, <ast.Constant object at 0x7da20c6a9480>], [<ast.Name object at 0x7da20c6aadd0>, <ast.Name object at 0x7da20c6a9fc0>]]]]] | keyword[def] identifier[fatigue_eval_med_freq] ( identifier[data] , identifier[sample_rate] , identifier[time_units] = keyword[True] , identifier[raw_to_mv] = keyword[True] ,
identifier[device] = literal[string] , identifier[resolution] = literal[int] , identifier[show_plot] = keyword[False] ):
literal[string]
keyword[if] identifier[raw_to_mv] keyword[is] keyword[True] :
identifier[data] = identifier[raw_to_phy] ( literal[string] , identifier[device] , identifier[data] , identifier[resolution] , identifier[option] = literal[string] )
keyword[if] identifier[time_units] keyword[is] keyword[True] :
identifier[time] = identifier[numpy] . identifier[linspace] ( literal[int] , identifier[len] ( identifier[data] )/ identifier[sample_rate] , identifier[len] ( identifier[data] ))
keyword[else] :
identifier[time] = identifier[numpy] . identifier[linspace] ( literal[int] , identifier[len] ( identifier[data] )- literal[int] , identifier[len] ( identifier[data] ))
identifier[burst_begin] , identifier[burst_end] = identifier[detect_emg_activations] ( identifier[data] , identifier[sample_rate] , identifier[smooth_level] = literal[int] ,
identifier[threshold_level] = literal[int] , identifier[time_units] = keyword[False] ,
identifier[volts] = keyword[True] , identifier[resolution] = identifier[resolution] ,
identifier[device] = identifier[device] , identifier[plot_result] = keyword[False] )[: literal[int] ]
identifier[median_freq_data] =[]
identifier[median_freq_time] =[]
keyword[for] identifier[burst] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[burst_begin] )):
identifier[processing_window] = identifier[data] [ identifier[burst_begin] [ identifier[burst] ]: identifier[burst_end] [ identifier[burst] ]]
identifier[central_point] =( identifier[burst_begin] [ identifier[burst] ]+ identifier[burst_end] [ identifier[burst] ])/ literal[int]
identifier[median_freq_time] . identifier[append] ( identifier[central_point] / identifier[sample_rate] )
identifier[freqs] , identifier[power] = identifier[scisignal] . identifier[welch] ( identifier[processing_window] , identifier[fs] = identifier[sample_rate] , identifier[window] = literal[string] ,
identifier[noverlap] = literal[int] , identifier[nfft] = identifier[int] ( literal[int] ))
identifier[area_freq] = identifier[integr] . identifier[cumtrapz] ( identifier[power] , identifier[freqs] , identifier[initial] = literal[int] )
identifier[total_power] = identifier[area_freq] [- literal[int] ]
identifier[median_freq_data] . identifier[append] ( identifier[freqs] [ identifier[numpy] . identifier[where] ( identifier[area_freq] >= identifier[total_power] / literal[int] )[ literal[int] ][ literal[int] ]])
keyword[if] identifier[show_plot] keyword[is] keyword[True] :
identifier[list_figures_1] = identifier[plot] ([ identifier[list] ( identifier[time] ), identifier[list] ( identifier[median_freq_time] )],
[ identifier[list] ( identifier[data] ), identifier[list] ( identifier[median_freq_data] )],
identifier[title] =[ literal[string] ,
literal[string] ], identifier[gridPlot] = keyword[True] ,
identifier[gridLines] = literal[int] , identifier[gridColumns] = literal[int] , identifier[openSignalsStyle] = keyword[True] ,
identifier[x_axis_label] = literal[string] ,
identifier[yAxisLabel] =[ literal[string] , literal[string] ],
identifier[x_range] =[ literal[int] , literal[int] ], identifier[show_plot] = keyword[False] )
keyword[for] identifier[burst] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[burst_begin] )):
identifier[color] = identifier[opensignals_color_pallet] ()
identifier[box_annotation] = identifier[BoxAnnotation] ( identifier[left] = identifier[burst_begin] [ identifier[burst] ]/ identifier[sample_rate] ,
identifier[right] = identifier[burst_end] [ identifier[burst] ]/ identifier[sample_rate] , identifier[fill_color] = identifier[color] ,
identifier[fill_alpha] = literal[int] )
identifier[box_annotation_copy] = identifier[BoxAnnotation] ( identifier[left] = identifier[burst_begin] [ identifier[burst] ]/ identifier[sample_rate] ,
identifier[right] = identifier[burst_end] [ identifier[burst] ]/ identifier[sample_rate] ,
identifier[fill_color] = identifier[color] , identifier[fill_alpha] = literal[int] )
identifier[list_figures_1] [ literal[int] ]. identifier[add_layout] ( identifier[box_annotation] )
identifier[list_figures_1] [ literal[int] ]. identifier[add_layout] ( identifier[box_annotation_copy] )
identifier[gridplot_1] = identifier[gridplot] ([[ identifier[list_figures_1] [ literal[int] ]],[ identifier[list_figures_1] [ literal[int] ]]],
** identifier[opensignals_kwargs] ( literal[string] ))
identifier[show] ( identifier[gridplot_1] )
keyword[return] identifier[pandas] . identifier[DataFrame] ({ literal[string] : identifier[median_freq_time] ,
literal[string] : identifier[median_freq_data] },
identifier[columns] =[ literal[string] , literal[string] ]) | def fatigue_eval_med_freq(data, sample_rate, time_units=True, raw_to_mv=True, device='biosignalsplux', resolution=16, show_plot=False):
"""
-----
Brief
-----
Returns the evolution time series of EMG median frequency along the acquisition, based on a sliding window
mechanism.
-----------
Description
-----------
The median frequency of activation events in EMG signal is particularly important in fatigue evaluation methods.
This function calculates the median frequency of each activation period and allows plotting those values in order to
see the temporal evolution of this particular feature.
----------
Parameters
----------
data : list
EMG signal.
sample_rate : int
Sampling frequency.
time_units : boolean
If True, this function will return the x-axis samples in seconds.
raw_to_mv : boolean
If True then it is assumed that the input samples are in a raw format and the output
results will be in mV. When True "device" and "resolution" inputs become mandatory.
device : str
Plux device label:
- "bioplux"
- "bioplux_exp"
- "biosignalsplux"
- "rachimeter"
- "channeller"
- "swifter"
- "ddme_openbanplux"
resolution : int
Resolution selected during acquisition.
show_plot : boolean
If True, then a figure with the median frequency evolution will be shown.
Returns
-------
out : pandas.DataFrame
DataFrame with the time and the sequence of median frequency evolution.
"""
# Conversion of data samples to mV if requested by raw_to_mv input.
if raw_to_mv is True:
data = raw_to_phy('EMG', device, data, resolution, option='mV') # depends on [control=['if'], data=[]]
# Definition of the time axis.
if time_units is True:
time = numpy.linspace(0, len(data) / sample_rate, len(data)) # depends on [control=['if'], data=[]]
else:
time = numpy.linspace(0, len(data) - 1, len(data))
# Detection of muscular activation periods.
(burst_begin, burst_end) = detect_emg_activations(data, sample_rate, smooth_level=20, threshold_level=10, time_units=False, volts=True, resolution=resolution, device=device, plot_result=False)[:2]
# Iteration along bursts.
median_freq_data = []
median_freq_time = []
for burst in range(0, len(burst_begin)):
processing_window = data[burst_begin[burst]:burst_end[burst]]
central_point = (burst_begin[burst] + burst_end[burst]) / 2
median_freq_time.append(central_point / sample_rate)
# Generation of the processing window power spectrum.
(freqs, power) = scisignal.welch(processing_window, fs=sample_rate, window='hanning', noverlap=0, nfft=int(256.0))
# Determination of median power frequency.
area_freq = integr.cumtrapz(power, freqs, initial=0)
total_power = area_freq[-1]
median_freq_data.append(freqs[numpy.where(area_freq >= total_power / 2)[0][0]]) # depends on [control=['for'], data=['burst']]
# Graphical Representation step.
if show_plot is True:
list_figures_1 = plot([list(time), list(median_freq_time)], [list(data), list(median_freq_data)], title=['EMG Acquisition highlighting bursts', 'Median Frequency Evolution'], gridPlot=True, gridLines=2, gridColumns=1, openSignalsStyle=True, x_axis_label='Time (s)', yAxisLabel=['Raw Data', 'Median Frequency (Hz)'], x_range=[0, 125], show_plot=False)
# Highlighting processing window.
for burst in range(0, len(burst_begin)):
color = opensignals_color_pallet()
box_annotation = BoxAnnotation(left=burst_begin[burst] / sample_rate, right=burst_end[burst] / sample_rate, fill_color=color, fill_alpha=0.1)
box_annotation_copy = BoxAnnotation(left=burst_begin[burst] / sample_rate, right=burst_end[burst] / sample_rate, fill_color=color, fill_alpha=0.1)
list_figures_1[0].add_layout(box_annotation)
list_figures_1[1].add_layout(box_annotation_copy) # depends on [control=['for'], data=['burst']]
gridplot_1 = gridplot([[list_figures_1[0]], [list_figures_1[1]]], **opensignals_kwargs('gridplot'))
show(gridplot_1) # depends on [control=['if'], data=[]]
# pandas.DataFrame(a, columns=a.keys())
# pandas.DataFrame([a], columns=a.keys())
return pandas.DataFrame({'Time (s)': median_freq_time, 'Median Frequency (Hz)': median_freq_data}, columns=['Time (s)', 'Median Frequency (Hz)']) |
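An illustrative call under the defaults documented above; `emg_raw` is a hypothetical list of raw biosignalsplux samples and 1000 Hz is an assumed sampling rate:
df = fatigue_eval_med_freq(emg_raw, sample_rate=1000, device="biosignalsplux",
                           resolution=16, show_plot=False)
print(df.head())  # columns: "Time (s)" and "Median Frequency (Hz)"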
def fixed_point_uniform(points, cells, *args, **kwargs):
"""Idea:
Move interior mesh points into the weighted averages of the circumcenters
of their adjacent cells. If a triangle cell switches orientation in the
process, don't move quite so far.
"""
def get_new_points(mesh):
# Get circumcenters everywhere except at cells adjacent to the boundary;
# barycenters there.
cc = mesh.cell_circumcenters
bc = mesh.cell_barycenters
# Find all cells with a boundary edge
boundary_cell_ids = mesh.edges_cells[1][:, 0]
cc[boundary_cell_ids] = bc[boundary_cell_ids]
return get_new_points_volume_averaged(mesh, cc)
mesh = MeshTri(points, cells)
runner(get_new_points, mesh, *args, **kwargs)
return mesh.node_coords, mesh.cells["nodes"] | def function[fixed_point_uniform, parameter[points, cells]]:
constant[Idea:
Move interior mesh points into the weighted averages of the circumcenters
of their adjacent cells. If a triangle cell switches orientation in the
process, don't move quite so far.
]
def function[get_new_points, parameter[mesh]]:
variable[cc] assign[=] name[mesh].cell_circumcenters
variable[bc] assign[=] name[mesh].cell_barycenters
variable[boundary_cell_ids] assign[=] call[call[name[mesh].edges_cells][constant[1]]][tuple[[<ast.Slice object at 0x7da20c7c8b80>, <ast.Constant object at 0x7da20c7c9660>]]]
call[name[cc]][name[boundary_cell_ids]] assign[=] call[name[bc]][name[boundary_cell_ids]]
return[call[name[get_new_points_volume_averaged], parameter[name[mesh], name[cc]]]]
variable[mesh] assign[=] call[name[MeshTri], parameter[name[points], name[cells]]]
call[name[runner], parameter[name[get_new_points], name[mesh], <ast.Starred object at 0x7da18fe91600>]]
return[tuple[[<ast.Attribute object at 0x7da18fe92020>, <ast.Subscript object at 0x7da18fe91c30>]]] | keyword[def] identifier[fixed_point_uniform] ( identifier[points] , identifier[cells] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[get_new_points] ( identifier[mesh] ):
identifier[cc] = identifier[mesh] . identifier[cell_circumcenters]
identifier[bc] = identifier[mesh] . identifier[cell_barycenters]
identifier[boundary_cell_ids] = identifier[mesh] . identifier[edges_cells] [ literal[int] ][:, literal[int] ]
identifier[cc] [ identifier[boundary_cell_ids] ]= identifier[bc] [ identifier[boundary_cell_ids] ]
keyword[return] identifier[get_new_points_volume_averaged] ( identifier[mesh] , identifier[cc] )
identifier[mesh] = identifier[MeshTri] ( identifier[points] , identifier[cells] )
identifier[runner] ( identifier[get_new_points] , identifier[mesh] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[mesh] . identifier[node_coords] , identifier[mesh] . identifier[cells] [ literal[string] ] | def fixed_point_uniform(points, cells, *args, **kwargs):
"""Idea:
Move interior mesh points into the weighted averages of the circumcenters
of their adjacent cells. If a triangle cell switches orientation in the
process, don't move quite so far.
"""
def get_new_points(mesh):
# Get circumcenters everywhere except at cells adjacent to the boundary;
# barycenters there.
cc = mesh.cell_circumcenters
bc = mesh.cell_barycenters
# Find all cells with a boundary edge
boundary_cell_ids = mesh.edges_cells[1][:, 0]
cc[boundary_cell_ids] = bc[boundary_cell_ids]
return get_new_points_volume_averaged(mesh, cc)
mesh = MeshTri(points, cells)
runner(get_new_points, mesh, *args, **kwargs)
return (mesh.node_coords, mesh.cells['nodes']) |
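A toy invocation on two triangles sharing an edge; the trailing positional arguments are assumed to be the tolerance and step limit that runner() forwards, which is not verified here:
import numpy
points = numpy.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
cells = numpy.array([[0, 1, 2], [0, 2, 3]])
new_points, new_cells = fixed_point_uniform(points, cells, 1.0e-10, 100)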
def cmServiceAccept():
"""CM SERVICE ACCEPT Section 9.2.5"""
a = TpPd(pd=0x5)
b = MessageType(mesType=0x21) # 00100001
packet = a / b
return packet | def function[cmServiceAccept, parameter[]]:
constant[CM SERVICE ACCEPT Section 9.2.5]
variable[a] assign[=] call[name[TpPd], parameter[]]
variable[b] assign[=] call[name[MessageType], parameter[]]
variable[packet] assign[=] binary_operation[name[a] / name[b]]
return[name[packet]] | keyword[def] identifier[cmServiceAccept] ():
literal[string]
identifier[a] = identifier[TpPd] ( identifier[pd] = literal[int] )
identifier[b] = identifier[MessageType] ( identifier[mesType] = literal[int] )
identifier[packet] = identifier[a] / identifier[b]
keyword[return] identifier[packet] | def cmServiceAccept():
"""CM SERVICE ACCEPT Section 9.2.5"""
a = TpPd(pd=5)
b = MessageType(mesType=33) # 00100001
packet = a / b
return packet |
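The builder stacks two layers with Scapy's `/` operator, so the result can be inspected with standard Scapy calls (assuming TpPd and MessageType come from Scapy's GSM Um module):
pkt = cmServiceAccept()
pkt.show()        # dump the layer fields
raw = bytes(pkt)  # serialize for transmission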
def status(self):
"""Return current readings, as a dictionary with:
duration -- the duration of the measurements, in seconds;
cpm -- the radiation count per minute;
uSvh -- the radiation dose, expressed in microsieverts per hour (uSv/h);
uSvhError -- the uncertainty of the radiation dose."""
minutes = min(self.duration, MAX_CPM_TIME) / 1000 / 60.0
cpm = self.count / minutes if minutes > 0 else 0
return dict(
duration=round(self.duration / 1000.0, 2),
cpm=round(cpm, 2),
uSvh=round(cpm / K_ALPHA, 3),
uSvhError=round(math.sqrt(self.count) / minutes / K_ALPHA, 3)
if minutes > 0
else 0,
) | def function[status, parameter[self]]:
constant[Return current readings, as a dictionary with:
duration -- the duration of the measurements, in seconds;
cpm -- the radiation count per minute;
uSvh -- the radiation dose, expressed in microsieverts per hour (uSv/h);
uSvhError -- the uncertainty of the radiation dose.]
variable[minutes] assign[=] binary_operation[binary_operation[call[name[min], parameter[name[self].duration, name[MAX_CPM_TIME]]] / constant[1000]] / constant[60.0]]
variable[cpm] assign[=] <ast.IfExp object at 0x7da18bc73c40>
return[call[name[dict], parameter[]]] | keyword[def] identifier[status] ( identifier[self] ):
literal[string]
identifier[minutes] = identifier[min] ( identifier[self] . identifier[duration] , identifier[MAX_CPM_TIME] )/ literal[int] / literal[int]
identifier[cpm] = identifier[self] . identifier[count] / identifier[minutes] keyword[if] identifier[minutes] > literal[int] keyword[else] literal[int]
keyword[return] identifier[dict] (
identifier[duration] = identifier[round] ( identifier[self] . identifier[duration] / literal[int] , literal[int] ),
identifier[cpm] = identifier[round] ( identifier[cpm] , literal[int] ),
identifier[uSvh] = identifier[round] ( identifier[cpm] / identifier[K_ALPHA] , literal[int] ),
identifier[uSvhError] = identifier[round] ( identifier[math] . identifier[sqrt] ( identifier[self] . identifier[count] )/ identifier[minutes] / identifier[K_ALPHA] , literal[int] )
keyword[if] identifier[minutes] > literal[int]
keyword[else] literal[int] ,
) | def status(self):
"""Return current readings, as a dictionary with:
duration -- the duration of the measurements, in seconds;
cpm -- the radiation count per minute;
uSvh -- the radiation dose, expressed in microsieverts per hour (uSv/h);
uSvhError -- the uncertainty of the radiation dose."""
minutes = min(self.duration, MAX_CPM_TIME) / 1000 / 60.0
cpm = self.count / minutes if minutes > 0 else 0
return dict(duration=round(self.duration / 1000.0, 2), cpm=round(cpm, 2), uSvh=round(cpm / K_ALPHA, 3), uSvhError=round(math.sqrt(self.count) / minutes / K_ALPHA, 3) if minutes > 0 else 0) |
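Reading the computed values back is a plain dictionary lookup; `counter` is a hypothetical instance that has been accumulating counts:
reading = counter.status()
print("{cpm} cpm, {uSvh} uSv/h (+/- {uSvhError}) over {duration} s"
      .format(**reading))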
def fit(self, X, y=None):
"""Fit Preprocessing to X.
Parameters
----------
X : list of array-like, each of shape [sequence_length, n_features]
    Multivariate timeseries; concatenated along axis 0 before fitting.
y : None
Ignored
Returns
-------
self
"""
return self.partial_fit(np.concatenate(X, axis=0)) | def function[fit, parameter[self, X, y]]:
constant[Fit Preprocessing to X.
Parameters
----------
sequence : array-like, [sequence_length, n_features]
A multivariate timeseries.
y : None
Ignored
Returns
-------
self
]
return[call[name[self].partial_fit, parameter[call[name[np].concatenate, parameter[name[X]]]]]] | keyword[def] identifier[fit] ( identifier[self] , identifier[X] , identifier[y] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[partial_fit] ( identifier[np] . identifier[concatenate] ( identifier[X] , identifier[axis] = literal[int] )) | def fit(self, X, y=None):
"""Fit Preprocessing to X.
Parameters
----------
X : list of array-like, each of shape [sequence_length, n_features]
    Multivariate timeseries; concatenated along axis 0 before fitting.
y : None
Ignored
Returns
-------
self
"""
return self.partial_fit(np.concatenate(X, axis=0)) |
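A sketch with two toy trajectories; `model` is assumed to be an instance of the preprocessing class that defines this fit()/partial_fit() pair:
import numpy as np
X = [np.random.rand(100, 3), np.random.rand(80, 3)]  # two short timeseries
model.fit(X)  # concatenated to shape (180, 3) and fitted in one pass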
def _connect_setns(spec, kind=None):
"""
Return ContextService arguments for a mitogen_setns connection.
"""
return {
'method': 'setns',
'kwargs': {
'container': spec.remote_addr(),
'username': spec.remote_user(),
'python_path': spec.python_path(),
'kind': kind or spec.mitogen_kind(),
'docker_path': spec.mitogen_docker_path(),
'lxc_path': spec.mitogen_lxc_path(),
'lxc_info_path': spec.mitogen_lxc_info_path(),
'machinectl_path': spec.mitogen_machinectl_path(),
}
} | def function[_connect_setns, parameter[spec, kind]]:
constant[
Return ContextService arguments for a mitogen_setns connection.
]
return[dictionary[[<ast.Constant object at 0x7da1b1d52830>, <ast.Constant object at 0x7da1b1d53820>], [<ast.Constant object at 0x7da1b1d52620>, <ast.Dict object at 0x7da1b1d50d00>]]] | keyword[def] identifier[_connect_setns] ( identifier[spec] , identifier[kind] = keyword[None] ):
literal[string]
keyword[return] {
literal[string] : literal[string] ,
literal[string] :{
literal[string] : identifier[spec] . identifier[remote_addr] (),
literal[string] : identifier[spec] . identifier[remote_user] (),
literal[string] : identifier[spec] . identifier[python_path] (),
literal[string] : identifier[kind] keyword[or] identifier[spec] . identifier[mitogen_kind] (),
literal[string] : identifier[spec] . identifier[mitogen_docker_path] (),
literal[string] : identifier[spec] . identifier[mitogen_lxc_path] (),
literal[string] : identifier[spec] . identifier[mitogen_lxc_info_path] (),
literal[string] : identifier[spec] . identifier[mitogen_machinectl_path] (),
}
} | def _connect_setns(spec, kind=None):
"""
Return ContextService arguments for a mitogen_setns connection.
"""
return {'method': 'setns', 'kwargs': {'container': spec.remote_addr(), 'username': spec.remote_user(), 'python_path': spec.python_path(), 'kind': kind or spec.mitogen_kind(), 'docker_path': spec.mitogen_docker_path(), 'lxc_path': spec.mitogen_lxc_path(), 'lxc_info_path': spec.mitogen_lxc_info_path(), 'machinectl_path': spec.mitogen_machinectl_path()}} |
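Because the helper only shapes a dictionary, its output is easy to sanity-check; `spec` is assumed to be the transport-spec object the Mitogen/Ansible integration passes in:
config = _connect_setns(spec, kind='docker')
assert config['method'] == 'setns'
assert config['kwargs']['kind'] == 'docker'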
def vnc_raspi_osmc():
'''Install and configure dispmanx_vnc server on osmc (raspberry pi).
More info:
* https://github.com/patrikolausson/dispmanx_vnc
* https://discourse.osmc.tv/t/howto-install-a-vnc-server-on-the-raspberry-pi/1517
* tightvnc:
* http://raspberry.tips/raspberrypi-einsteiger/raspberry-pi-einsteiger-guide-vnc-einrichten-teil-4/
* http://jankarres.de/2012/08/raspberry-pi-vnc-server-installieren/
'''
print(blue('Install dependencies'))
install_packages([
'git',
'build-essential',
'rbp-userland-dev-osmc',
'libvncserver-dev',
'libconfig++-dev',
])
print(blue('Build vnc server for raspberry pi using dispmanx '
'(dispmanx_vnc)'))
checkup_git_repo_legacy(
url='https://github.com/patrikolausson/dispmanx_vnc.git')
run('mkdir -p ~/repos')
run('cd ~/repos/dispmanx_vnc && make')
print(blue('set up dispmanx_vnc as a service'))
with warn_only():
run('sudo systemctl stop dispmanx_vncserver.service')
username = env.user
builddir = flo('/home/{username}/repos/dispmanx_vnc')
run(flo('sudo cp {builddir}/dispmanx_vncserver /usr/bin'))
run('sudo chmod +x /usr/bin/dispmanx_vncserver')
fabfile_data_dir = FABFILE_DATA_DIR
put(flo('{fabfile_data_dir}/files/etc/dispmanx_vncserver.conf'), '/tmp/')
run('sudo mv /tmp/dispmanx_vncserver.conf /etc/dispmanx_vncserver.conf')
put(flo('{fabfile_data_dir}/files/etc/systemd/system/dispmanx_vncserver.service'),
'/tmp/')
run('sudo mv /tmp/dispmanx_vncserver.service '
'/etc/systemd/system/dispmanx_vncserver.service')
run('sudo systemctl start dispmanx_vncserver.service')
run('sudo systemctl enable dispmanx_vncserver.service')
run('sudo systemctl daemon-reload') | def function[vnc_raspi_osmc, parameter[]]:
constant[Install and configure dispmanx_vnc server on osmc (raspberry pi).
More info:
* https://github.com/patrikolausson/dispmanx_vnc
* https://discourse.osmc.tv/t/howto-install-a-vnc-server-on-the-raspberry-pi/1517
* tightvnc:
* http://raspberry.tips/raspberrypi-einsteiger/raspberry-pi-einsteiger-guide-vnc-einrichten-teil-4/
* http://jankarres.de/2012/08/raspberry-pi-vnc-server-installieren/
]
call[name[print], parameter[call[name[blue], parameter[constant[Install dependencies]]]]]
call[name[install_packages], parameter[list[[<ast.Constant object at 0x7da18dc9b910>, <ast.Constant object at 0x7da18dc99ab0>, <ast.Constant object at 0x7da18dc9b820>, <ast.Constant object at 0x7da18dc9bd90>, <ast.Constant object at 0x7da18dc985b0>]]]]
call[name[print], parameter[call[name[blue], parameter[constant[Build vnc server for raspberry pi using dispmanx (dispmanx_vnc)]]]]]
call[name[checkup_git_repo_legacy], parameter[]]
call[name[run], parameter[constant[mkdir -p ~/repos]]]
call[name[run], parameter[constant[cd ~/repos/dispmanx_vnc && make]]]
call[name[print], parameter[call[name[blue], parameter[constant[set up dispmanx_vnc as a service]]]]]
with call[name[warn_only], parameter[]] begin[:]
call[name[run], parameter[constant[sudo systemctl stop dispmanx_vncserver.service]]]
variable[username] assign[=] name[env].user
variable[builddir] assign[=] call[name[flo], parameter[constant[/home/{username}/repos/dispmanx_vnc]]]
call[name[run], parameter[call[name[flo], parameter[constant[sudo cp {builddir}/dispmanx_vncserver /usr/bin]]]]]
call[name[run], parameter[constant[sudo chmod +x /usr/bin/dispmanx_vncserver]]]
variable[fabfile_data_dir] assign[=] name[FABFILE_DATA_DIR]
call[name[put], parameter[constant[{fabfile_data_dir}/files/etc/dispmanx_vncserver.conf], constant[/tmp/]]]
call[name[run], parameter[constant[sudo mv /tmp/dispmanx_vncserver.conf /etc/dispmanx_vncserver.conf]]]
call[name[put], parameter[constant[{fabfile_data_dir}/files/etc/systemd/system/dispmanx_vncserver.service], constant[/tmp/]]]
call[name[run], parameter[constant[sudo mv /tmp/dispmanx_vncserver.service /etc/systemd/system/dispmanx_vncserver.service]]]
call[name[run], parameter[constant[sudo systemctl start dispmanx_vncserver.service]]]
call[name[run], parameter[constant[sudo systemctl enable dispmanx_vncserver.service]]]
call[name[run], parameter[constant[sudo systemctl daemon-reload]]] | keyword[def] identifier[vnc_raspi_osmc] ():
literal[string]
identifier[print] ( identifier[blue] ( literal[string] ))
identifier[install_packages] ([
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
])
identifier[print] ( identifier[blue] ( literal[string]
literal[string] ))
identifier[checkup_git_repo_legacy] (
identifier[url] = literal[string] )
identifier[run] ( literal[string] )
identifier[run] ( literal[string] )
identifier[print] ( identifier[blue] ( literal[string] ))
keyword[with] identifier[warn_only] ():
identifier[run] ( literal[string] )
identifier[username] = identifier[env] . identifier[user]
identifier[builddir] = identifier[flo] ( literal[string] )
identifier[run] ( identifier[flo] ( literal[string] ))
identifier[run] ( literal[string] )
identifier[fabfile_data_dir] = identifier[FABFILE_DATA_DIR]
identifier[put] ( literal[string] , literal[string] )
identifier[run] ( literal[string] )
identifier[put] ( literal[string] ,
literal[string] )
identifier[run] ( literal[string]
literal[string] )
identifier[run] ( literal[string] )
identifier[run] ( literal[string] )
identifier[run] ( literal[string] ) | def vnc_raspi_osmc():
"""Install and configure dispmanx_vnc server on osmc (raspberry pi).
More info:
* https://github.com/patrikolausson/dispmanx_vnc
* https://discourse.osmc.tv/t/howto-install-a-vnc-server-on-the-raspberry-pi/1517
* tightvnc:
* http://raspberry.tips/raspberrypi-einsteiger/raspberry-pi-einsteiger-guide-vnc-einrichten-teil-4/
* http://jankarres.de/2012/08/raspberry-pi-vnc-server-installieren/
"""
print(blue('Install dependencies'))
install_packages(['git', 'build-essential', 'rbp-userland-dev-osmc', 'libvncserver-dev', 'libconfig++-dev'])
print(blue('Build vnc server for raspberry pi using dispmanx (dispmanx_vnc)'))
checkup_git_repo_legacy(url='https://github.com/patrikolausson/dispmanx_vnc.git')
run('mkdir -p ~/repos')
run('cd ~/repos/dispmanx_vnc && make')
print(blue('set up dispmanx_vnc as a service'))
with warn_only():
run('sudo systemctl stop dispmanx_vncserver.service') # depends on [control=['with'], data=[]]
username = env.user
builddir = flo('/home/{username}/repos/dispmanx_vnc')
run(flo('sudo cp {builddir}/dispmanx_vncserver /usr/bin'))
run('sudo chmod +x /usr/bin/dispmanx_vncserver')
fabfile_data_dir = FABFILE_DATA_DIR
put(flo('{fabfile_data_dir}/files/etc/dispmanx_vncserver.conf'), '/tmp/')
run('sudo mv /tmp/dispmanx_vncserver.conf /etc/dispmanx_vncserver.conf')
put(flo('{fabfile_data_dir}/files/etc/systemd/system/dispmanx_vncserver.service'), '/tmp/')
run('sudo mv /tmp/dispmanx_vncserver.service /etc/systemd/system/dispmanx_vncserver.service')
run('sudo systemctl start dispmanx_vncserver.service')
run('sudo systemctl enable dispmanx_vncserver.service')
run('sudo systemctl daemon-reload') |
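One way to drive the task from Python under Fabric 1.x; the host string is a placeholder:
from fabric.api import execute
execute(vnc_raspi_osmc, hosts=['osmc@raspberrypi.local'])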
def search_memories(self):
"""
Search and return a list of 1-wire memories.
"""
if not self.connected:
raise NotConnected()
return self._cf.mem.get_mems(MemoryElement.TYPE_1W) | def function[search_memories, parameter[self]]:
constant[
Search and return a list of 1-wire memories.
]
if <ast.UnaryOp object at 0x7da1b1634250> begin[:]
<ast.Raise object at 0x7da1b16355d0>
return[call[name[self]._cf.mem.get_mems, parameter[name[MemoryElement].TYPE_1W]]] | keyword[def] identifier[search_memories] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[connected] :
keyword[raise] identifier[NotConnected] ()
keyword[return] identifier[self] . identifier[_cf] . identifier[mem] . identifier[get_mems] ( identifier[MemoryElement] . identifier[TYPE_1W] ) | def search_memories(self):
"""
Search and return a list of 1-wire memories.
"""
if not self.connected:
raise NotConnected() # depends on [control=['if'], data=[]]
return self._cf.mem.get_mems(MemoryElement.TYPE_1W) |
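Iterating the result gives a quick inventory; `deck_finder` is a hypothetical object exposing the method, backed by a connected Crazyflie:
for mem in deck_finder.search_memories():
    print(mem)  # each entry is a 1-wire memory element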
def sflow_source_ip(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
sflow = ET.SubElement(config, "sflow", xmlns="urn:brocade.com:mgmt:brocade-sflow")
source_ip = ET.SubElement(sflow, "source-ip")
source_ip.text = kwargs.pop('source_ip')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[sflow_source_ip, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[sflow] assign[=] call[name[ET].SubElement, parameter[name[config], constant[sflow]]]
variable[source_ip] assign[=] call[name[ET].SubElement, parameter[name[sflow], constant[source-ip]]]
name[source_ip].text assign[=] call[name[kwargs].pop, parameter[constant[source_ip]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[sflow_source_ip] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[sflow] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[source_ip] = identifier[ET] . identifier[SubElement] ( identifier[sflow] , literal[string] )
identifier[source_ip] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def sflow_source_ip(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
sflow = ET.SubElement(config, 'sflow', xmlns='urn:brocade.com:mgmt:brocade-sflow')
source_ip = ET.SubElement(sflow, 'source-ip')
source_ip.text = kwargs.pop('source_ip')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
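The generated method builds a NETCONF <config> payload and hands it to a callback; `device` is assumed to be the driver object that supplies `_callback`:
device.sflow_source_ip(source_ip='10.0.0.1')  # push via the default callback
device.sflow_source_ip(source_ip='10.0.0.1',
                       callback=lambda cfg: print(ET.tostring(cfg)))  # inspect instead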
def histogram(data, **kwargs):
"""
Function to create histogram, e.g. for voltages or currents.
Parameters
----------
data : :pandas:`pandas.DataFrame<dataframe>`
Data to be plotted, e.g. voltage or current (`v_res` or `i_res` from
:class:`edisgo.grid.network.Results`). Index of the dataframe must be
a :pandas:`pandas.DatetimeIndex<datetimeindex>`.
timeindex : :pandas:`pandas.Timestamp<timestamp>` or None, optional
Specifies time step histogram is plotted for. If timeindex is None all
time steps provided in dataframe are used. Default: None.
directory : :obj:`str` or None, optional
Path to directory the plot is saved to. Is created if it does not
exist. Default: None.
filename : :obj:`str` or None, optional
Filename the plot is saved as. File format is specified by ending. If
filename is None, the plot is shown. Default: None.
color : :obj:`str` or None, optional
Color used in plot. If None it defaults to blue. Default: None.
alpha : :obj:`float`, optional
Transparency of the plot. Must be a number between 0 and 1,
where 0 is see through and 1 is opaque. Default: 1.
title : :obj:`str` or None, optional
Plot title. Default: None.
x_label : :obj:`str`, optional
Label for x-axis. Default: "".
y_label : :obj:`str`, optional
Label for y-axis. Default: "".
normed : :obj:`bool`, optional
Defines if histogram is normed. Default: False.
x_limits : :obj:`tuple` or None, optional
Tuple with x-axis limits. First entry is the minimum and second entry
the maximum value. Default: None.
y_limits : :obj:`tuple` or None, optional
Tuple with y-axis limits. First entry is the minimum and second entry
the maximum value. Default: None.
fig_size : :obj:`str` or :obj:`tuple`, optional
Size of the figure in inches or a string with the following options:
* 'a4portrait'
* 'a4landscape'
* 'a5portrait'
* 'a5landscape'
Default: 'a5landscape'.
binwidth : :obj:`float`
Width of bins. Default: None.
"""
timeindex = kwargs.get('timeindex', None)
directory = kwargs.get('directory', None)
filename = kwargs.get('filename', None)
title = kwargs.get('title', "")
x_label = kwargs.get('x_label', "")
y_label = kwargs.get('y_label', "")
color = kwargs.get('color', None)
alpha = kwargs.get('alpha', 1)
normed = kwargs.get('normed', False)
x_limits = kwargs.get('x_limits', None)
y_limits = kwargs.get('y_limits', None)
binwidth = kwargs.get('binwidth', None)
fig_size = kwargs.get('fig_size', 'a5landscape')
standard_sizes = {'a4portrait': (8.27, 11.69),
'a4landscape': (11.69, 8.27),
'a5portrait': (5.8, 8.3),
'a5landscape': (8.3, 5.8)}
try:
fig_size = standard_sizes[fig_size]
except (KeyError, TypeError):
fig_size = standard_sizes['a5landscape']
if timeindex is not None:
plot_data = data.loc[timeindex, :]
else:
plot_data = data.T.stack()
if binwidth is not None:
if x_limits is not None:
lowerlimit = x_limits[0] - binwidth / 2
upperlimit = x_limits[1] + binwidth / 2
else:
lowerlimit = plot_data.min() - binwidth / 2
upperlimit = plot_data.max() + binwidth / 2
bins = np.arange(lowerlimit, upperlimit, binwidth)
else:
bins = 10
plt.figure(figsize=fig_size)
ax = plot_data.hist(
normed=normed, color=color, alpha=alpha, bins=bins, grid=True)
plt.minorticks_on()
if x_limits is not None:
ax.set_xlim(x_limits[0], x_limits[1])
if y_limits is not None:
ax.set_ylim(y_limits[0], y_limits[1])
if title is not None:
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
if filename is None:
plt.show()
else:
if directory is not None:
os.makedirs(directory, exist_ok=True)
filename = os.path.join(directory, filename)
plt.savefig(filename)
plt.close() | def function[histogram, parameter[data]]:
constant[
Function to create histogram, e.g. for voltages or currents.
Parameters
----------
data : :pandas:`pandas.DataFrame<dataframe>`
Data to be plotted, e.g. voltage or current (`v_res` or `i_res` from
:class:`edisgo.grid.network.Results`). Index of the dataframe must be
a :pandas:`pandas.DatetimeIndex<datetimeindex>`.
timeindex : :pandas:`pandas.Timestamp<timestamp>` or None, optional
Specifies time step histogram is plotted for. If timeindex is None all
time steps provided in dataframe are used. Default: None.
directory : :obj:`str` or None, optional
Path to directory the plot is saved to. Is created if it does not
exist. Default: None.
filename : :obj:`str` or None, optional
Filename the plot is saved as. File format is specified by ending. If
filename is None, the plot is shown. Default: None.
color : :obj:`str` or None, optional
Color used in plot. If None it defaults to blue. Default: None.
alpha : :obj:`float`, optional
Transparency of the plot. Must be a number between 0 and 1,
where 0 is see through and 1 is opaque. Default: 1.
title : :obj:`str` or None, optional
Plot title. Default: None.
x_label : :obj:`str`, optional
Label for x-axis. Default: "".
y_label : :obj:`str`, optional
Label for y-axis. Default: "".
normed : :obj:`bool`, optional
Defines if histogram is normed. Default: False.
x_limits : :obj:`tuple` or None, optional
Tuple with x-axis limits. First entry is the minimum and second entry
the maximum value. Default: None.
y_limits : :obj:`tuple` or None, optional
Tuple with y-axis limits. First entry is the minimum and second entry
the maximum value. Default: None.
fig_size : :obj:`str` or :obj:`tuple`, optional
Size of the figure in inches or a string with the following options:
* 'a4portrait'
* 'a4landscape'
* 'a5portrait'
* 'a5landscape'
Default: 'a5landscape'.
binwidth : :obj:`float`
Width of bins. Default: None.
]
variable[timeindex] assign[=] call[name[kwargs].get, parameter[constant[timeindex], constant[None]]]
variable[directory] assign[=] call[name[kwargs].get, parameter[constant[directory], constant[None]]]
variable[filename] assign[=] call[name[kwargs].get, parameter[constant[filename], constant[None]]]
variable[title] assign[=] call[name[kwargs].get, parameter[constant[title], constant[]]]
variable[x_label] assign[=] call[name[kwargs].get, parameter[constant[x_label], constant[]]]
variable[y_label] assign[=] call[name[kwargs].get, parameter[constant[y_label], constant[]]]
variable[color] assign[=] call[name[kwargs].get, parameter[constant[color], constant[None]]]
variable[alpha] assign[=] call[name[kwargs].get, parameter[constant[alpha], constant[1]]]
variable[normed] assign[=] call[name[kwargs].get, parameter[constant[normed], constant[False]]]
variable[x_limits] assign[=] call[name[kwargs].get, parameter[constant[x_limits], constant[None]]]
variable[y_limits] assign[=] call[name[kwargs].get, parameter[constant[y_limits], constant[None]]]
variable[binwidth] assign[=] call[name[kwargs].get, parameter[constant[binwidth], constant[None]]]
variable[fig_size] assign[=] call[name[kwargs].get, parameter[constant[fig_size], constant[a5landscape]]]
variable[standard_sizes] assign[=] dictionary[[<ast.Constant object at 0x7da1b04bc0a0>, <ast.Constant object at 0x7da1b04bc850>, <ast.Constant object at 0x7da1b04bfb20>, <ast.Constant object at 0x7da1b04bc700>], [<ast.Tuple object at 0x7da1b04bdf00>, <ast.Tuple object at 0x7da1b04bcee0>, <ast.Tuple object at 0x7da1b04bfd00>, <ast.Tuple object at 0x7da1b04bc610>]]
<ast.Try object at 0x7da1b04be7a0>
if compare[name[timeindex] is_not constant[None]] begin[:]
variable[plot_data] assign[=] call[name[data].loc][tuple[[<ast.Name object at 0x7da1b04be5f0>, <ast.Slice object at 0x7da1b04be110>]]]
if compare[name[binwidth] is_not constant[None]] begin[:]
if compare[name[x_limits] is_not constant[None]] begin[:]
variable[lowerlimit] assign[=] binary_operation[call[name[x_limits]][constant[0]] - binary_operation[name[binwidth] / constant[2]]]
variable[upperlimit] assign[=] binary_operation[call[name[x_limits]][constant[1]] + binary_operation[name[binwidth] / constant[2]]]
variable[bins] assign[=] call[name[np].arange, parameter[name[lowerlimit], name[upperlimit], name[binwidth]]]
call[name[plt].figure, parameter[]]
variable[ax] assign[=] call[name[plot_data].hist, parameter[]]
call[name[plt].minorticks_on, parameter[]]
if compare[name[x_limits] is_not constant[None]] begin[:]
call[name[ax].set_xlim, parameter[call[name[x_limits]][constant[0]], call[name[x_limits]][constant[1]]]]
if compare[name[y_limits] is_not constant[None]] begin[:]
call[name[ax].set_ylim, parameter[call[name[y_limits]][constant[0]], call[name[y_limits]][constant[1]]]]
if compare[name[title] is_not constant[None]] begin[:]
call[name[plt].title, parameter[name[title]]]
call[name[plt].xlabel, parameter[name[x_label]]]
call[name[plt].ylabel, parameter[name[y_label]]]
if compare[name[filename] is constant[None]] begin[:]
call[name[plt].show, parameter[]] | keyword[def] identifier[histogram] ( identifier[data] ,** identifier[kwargs] ):
literal[string]
identifier[timeindex] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
identifier[directory] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
identifier[filename] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
identifier[title] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[x_label] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[y_label] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[color] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
identifier[alpha] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )
identifier[normed] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] )
identifier[x_limits] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
identifier[y_limits] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
identifier[binwidth] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
identifier[fig_size] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[standard_sizes] ={ literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] )}
keyword[try] :
identifier[fig_size] = identifier[standard_sizes] [ identifier[fig_size] ]
keyword[except] :
identifier[fig_size] = identifier[standard_sizes] [ literal[string] ]
keyword[if] identifier[timeindex] keyword[is] keyword[not] keyword[None] :
identifier[plot_data] = identifier[data] . identifier[loc] [ identifier[timeindex] ,:]
keyword[else] :
identifier[plot_data] = identifier[data] . identifier[T] . identifier[stack] ()
keyword[if] identifier[binwidth] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[x_limits] keyword[is] keyword[not] keyword[None] :
identifier[lowerlimit] = identifier[x_limits] [ literal[int] ]- identifier[binwidth] / literal[int]
identifier[upperlimit] = identifier[x_limits] [ literal[int] ]+ identifier[binwidth] / literal[int]
keyword[else] :
identifier[lowerlimit] = identifier[plot_data] . identifier[min] ()- identifier[binwidth] / literal[int]
identifier[upperlimit] = identifier[plot_data] . identifier[max] ()+ identifier[binwidth] / literal[int]
identifier[bins] = identifier[np] . identifier[arange] ( identifier[lowerlimit] , identifier[upperlimit] , identifier[binwidth] )
keyword[else] :
identifier[bins] = literal[int]
identifier[plt] . identifier[figure] ( identifier[figsize] = identifier[fig_size] )
identifier[ax] = identifier[plot_data] . identifier[hist] (
identifier[normed] = identifier[normed] , identifier[color] = identifier[color] , identifier[alpha] = identifier[alpha] , identifier[bins] = identifier[bins] , identifier[grid] = keyword[True] )
identifier[plt] . identifier[minorticks_on] ()
keyword[if] identifier[x_limits] keyword[is] keyword[not] keyword[None] :
identifier[ax] . identifier[set_xlim] ( identifier[x_limits] [ literal[int] ], identifier[x_limits] [ literal[int] ])
keyword[if] identifier[y_limits] keyword[is] keyword[not] keyword[None] :
identifier[ax] . identifier[set_ylim] ( identifier[y_limits] [ literal[int] ], identifier[y_limits] [ literal[int] ])
keyword[if] identifier[title] keyword[is] keyword[not] keyword[None] :
identifier[plt] . identifier[title] ( identifier[title] )
identifier[plt] . identifier[xlabel] ( identifier[x_label] )
identifier[plt] . identifier[ylabel] ( identifier[y_label] )
keyword[if] identifier[filename] keyword[is] keyword[None] :
identifier[plt] . identifier[show] ()
keyword[else] :
keyword[if] identifier[directory] keyword[is] keyword[not] keyword[None] :
identifier[os] . identifier[makedirs] ( identifier[directory] , identifier[exist_ok] = keyword[True] )
identifier[filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[filename] )
identifier[plt] . identifier[savefig] ( identifier[filename] )
identifier[plt] . identifier[close] () | def histogram(data, **kwargs):
"""
Function to create histogram, e.g. for voltages or currents.
Parameters
----------
data : :pandas:`pandas.DataFrame<dataframe>`
Data to be plotted, e.g. voltage or current (`v_res` or `i_res` from
:class:`edisgo.grid.network.Results`). Index of the dataframe must be
a :pandas:`pandas.DatetimeIndex<datetimeindex>`.
timeindex : :pandas:`pandas.Timestamp<timestamp>` or None, optional
Specifies time step histogram is plotted for. If timeindex is None all
time steps provided in dataframe are used. Default: None.
directory : :obj:`str` or None, optional
Path to directory the plot is saved to. Is created if it does not
exist. Default: None.
filename : :obj:`str` or None, optional
Filename the plot is saved as. File format is specified by ending. If
filename is None, the plot is shown. Default: None.
color : :obj:`str` or None, optional
Color used in plot. If None it defaults to blue. Default: None.
alpha : :obj:`float`, optional
Transparency of the plot. Must be a number between 0 and 1,
where 0 is see through and 1 is opaque. Default: 1.
title : :obj:`str` or None, optional
Plot title. Default: "".
x_label : :obj:`str`, optional
Label for x-axis. Default: "".
y_label : :obj:`str`, optional
Label for y-axis. Default: "".
normed : :obj:`bool`, optional
Defines if histogram is normed. Default: False.
x_limits : :obj:`tuple` or None, optional
Tuple with x-axis limits. First entry is the minimum and second entry
the maximum value. Default: None.
y_limits : :obj:`tuple` or None, optional
Tuple with y-axis limits. First entry is the minimum and second entry
the maximum value. Default: None.
fig_size : :obj:`str` or :obj:`tuple`, optional
Size of the figure in inches or a string with the following options:
* 'a4portrait'
* 'a4landscape'
* 'a5portrait'
* 'a5landscape'
Default: 'a5landscape'.
binwidth : :obj:`float`
Width of bins. Default: None.
"""
timeindex = kwargs.get('timeindex', None)
directory = kwargs.get('directory', None)
filename = kwargs.get('filename', None)
title = kwargs.get('title', '')
x_label = kwargs.get('x_label', '')
y_label = kwargs.get('y_label', '')
color = kwargs.get('color', None)
alpha = kwargs.get('alpha', 1)
normed = kwargs.get('normed', False)
x_limits = kwargs.get('x_limits', None)
y_limits = kwargs.get('y_limits', None)
binwidth = kwargs.get('binwidth', None)
fig_size = kwargs.get('fig_size', 'a5landscape')
standard_sizes = {'a4portrait': (8.27, 11.69), 'a4landscape': (11.69, 8.27), 'a5portrait': (5.8, 8.3), 'a5landscape': (8.3, 5.8)}
try:
fig_size = standard_sizes[fig_size] # depends on [control=['try'], data=[]]
except:
fig_size = standard_sizes['a5landscape'] # depends on [control=['except'], data=[]]
if timeindex is not None:
plot_data = data.loc[timeindex, :] # depends on [control=['if'], data=['timeindex']]
else:
plot_data = data.T.stack()
if binwidth is not None:
if x_limits is not None:
lowerlimit = x_limits[0] - binwidth / 2
upperlimit = x_limits[1] + binwidth / 2 # depends on [control=['if'], data=['x_limits']]
else:
lowerlimit = plot_data.min() - binwidth / 2
upperlimit = plot_data.max() + binwidth / 2
bins = np.arange(lowerlimit, upperlimit, binwidth) # depends on [control=['if'], data=['binwidth']]
else:
bins = 10
plt.figure(figsize=fig_size)
ax = plot_data.hist(normed=normed, color=color, alpha=alpha, bins=bins, grid=True)
plt.minorticks_on()
if x_limits is not None:
ax.set_xlim(x_limits[0], x_limits[1]) # depends on [control=['if'], data=['x_limits']]
if y_limits is not None:
ax.set_ylim(y_limits[0], y_limits[1]) # depends on [control=['if'], data=['y_limits']]
if title is not None:
plt.title(title) # depends on [control=['if'], data=['title']]
plt.xlabel(x_label)
plt.ylabel(y_label)
if filename is None:
plt.show() # depends on [control=['if'], data=[]]
else:
if directory is not None:
os.makedirs(directory, exist_ok=True)
filename = os.path.join(directory, filename) # depends on [control=['if'], data=['directory']]
plt.savefig(filename)
plt.close() |
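The binwidth branch above pads half a bin on each side of the data (or the x limits) before building edges with np.arange. A standalone sketch of that bin-edge computation (numpy only; the sample values are made up). One caveat: np.arange excludes its stop value, so the original's np.arange(lower, upper, binwidth) can lose the topmost edge to rounding; the sketch extends the stop by one binwidth to be safe.

import numpy as np

def bin_edges(values, binwidth):
    # Pad half a bin on each side so round data values sit at bin centers.
    lower = min(values) - binwidth / 2
    upper = max(values) + binwidth / 2
    # Extend the stop by one step: np.arange excludes the stop value.
    return np.arange(lower, upper + binwidth, binwidth)

print(bin_edges([0.95, 1.0, 1.02, 1.05], 0.05))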
def t_ID(self, t):
r"[a-zA-Z_][0-9a-zA-Z_]*|[a-zA-Z][0-9a-zA-Z_]*[0-9a-zA-Z_]+"
t.endlexpos = t.lexpos + len(t.value)
value = t.value.upper()
if value in self.keywords:
t.type = value
return t | def function[t_ID, parameter[self, t]]:
constant[[a-zA-Z_][0-9a-zA-Z_]*|[a-zA-Z][0-9a-zA-Z_]*[0-9a-zA-Z_]+]
name[t].endlexpos assign[=] binary_operation[name[t].lexpos + call[name[len], parameter[name[t].value]]]
variable[value] assign[=] call[name[t].value.upper, parameter[]]
if compare[name[value] in name[self].keywords] begin[:]
name[t].type assign[=] name[value]
return[name[t]] | keyword[def] identifier[t_ID] ( identifier[self] , identifier[t] ):
literal[string]
identifier[t] . identifier[endlexpos] = identifier[t] . identifier[lexpos] + identifier[len] ( identifier[t] . identifier[value] )
identifier[value] = identifier[t] . identifier[value] . identifier[upper] ()
keyword[if] identifier[value] keyword[in] identifier[self] . identifier[keywords] :
identifier[t] . identifier[type] = identifier[value]
keyword[return] identifier[t] | def t_ID(self, t):
"""[a-zA-Z_][0-9a-zA-Z_]*|[a-zA-Z][0-9a-zA-Z_]*[0-9a-zA-Z_]+"""
t.endlexpos = t.lexpos + len(t.value)
value = t.value.upper()
if value in self.keywords:
t.type = value # depends on [control=['if'], data=['value']]
return t |
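t_ID is a PLY-style token rule: the raw-string docstring is the token regex, and matched identifiers are promoted to keyword tokens by upper-casing and checking a keyword set. A minimal module-level sketch of the same pattern, assuming the ply package; the keyword set and input here are made up:

import ply.lex as lex

keywords = {'SELECT', 'FROM'}
tokens = ('ID',) + tuple(keywords)
t_ignore = ' \t'

def t_ID(t):
    r'[a-zA-Z_][0-9a-zA-Z_]*'
    value = t.value.upper()
    if value in keywords:
        t.type = value  # promote the identifier to a keyword token
    return t

def t_error(t):
    t.lexer.skip(1)

lexer = lex.lex()
lexer.input('select name from users')
print([tok.type for tok in lexer])  # ['SELECT', 'ID', 'FROM', 'ID']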
def set_client_key(self, zmq_socket, client_secret_key_path, server_public_key_path):
'''must be called before bind'''
load_and_set_key(zmq_socket, client_secret_key_path)
server_public, _ = zmq.auth.load_certificate(server_public_key_path)
zmq_socket.curve_serverkey = server_public | def function[set_client_key, parameter[self, zmq_socket, client_secret_key_path, server_public_key_path]]:
constant[must be called before bind]
call[name[load_and_set_key], parameter[name[zmq_socket], name[client_secret_key_path]]]
<ast.Tuple object at 0x7da1b0a1dde0> assign[=] call[name[zmq].auth.load_certificate, parameter[name[server_public_key_path]]]
name[zmq_socket].curve_serverkey assign[=] name[server_public] | keyword[def] identifier[set_client_key] ( identifier[self] , identifier[zmq_socket] , identifier[client_secret_key_path] , identifier[server_public_key_path] ):
literal[string]
identifier[load_and_set_key] ( identifier[zmq_socket] , identifier[client_secret_key_path] )
identifier[server_public] , identifier[_] = identifier[zmq] . identifier[auth] . identifier[load_certificate] ( identifier[server_public_key_path] )
identifier[zmq_socket] . identifier[curve_serverkey] = identifier[server_public] | def set_client_key(self, zmq_socket, client_secret_key_path, server_public_key_path):
"""must call before bind"""
load_and_set_key(zmq_socket, client_secret_key_path)
(server_public, _) = zmq.auth.load_certificate(server_public_key_path)
zmq_socket.curve_serverkey = server_public |
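load_and_set_key is a helper from the same module (not shown); presumably it loads the client certificate and sets the socket's own CURVE keypair. A hedged pyzmq sketch of the full client-side setup, with made-up certificate paths:

import zmq
import zmq.auth

ctx = zmq.Context.instance()
sock = ctx.socket(zmq.REQ)

# Client keypair: both halves come from the secret certificate file.
client_public, client_secret = zmq.auth.load_certificate('client.key_secret')
sock.curve_publickey = client_public
sock.curve_secretkey = client_secret

# Only the server's public key is needed to authenticate the server.
server_public, _ = zmq.auth.load_certificate('server.key')
sock.curve_serverkey = server_public

sock.connect('tcp://localhost:5555')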
def update_scalar_bar_range(self, clim, name=None):
"""Update the value range of the active or named scalar bar.
Parameters
----------
clim : 2 item list
The new range of scalar bar. Example: ``[-1, 2]``.
name : str, optional
The title of the scalar bar to update
"""
if isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
if len(clim) != 2:
raise TypeError('clim argument must be a length 2 iterable of values: (min, max).')
if name is None:
if not hasattr(self, 'mapper'):
raise RuntimeError('This plotter does not have an active mapper.')
return self.mapper.SetScalarRange(*clim)
# Use the name to find the desired actor
def update_mapper(mapper):
return mapper.SetScalarRange(*clim)
try:
for m in self._scalar_bar_mappers[name]:
update_mapper(m)
except KeyError:
raise KeyError('Name ({}) not valid/not found in this plotter.'.format(name))
return | def function[update_scalar_bar_range, parameter[self, clim, name]]:
constant[Update the value range of the active or named scalar bar.
Parameters
----------
clim : 2 item list
The new range of scalar bar. Example: ``[-1, 2]``.
name : str, optional
The title of the scalar bar to update
]
if <ast.BoolOp object at 0x7da20c76f820> begin[:]
variable[clim] assign[=] list[[<ast.UnaryOp object at 0x7da20c76e8f0>, <ast.Name object at 0x7da20c76fa00>]]
if compare[call[name[len], parameter[name[clim]]] not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da20c76eb90>
if compare[name[name] is constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da20c76d840> begin[:]
<ast.Raise object at 0x7da20c76cb80>
return[call[name[self].mapper.SetScalarRange, parameter[<ast.Starred object at 0x7da20c76c160>]]]
def function[update_mapper, parameter[mapper]]:
return[call[name[mapper].SetScalarRange, parameter[<ast.Starred object at 0x7da20c76ded0>]]]
<ast.Try object at 0x7da20c76e6b0>
return[None] | keyword[def] identifier[update_scalar_bar_range] ( identifier[self] , identifier[clim] , identifier[name] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[clim] , identifier[float] ) keyword[or] identifier[isinstance] ( identifier[clim] , identifier[int] ):
identifier[clim] =[- identifier[clim] , identifier[clim] ]
keyword[if] identifier[len] ( identifier[clim] )!= literal[int] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[name] keyword[is] keyword[None] :
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[return] identifier[self] . identifier[mapper] . identifier[SetScalarRange] (* identifier[clim] )
keyword[def] identifier[update_mapper] ( identifier[mapper] ):
keyword[return] identifier[mapper] . identifier[SetScalarRange] (* identifier[clim] )
keyword[try] :
keyword[for] identifier[m] keyword[in] identifier[self] . identifier[_scalar_bar_mappers] [ identifier[name] ]:
identifier[update_mapper] ( identifier[m] )
keyword[except] identifier[KeyError] :
keyword[raise] identifier[KeyError] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[return] | def update_scalar_bar_range(self, clim, name=None):
"""Update the value range of the active or named scalar bar.
Parameters
----------
clim : 2 item list
The new range of scalar bar. Example: ``[-1, 2]``.
name : str, optional
The title of the scalar bar to update
"""
if isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim] # depends on [control=['if'], data=[]]
if len(clim) != 2:
raise TypeError('clim argument must be a length 2 iterable of values: (min, max).') # depends on [control=['if'], data=[]]
if name is None:
if not hasattr(self, 'mapper'):
raise RuntimeError('This plotter does not have an active mapper.') # depends on [control=['if'], data=[]]
return self.mapper.SetScalarRange(*clim) # depends on [control=['if'], data=[]]
# Use the name to find the desired actor
def update_mapper(mapper):
return mapper.SetScalarRange(*clim)
try:
for m in self._scalar_bar_mappers[name]:
update_mapper(m) # depends on [control=['for'], data=['m']] # depends on [control=['try'], data=[]]
except KeyError:
raise KeyError('Name ({}) not valid/not found in this plotter.'.format(name)) # depends on [control=['except'], data=[]]
return |
def remove_species(self, species):
"""
Remove all occurrences of several species from a structure.
Args:
species: Sequence of species to remove, e.g., ["Li", "Na"].
"""
new_sites = []
species = [get_el_sp(s) for s in species]
for site in self._sites:
new_sp_occu = {sp: amt for sp, amt in site.species.items()
if sp not in species}
if len(new_sp_occu) > 0:
new_sites.append(PeriodicSite(
new_sp_occu, site.frac_coords, self._lattice,
properties=site.properties))
self._sites = new_sites | def function[remove_species, parameter[self, species]]:
constant[
Remove all occurrences of several species from a structure.
Args:
species: Sequence of species to remove, e.g., ["Li", "Na"].
]
variable[new_sites] assign[=] list[[]]
variable[species] assign[=] <ast.ListComp object at 0x7da18f813190>
for taget[name[site]] in starred[name[self]._sites] begin[:]
variable[new_sp_occu] assign[=] <ast.DictComp object at 0x7da18f812080>
if compare[call[name[len], parameter[name[new_sp_occu]]] greater[>] constant[0]] begin[:]
call[name[new_sites].append, parameter[call[name[PeriodicSite], parameter[name[new_sp_occu], name[site].frac_coords, name[self]._lattice]]]]
name[self]._sites assign[=] name[new_sites] | keyword[def] identifier[remove_species] ( identifier[self] , identifier[species] ):
literal[string]
identifier[new_sites] =[]
identifier[species] =[ identifier[get_el_sp] ( identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[species] ]
keyword[for] identifier[site] keyword[in] identifier[self] . identifier[_sites] :
identifier[new_sp_occu] ={ identifier[sp] : identifier[amt] keyword[for] identifier[sp] , identifier[amt] keyword[in] identifier[site] . identifier[species] . identifier[items] ()
keyword[if] identifier[sp] keyword[not] keyword[in] identifier[species] }
keyword[if] identifier[len] ( identifier[new_sp_occu] )> literal[int] :
identifier[new_sites] . identifier[append] ( identifier[PeriodicSite] (
identifier[new_sp_occu] , identifier[site] . identifier[frac_coords] , identifier[self] . identifier[_lattice] ,
identifier[properties] = identifier[site] . identifier[properties] ))
identifier[self] . identifier[_sites] = identifier[new_sites] | def remove_species(self, species):
"""
Remove all occurrences of several species from a structure.
Args:
species: Sequence of species to remove, e.g., ["Li", "Na"].
"""
new_sites = []
species = [get_el_sp(s) for s in species]
for site in self._sites:
new_sp_occu = {sp: amt for (sp, amt) in site.species.items() if sp not in species}
if len(new_sp_occu) > 0:
new_sites.append(PeriodicSite(new_sp_occu, site.frac_coords, self._lattice, properties=site.properties)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['site']]
self._sites = new_sites |
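The core of remove_species is the occupancy-dict filter: each site keeps only the species not being removed, and sites whose dict becomes empty are dropped. A standalone sketch of that step on plain dicts (the site data is made up):

sites = [{'Li': 1.0}, {'Fe': 0.5, 'Li': 0.5}, {'O': 1.0}]
to_remove = {'Li'}

filtered = []
for occu in sites:
    kept = {sp: amt for sp, amt in occu.items() if sp not in to_remove}
    if kept:  # drop sites left with no species at all
        filtered.append(kept)

print(filtered)  # [{'Fe': 0.5}, {'O': 1.0}]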
def dispatch(self, opcode, context):
"""Dispatches a context on a given opcode. Returns True if the context
is done matching, False if it must be resumed when next encountered."""
if id(context) in self.executing_contexts:
generator = self.executing_contexts[id(context)]
del self.executing_contexts[id(context)]
has_finished = generator.next()
else:
method = self.DISPATCH_TABLE.get(opcode, _OpcodeDispatcher.unknown)
has_finished = method(self, context)
if hasattr(has_finished, "next"): # avoid using the types module
generator = has_finished
has_finished = generator.next()
if not has_finished:
self.executing_contexts[id(context)] = generator
return has_finished | def function[dispatch, parameter[self, opcode, context]]:
constant[Dispatches a context on a given opcode. Returns True if the context
is done matching, False if it must be resumed when next encountered.]
if compare[call[name[id], parameter[name[context]]] in name[self].executing_contexts] begin[:]
variable[generator] assign[=] call[name[self].executing_contexts][call[name[id], parameter[name[context]]]]
<ast.Delete object at 0x7da20c992500>
variable[has_finished] assign[=] call[name[generator].next, parameter[]]
if <ast.UnaryOp object at 0x7da2047eb9a0> begin[:]
call[name[self].executing_contexts][call[name[id], parameter[name[context]]]] assign[=] name[generator]
return[name[has_finished]] | keyword[def] identifier[dispatch] ( identifier[self] , identifier[opcode] , identifier[context] ):
literal[string]
keyword[if] identifier[id] ( identifier[context] ) keyword[in] identifier[self] . identifier[executing_contexts] :
identifier[generator] = identifier[self] . identifier[executing_contexts] [ identifier[id] ( identifier[context] )]
keyword[del] identifier[self] . identifier[executing_contexts] [ identifier[id] ( identifier[context] )]
identifier[has_finished] = identifier[generator] . identifier[next] ()
keyword[else] :
identifier[method] = identifier[self] . identifier[DISPATCH_TABLE] . identifier[get] ( identifier[opcode] , identifier[_OpcodeDispatcher] . identifier[unknown] )
identifier[has_finished] = identifier[method] ( identifier[self] , identifier[context] )
keyword[if] identifier[hasattr] ( identifier[has_finished] , literal[string] ):
identifier[generator] = identifier[has_finished]
identifier[has_finished] = identifier[generator] . identifier[next] ()
keyword[if] keyword[not] identifier[has_finished] :
identifier[self] . identifier[executing_contexts] [ identifier[id] ( identifier[context] )]= identifier[generator]
keyword[return] identifier[has_finished] | def dispatch(self, opcode, context):
"""Dispatches a context on a given opcode. Returns True if the context
is done matching, False if it must be resumed when next encountered."""
if id(context) in self.executing_contexts:
generator = self.executing_contexts[id(context)]
del self.executing_contexts[id(context)]
has_finished = generator.next() # depends on [control=['if'], data=[]]
else:
method = self.DISPATCH_TABLE.get(opcode, _OpcodeDispatcher.unknown)
has_finished = method(self, context)
if hasattr(has_finished, 'next'): # avoid using the types module
generator = has_finished
has_finished = generator.next() # depends on [control=['if'], data=[]]
if not has_finished:
self.executing_contexts[id(context)] = generator # depends on [control=['if'], data=[]]
return has_finished |
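dispatch implements resumable handlers: a handler may return a generator, and a falsy yield parks that generator under id(context) until the same context is dispatched again (the generator.next() calls are Python 2 spelling; Python 3 would use next(generator)). A self-contained sketch of the resume pattern, simplified to the generator case:

executing = {}

def handler(ctx):
    yield False  # first visit: not finished yet
    yield True   # second visit: done

def dispatch(ctx):
    if id(ctx) in executing:
        gen = executing.pop(id(ctx))  # resume a parked handler
    else:
        gen = handler(ctx)
    done = next(gen)
    if not done:
        executing[id(ctx)] = gen  # park until the next encounter
    return done

ctx = object()
print(dispatch(ctx))  # False (parked)
print(dispatch(ctx))  # True  (resumed and finished)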
def digit(decimal, digit, input_base=10):
"""
Find the value of an integer at a specific digit when represented in a
particular base.
Args:
decimal(int): A number represented in base 10 (positive integer).
digit(int): The digit to find where zero is the first, lowest, digit.
input_base(int): The base to use (default 10).
Returns:
The value at specified digit in the input decimal.
This output value is represented as a base 10 integer.
Examples:
>>> digit(201, 0)
1
>>> digit(201, 1)
0
>>> digit(201, 2)
2
>>> tuple(digit(253, i, 2) for i in range(8))
(1, 0, 1, 1, 1, 1, 1, 1)
# Find the lowest digit of a large hexadecimal number
>>> digit(123456789123456789, 0, 16)
5
"""
if decimal == 0:
return 0
if digit != 0:
return (decimal // (input_base ** digit)) % input_base
else:
return decimal % input_base | def function[digit, parameter[decimal, digit, input_base]]:
constant[
Find the value of an integer at a specific digit when represented in a
particular base.
Args:
decimal(int): A number represented in base 10 (positive integer).
digit(int): The digit to find where zero is the first, lowest, digit.
input_base(int): The base to use (default 10).
Returns:
The value at specified digit in the input decimal.
This output value is represented as a base 10 integer.
Examples:
>>> digit(201, 0)
1
>>> digit(201, 1)
0
>>> digit(201, 2)
2
>>> tuple(digit(253, i, 2) for i in range(8))
(1, 0, 1, 1, 1, 1, 1, 1)
# Find the lowest digit of a large hexadecimal number
>>> digit(123456789123456789, 0, 16)
5
]
if compare[name[decimal] equal[==] constant[0]] begin[:]
return[constant[0]]
if compare[name[digit] not_equal[!=] constant[0]] begin[:]
return[binary_operation[binary_operation[name[decimal] <ast.FloorDiv object at 0x7da2590d6bc0> binary_operation[name[input_base] ** name[digit]]] <ast.Mod object at 0x7da2590d6920> name[input_base]]] | keyword[def] identifier[digit] ( identifier[decimal] , identifier[digit] , identifier[input_base] = literal[int] ):
literal[string]
keyword[if] identifier[decimal] == literal[int] :
keyword[return] literal[int]
keyword[if] identifier[digit] != literal[int] :
keyword[return] ( identifier[decimal] //( identifier[input_base] ** identifier[digit] ))% identifier[input_base]
keyword[else] :
keyword[return] identifier[decimal] % identifier[input_base] | def digit(decimal, digit, input_base=10):
"""
Find the value of an integer at a specific digit when represented in a
particular base.
Args:
decimal(int): A number represented in base 10 (positive integer).
digit(int): The digit to find where zero is the first, lowest, digit.
base(int): The base to use (default 10).
Returns:
The value at specified digit in the input decimal.
This output value is represented as a base 10 integer.
Examples:
>>> digit(201, 0)
1
>>> digit(201, 1)
0
>>> digit(201, 2)
2
>>> tuple(digit(253, i, 2) for i in range(8))
(1, 0, 1, 1, 1, 1, 1, 1)
# Find the lowest digit of a large hexadecimal number
>>> digit(123456789123456789, 0, 16)
5
"""
if decimal == 0:
return 0 # depends on [control=['if'], data=[]]
if digit != 0:
return decimal // input_base ** digit % input_base # depends on [control=['if'], data=['digit']]
else:
return decimal % input_base |
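Since digit() extracts one position at a time, the full lowest-first expansion is a loop over positions until the base power exceeds the number. A small helper built on top of it, consistent with the doctest above:

def digits(decimal, input_base=10):
    """Return all digits of decimal in input_base, lowest first."""
    out = [digit(decimal, 0, input_base)]
    i = 1
    while input_base ** i <= decimal:
        out.append(digit(decimal, i, input_base))
        i += 1
    return out

assert digits(253, 2) == [1, 0, 1, 1, 1, 1, 1, 1]
assert digits(0) == [0]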
def user_name_attributes(user, service):
"""Return all available user name related fields and methods."""
attributes = {}
attributes['username'] = user.get_username()
attributes['full_name'] = user.get_full_name()
attributes['short_name'] = user.get_short_name()
return attributes | def function[user_name_attributes, parameter[user, service]]:
constant[Return all available user name related fields and methods.]
variable[attributes] assign[=] dictionary[[], []]
call[name[attributes]][constant[username]] assign[=] call[name[user].get_username, parameter[]]
call[name[attributes]][constant[full_name]] assign[=] call[name[user].get_full_name, parameter[]]
call[name[attributes]][constant[short_name]] assign[=] call[name[user].get_short_name, parameter[]]
return[name[attributes]] | keyword[def] identifier[user_name_attributes] ( identifier[user] , identifier[service] ):
literal[string]
identifier[attributes] ={}
identifier[attributes] [ literal[string] ]= identifier[user] . identifier[get_username] ()
identifier[attributes] [ literal[string] ]= identifier[user] . identifier[get_full_name] ()
identifier[attributes] [ literal[string] ]= identifier[user] . identifier[get_short_name] ()
keyword[return] identifier[attributes] | def user_name_attributes(user, service):
"""Return all available user name related fields and methods."""
attributes = {}
attributes['username'] = user.get_username()
attributes['full_name'] = user.get_full_name()
attributes['short_name'] = user.get_short_name()
return attributes |
def bipartite_as_graph(self) -> Graph: # pragma: no cover
"""Returns a :class:`graphviz.Graph` representation of this bipartite graph."""
if Graph is None:
raise ImportError('The graphviz package is required to draw the graph.')
graph = Graph()
nodes_left = {} # type: Dict[TLeft, str]
nodes_right = {} # type: Dict[TRight, str]
node_id = 0
for (left, right), value in self.bipartite._edges.items():
if left not in nodes_left:
name = 'node{:d}'.format(node_id)
nodes_left[left] = name
label = str(self.subjects_by_id[left])
graph.node(name, label=label)
node_id += 1
if right not in nodes_right:
name = 'node{:d}'.format(node_id)
nodes_right[right] = name
label = str(self.automaton.patterns[right][0])
graph.node(name, label=label)
node_id += 1
edge_label = value is not True and str(value) or ''
graph.edge(nodes_left[left], nodes_right[right], edge_label)
return graph | def function[bipartite_as_graph, parameter[self]]:
constant[Returns a :class:`graphviz.Graph` representation of this bipartite graph.]
if compare[name[Graph] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b23450f0>
variable[graph] assign[=] call[name[Graph], parameter[]]
variable[nodes_left] assign[=] dictionary[[], []]
variable[nodes_right] assign[=] dictionary[[], []]
variable[node_id] assign[=] constant[0]
for taget[tuple[[<ast.Tuple object at 0x7da1b23459f0>, <ast.Name object at 0x7da1b2345720>]]] in starred[call[name[self].bipartite._edges.items, parameter[]]] begin[:]
if compare[name[left] <ast.NotIn object at 0x7da2590d7190> name[nodes_left]] begin[:]
variable[name] assign[=] call[constant[node{:d}].format, parameter[name[node_id]]]
call[name[nodes_left]][name[left]] assign[=] name[name]
variable[label] assign[=] call[name[str], parameter[call[name[self].subjects_by_id][name[left]]]]
call[name[graph].node, parameter[name[name]]]
<ast.AugAssign object at 0x7da1b2347c70>
if compare[name[right] <ast.NotIn object at 0x7da2590d7190> name[nodes_right]] begin[:]
variable[name] assign[=] call[constant[node{:d}].format, parameter[name[node_id]]]
call[name[nodes_right]][name[right]] assign[=] name[name]
variable[label] assign[=] call[name[str], parameter[call[call[name[self].automaton.patterns][name[right]]][constant[0]]]]
call[name[graph].node, parameter[name[name]]]
<ast.AugAssign object at 0x7da1b06cecb0>
variable[edge_label] assign[=] <ast.BoolOp object at 0x7da1b06ce890>
call[name[graph].edge, parameter[call[name[nodes_left]][name[left]], call[name[nodes_right]][name[right]], name[edge_label]]]
return[name[graph]] | keyword[def] identifier[bipartite_as_graph] ( identifier[self] )-> identifier[Graph] :
literal[string]
keyword[if] identifier[Graph] keyword[is] keyword[None] :
keyword[raise] identifier[ImportError] ( literal[string] )
identifier[graph] = identifier[Graph] ()
identifier[nodes_left] ={}
identifier[nodes_right] ={}
identifier[node_id] = literal[int]
keyword[for] ( identifier[left] , identifier[right] ), identifier[value] keyword[in] identifier[self] . identifier[bipartite] . identifier[_edges] . identifier[items] ():
keyword[if] identifier[left] keyword[not] keyword[in] identifier[nodes_left] :
identifier[name] = literal[string] . identifier[format] ( identifier[node_id] )
identifier[nodes_left] [ identifier[left] ]= identifier[name]
identifier[label] = identifier[str] ( identifier[self] . identifier[subjects_by_id] [ identifier[left] ])
identifier[graph] . identifier[node] ( identifier[name] , identifier[label] = identifier[label] )
identifier[node_id] += literal[int]
keyword[if] identifier[right] keyword[not] keyword[in] identifier[nodes_right] :
identifier[name] = literal[string] . identifier[format] ( identifier[node_id] )
identifier[nodes_right] [ identifier[right] ]= identifier[name]
identifier[label] = identifier[str] ( identifier[self] . identifier[automaton] . identifier[patterns] [ identifier[right] ][ literal[int] ])
identifier[graph] . identifier[node] ( identifier[name] , identifier[label] = identifier[label] )
identifier[node_id] += literal[int]
identifier[edge_label] = identifier[value] keyword[is] keyword[not] keyword[True] keyword[and] identifier[str] ( identifier[value] ) keyword[or] literal[string]
identifier[graph] . identifier[edge] ( identifier[nodes_left] [ identifier[left] ], identifier[nodes_right] [ identifier[right] ], identifier[edge_label] )
keyword[return] identifier[graph] | def bipartite_as_graph(self) -> Graph: # pragma: no cover
'Returns a :class:`graphviz.Graph` representation of this bipartite graph.'
if Graph is None:
raise ImportError('The graphviz package is required to draw the graph.') # depends on [control=['if'], data=[]]
graph = Graph()
nodes_left = {} # type: Dict[TLeft, str]
nodes_right = {} # type: Dict[TRight, str]
node_id = 0
for ((left, right), value) in self.bipartite._edges.items():
if left not in nodes_left:
name = 'node{:d}'.format(node_id)
nodes_left[left] = name
label = str(self.subjects_by_id[left])
graph.node(name, label=label)
node_id += 1 # depends on [control=['if'], data=['left', 'nodes_left']]
if right not in nodes_right:
name = 'node{:d}'.format(node_id)
nodes_right[right] = name
label = str(self.automaton.patterns[right][0])
graph.node(name, label=label)
node_id += 1 # depends on [control=['if'], data=['right', 'nodes_right']]
edge_label = value is not True and str(value) or ''
graph.edge(nodes_left[left], nodes_right[right], edge_label) # depends on [control=['for'], data=[]]
return graph |
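The method touches only three pieces of the graphviz API: Graph(), graph.node(name, label=...), and graph.edge(a, b, label). A minimal sketch of that usage, assuming the graphviz package is installed; the labels are made up:

from graphviz import Graph

graph = Graph()
graph.node('node0', label='subject')
graph.node('node1', label='pattern')
graph.edge('node0', 'node1', 'match')  # third positional argument is the edge label

print(graph.source)  # DOT text; graph.render('out') would also draw it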
def _gather_group_members(group, groups, users):
'''
Gather group members
'''
_group = __salt__['group.info'](group)
if not _group:
log.warning('Group %s does not exist, ignoring.', group)
return
for member in _group['members']:
if member not in users:
users[member] = groups[group] | def function[_gather_group_members, parameter[group, groups, users]]:
constant[
Gather group members
]
variable[_group] assign[=] call[call[name[__salt__]][constant[group.info]], parameter[name[group]]]
if <ast.UnaryOp object at 0x7da1b211c220> begin[:]
call[name[log].warning, parameter[constant[Group %s does not exist, ignoring.], name[group]]]
return[None]
for taget[name[member]] in starred[call[name[_group]][constant[members]]] begin[:]
if compare[name[member] <ast.NotIn object at 0x7da2590d7190> name[users]] begin[:]
call[name[users]][name[member]] assign[=] call[name[groups]][name[group]] | keyword[def] identifier[_gather_group_members] ( identifier[group] , identifier[groups] , identifier[users] ):
literal[string]
identifier[_group] = identifier[__salt__] [ literal[string] ]( identifier[group] )
keyword[if] keyword[not] identifier[_group] :
identifier[log] . identifier[warning] ( literal[string] , identifier[group] )
keyword[return]
keyword[for] identifier[member] keyword[in] identifier[_group] [ literal[string] ]:
keyword[if] identifier[member] keyword[not] keyword[in] identifier[users] :
identifier[users] [ identifier[member] ]= identifier[groups] [ identifier[group] ] | def _gather_group_members(group, groups, users):
"""
Gather group members
"""
_group = __salt__['group.info'](group)
if not _group:
log.warning('Group %s does not exist, ignoring.', group)
return # depends on [control=['if'], data=[]]
for member in _group['members']:
if member not in users:
users[member] = groups[group] # depends on [control=['if'], data=['member', 'users']] # depends on [control=['for'], data=['member']] |
def set_timestamp(self,timestamp=None):
"""
Set the timestamp of the linguistic processor, set to None for the current time
@type timestamp: string
@param timestamp: timestamp of the linguistic processor
"""
if timestamp is None:
import time
timestamp = time.strftime('%Y-%m-%dT%H:%M:%S%Z')
self.node.set('timestamp',timestamp) | def function[set_timestamp, parameter[self, timestamp]]:
constant[
Set the timestamp of the linguistic processor, set to None for the current time
@type timestamp: string
@param timestamp: timestamp of the linguistic processor
]
if compare[name[timestamp] is constant[None]] begin[:]
import module[time]
variable[timestamp] assign[=] call[name[time].strftime, parameter[constant[%Y-%m-%dT%H:%M:%S%Z]]]
call[name[self].node.set, parameter[constant[timestamp], name[timestamp]]] | keyword[def] identifier[set_timestamp] ( identifier[self] , identifier[timestamp] = keyword[None] ):
literal[string]
keyword[if] identifier[timestamp] keyword[is] keyword[None] :
keyword[import] identifier[time]
identifier[timestamp] = identifier[time] . identifier[strftime] ( literal[string] )
identifier[self] . identifier[node] . identifier[set] ( literal[string] , identifier[timestamp] ) | def set_timestamp(self, timestamp=None):
"""
Set the timestamp of the linguistic processor, set to None for the current time
@type timestamp: string
@param timestamp: timestamp of the linguistic processor
"""
if timestamp is None:
import time
timestamp = time.strftime('%Y-%m-%dT%H:%M:%S%Z') # depends on [control=['if'], data=['timestamp']]
self.node.set('timestamp', timestamp) |
def set(self, key, value, lease=None, return_previous=None, timeout=None):
"""
Set the value for the key in the key-value store.
Setting a value on a key increments the revision
of the key-value store and generates one event in
the event history.
:param key: key is the key, in bytes, to put into
the key-value store.
:type key: bytes
:param value: value is the value, in bytes, to
associate with the key in the key-value store.
:type value: bytes
:param lease: Lease to associate the key in the
key-value store with.
:type lease: instance of :class:`txaioetcd.Lease` or None
:param return_previous: If set, return the previous key-value.
:type return_previous: bool or None
:param timeout: Request timeout in seconds.
:type timeout: int
:returns: Revision info
:rtype: instance of :class:`txaioetcd.Revision`
"""
assembler = commons.PutRequestAssembler(self._url, key, value, lease, return_previous)
obj = yield self._post(assembler.url, assembler.data, timeout)
revision = Revision._parse(obj)
returnValue(revision) | def function[set, parameter[self, key, value, lease, return_previous, timeout]]:
constant[
Set the value for the key in the key-value store.
Setting a value on a key increments the revision
of the key-value store and generates one event in
the event history.
:param key: key is the key, in bytes, to put into
the key-value store.
:type key: bytes
:param value: value is the value, in bytes, to
associate with the key in the key-value store.
:type value: bytes
:param lease: Lease to associate the key in the
key-value store with.
:type lease: instance of :class:`txaioetcd.Lease` or None
:param return_previous: If set, return the previous key-value.
:type return_previous: bool or None
:param timeout: Request timeout in seconds.
:type timeout: int
:returns: Revision info
:rtype: instance of :class:`txaioetcd.Revision`
]
variable[assembler] assign[=] call[name[commons].PutRequestAssembler, parameter[name[self]._url, name[key], name[value], name[lease], name[return_previous]]]
variable[obj] assign[=] <ast.Yield object at 0x7da1b258a140>
variable[revision] assign[=] call[name[Revision]._parse, parameter[name[obj]]]
call[name[returnValue], parameter[name[revision]]] | keyword[def] identifier[set] ( identifier[self] , identifier[key] , identifier[value] , identifier[lease] = keyword[None] , identifier[return_previous] = keyword[None] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[assembler] = identifier[commons] . identifier[PutRequestAssembler] ( identifier[self] . identifier[_url] , identifier[key] , identifier[value] , identifier[lease] , identifier[return_previous] )
identifier[obj] = keyword[yield] identifier[self] . identifier[_post] ( identifier[assembler] . identifier[url] , identifier[assembler] . identifier[data] , identifier[timeout] )
identifier[revision] = identifier[Revision] . identifier[_parse] ( identifier[obj] )
identifier[returnValue] ( identifier[revision] ) | def set(self, key, value, lease=None, return_previous=None, timeout=None):
"""
Set the value for the key in the key-value store.
Setting a value on a key increments the revision
of the key-value store and generates one event in
the event history.
:param key: key is the key, in bytes, to put into
the key-value store.
:type key: bytes
:param value: value is the value, in bytes, to
associate with the key in the key-value store.
:type value: bytes
:param lease: Lease to associate the key in the
key-value store with.
:type lease: instance of :class:`txaioetcd.Lease` or None
:param return_previous: If set, return the previous key-value.
:type return_previous: bool or None
:param timeout: Request timeout in seconds.
:type timeout: int
:returns: Revision info
:rtype: instance of :class:`txaioetcd.Revision`
"""
assembler = commons.PutRequestAssembler(self._url, key, value, lease, return_previous)
obj = (yield self._post(assembler.url, assembler.data, timeout))
revision = Revision._parse(obj)
returnValue(revision) |
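Because the body yields on the HTTP POST and finishes with returnValue, set() is a Twisted inlineCallbacks coroutine and must itself be yielded. A hedged usage sketch, assuming an etcd server on localhost and a Client as in the txaioetcd README:

from twisted.internet.defer import inlineCallbacks
from twisted.internet.task import react
from txaioetcd import Client

@inlineCallbacks
def main(reactor):
    etcd = Client(reactor, u'http://localhost:2379')
    revision = yield etcd.set(b'mykey', b'hello')  # fires with a Revision
    print('stored at', revision)

react(main)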
def getDuration(self):
"""Returns the time in minutes taken for this analysis.
If the analysis is not yet 'ready to process', returns 0
If the analysis is still in progress (not yet verified),
duration = date_verified - date_start_process
Otherwise:
duration = current_datetime - date_start_process
:return: time in minutes taken for this analysis
:rtype: float
"""
starttime = self.getStartProcessDate()
if not starttime:
# The analysis is not yet ready to be processed
return 0
endtime = self.getDateVerified() or DateTime()
# Duration in minutes
duration = (endtime - starttime) * 24 * 60
return duration | def function[getDuration, parameter[self]]:
constant[Returns the time in minutes taken for this analysis.
If the analysis is not yet 'ready to process', returns 0
If the analysis is still in progress (not yet verified),
duration = date_verified - date_start_process
Otherwise:
duration = current_datetime - date_start_process
:return: time in minutes taken for this analysis
:rtype: float
]
variable[starttime] assign[=] call[name[self].getStartProcessDate, parameter[]]
if <ast.UnaryOp object at 0x7da1b2314910> begin[:]
return[constant[0]]
variable[endtime] assign[=] <ast.BoolOp object at 0x7da1b2317b20>
variable[duration] assign[=] binary_operation[binary_operation[binary_operation[name[endtime] - name[starttime]] * constant[24]] * constant[60]]
return[name[duration]] | keyword[def] identifier[getDuration] ( identifier[self] ):
literal[string]
identifier[starttime] = identifier[self] . identifier[getStartProcessDate] ()
keyword[if] keyword[not] identifier[starttime] :
keyword[return] literal[int]
identifier[endtime] = identifier[self] . identifier[getDateVerified] () keyword[or] identifier[DateTime] ()
identifier[duration] =( identifier[endtime] - identifier[starttime] )* literal[int] * literal[int]
keyword[return] identifier[duration] | def getDuration(self):
"""Returns the time in minutes taken for this analysis.
If the analysis is not yet 'ready to process', returns 0
If the analysis is still in progress (not yet verified),
duration = date_verified - date_start_process
Otherwise:
duration = current_datetime - date_start_process
:return: time in minutes taken for this analysis
:rtype: int
"""
starttime = self.getStartProcessDate()
if not starttime:
# The analysis is not yet ready to be processed
return 0 # depends on [control=['if'], data=[]]
endtime = self.getDateVerified() or DateTime()
# Duration in minutes
duration = (endtime - starttime) * 24 * 60
return duration |
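The * 24 * 60 works because subtracting two Zope DateTime objects yields a float number of days. The stdlib-datetime equivalent of the same duration computation:

from datetime import datetime

start = datetime(2024, 1, 1, 10, 0)
end = datetime(2024, 1, 1, 10, 45)

days = (end - start).total_seconds() / 86400  # what DateTime subtraction returns
minutes = days * 24 * 60
print(minutes)  # 45.0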
def setlist(self, key, values):
"""
Sets <key>'s list of values to <values>. Existing items with key <key>
are first replaced with new values from <values>. Any remaining old
items that haven't been replaced with new values are deleted, and any
new values from <values> that don't have corresponding items with <key>
to replace are appended to the end of the list of all items.
If values is an empty list, [], <key> is deleted, equivalent in action
to del self[<key>].
Example:
omd = omdict([(1,1), (2,2)])
omd.setlist(1, [11, 111])
omd.allitems() == [(1,11), (2,2), (1,111)]
omd = omdict([(1,1), (1,11), (2,2), (1,111)])
omd.setlist(1, [None])
omd.allitems() == [(1,None), (2,2)]
omd = omdict([(1,1), (1,11), (2,2), (1,111)])
omd.setlist(1, [])
omd.allitems() == [(2,2)]
Returns: <self>.
"""
if not values and key in self:
self.pop(key)
else:
it = zip_longest(
list(self._map.get(key, [])), values, fillvalue=_absent)
for node, value in it:
if node is not _absent and value is not _absent:
node.value = value
elif node is _absent:
self.add(key, value)
elif value is _absent:
self._map[key].remove(node)
self._items.removenode(node)
return self | def function[setlist, parameter[self, key, values]]:
constant[
Sets <key>'s list of values to <values>. Existing items with key <key>
are first replaced with new values from <values>. Any remaining old
items that haven't been replaced with new values are deleted, and any
new values from <values> that don't have corresponding items with <key>
to replace are appended to the end of the list of all items.
If values is an empty list, [], <key> is deleted, equivalent in action
to del self[<key>].
Example:
omd = omdict([(1,1), (2,2)])
omd.setlist(1, [11, 111])
omd.allitems() == [(1,11), (2,2), (1,111)]
omd = omdict([(1,1), (1,11), (2,2), (1,111)])
omd.setlist(1, [None])
omd.allitems() == [(1,None), (2,2)]
omd = omdict([(1,1), (1,11), (2,2), (1,111)])
omd.setlist(1, [])
omd.allitems() == [(2,2)]
Returns: <self>.
]
if <ast.BoolOp object at 0x7da18dc05570> begin[:]
call[name[self].pop, parameter[name[key]]]
return[name[self]] | keyword[def] identifier[setlist] ( identifier[self] , identifier[key] , identifier[values] ):
literal[string]
keyword[if] keyword[not] identifier[values] keyword[and] identifier[key] keyword[in] identifier[self] :
identifier[self] . identifier[pop] ( identifier[key] )
keyword[else] :
identifier[it] = identifier[zip_longest] (
identifier[list] ( identifier[self] . identifier[_map] . identifier[get] ( identifier[key] ,[])), identifier[values] , identifier[fillvalue] = identifier[_absent] )
keyword[for] identifier[node] , identifier[value] keyword[in] identifier[it] :
keyword[if] identifier[node] keyword[is] keyword[not] identifier[_absent] keyword[and] identifier[value] keyword[is] keyword[not] identifier[_absent] :
identifier[node] . identifier[value] = identifier[value]
keyword[elif] identifier[node] keyword[is] identifier[_absent] :
identifier[self] . identifier[add] ( identifier[key] , identifier[value] )
keyword[elif] identifier[value] keyword[is] identifier[_absent] :
identifier[self] . identifier[_map] [ identifier[key] ]. identifier[remove] ( identifier[node] )
identifier[self] . identifier[_items] . identifier[removenode] ( identifier[node] )
keyword[return] identifier[self] | def setlist(self, key, values):
"""
Sets <key>'s list of values to <values>. Existing items with key <key>
are first replaced with new values from <values>. Any remaining old
items that haven't been replaced with new values are deleted, and any
new values from <values> that don't have corresponding items with <key>
to replace are appended to the end of the list of all items.
If values is an empty list, [], <key> is deleted, equivalent in action
to del self[<key>].
Example:
omd = omdict([(1,1), (2,2)])
omd.setlist(1, [11, 111])
omd.allitems() == [(1,11), (2,2), (1,111)]
omd = omdict([(1,1), (1,11), (2,2), (1,111)])
omd.setlist(1, [None])
omd.allitems() == [(1,None), (2,2)]
omd = omdict([(1,1), (1,11), (2,2), (1,111)])
omd.setlist(1, [])
omd.allitems() == [(2,2)]
Returns: <self>.
"""
if not values and key in self:
self.pop(key) # depends on [control=['if'], data=[]]
else:
it = zip_longest(list(self._map.get(key, [])), values, fillvalue=_absent)
for (node, value) in it:
if node is not _absent and value is not _absent:
node.value = value # depends on [control=['if'], data=[]]
elif node is _absent:
self.add(key, value) # depends on [control=['if'], data=[]]
elif value is _absent:
self._map[key].remove(node)
self._items.removenode(node) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return self |
def data_source_and_uncertainty_flags(self, value=None):
"""Corresponds to IDD Field `data_source_and_uncertainty_flags` Initial
day of weather file is checked by EnergyPlus for validity (as shown
below) Each field is checked for "missing" as shown below. Reasonable
values, calculated values or the last "good" value is substituted.
Args:
value (str): value for IDD Field `data_source_and_uncertainty_flags`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = str(value)
except ValueError:
raise ValueError(
'value {} needs to be of type str '
'for field `data_source_and_uncertainty_flags`'.format(value))
if ',' in value:
raise ValueError(
'value should not contain a comma '
'for field `data_source_and_uncertainty_flags`')
self._data_source_and_uncertainty_flags = value | def function[data_source_and_uncertainty_flags, parameter[self, value]]:
constant[Corresponds to IDD Field `data_source_and_uncertainty_flags` Initial
day of weather file is checked by EnergyPlus for validity (as shown
below) Each field is checked for "missing" as shown below. Reasonable
values, calculated values or the last "good" value is substituted.
Args:
value (str): value for IDD Field `data_source_and_uncertainty_flags`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
]
if compare[name[value] is_not constant[None]] begin[:]
<ast.Try object at 0x7da1b0fb1ab0>
if compare[constant[,] in name[value]] begin[:]
<ast.Raise object at 0x7da1b0fb1390>
name[self]._data_source_and_uncertainty_flags assign[=] name[value] | keyword[def] identifier[data_source_and_uncertainty_flags] ( identifier[self] , identifier[value] = keyword[None] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[value] = identifier[str] ( identifier[value] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] ( identifier[value] ))
keyword[if] literal[string] keyword[in] identifier[value] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] )
identifier[self] . identifier[_data_source_and_uncertainty_flags] = identifier[value] | def data_source_and_uncertainty_flags(self, value=None):
"""Corresponds to IDD Field `data_source_and_uncertainty_flags` Initial
day of weather file is checked by EnergyPlus for validity (as shown
below) Each field is checked for "missing" as shown below. Reasonable
values, calculated values or the last "good" value is substituted.
Args:
value (str): value for IDD Field `data_source_and_uncertainty_flags`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = str(value) # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('value {} needs to be of type str for field `data_source_and_uncertainty_flags`'.format(value)) # depends on [control=['except'], data=[]]
if ',' in value:
raise ValueError('value should not contain a comma for field `data_source_and_uncertainty_flags`') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['value']]
self._data_source_and_uncertainty_flags = value |
def plot_curves_z(data, name, title=None):
"""Generates a simple plot of the quasiparticle weight decay curves given
data object with doping setup"""
plt.figure()
for zet, c in zip(data['zeta'], data['doping']):
plt.plot(data['u_int'], zet[:, 0], label='$n={}$'.format(str(c)))
if title is not None:
plt.title(title)
label_saves(name+'.png') | def function[plot_curves_z, parameter[data, name, title]]:
constant[Generates a simple plot of the quasiparticle weight decay curves
given a data object with a doping setup]
call[name[plt].figure, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da2054a40a0>, <ast.Name object at 0x7da2054a46a0>]]] in starred[call[name[zip], parameter[call[name[data]][constant[zeta]], call[name[data]][constant[doping]]]]] begin[:]
call[name[plt].plot, parameter[call[name[data]][constant[u_int]], call[name[zet]][tuple[[<ast.Slice object at 0x7da2054a4d30>, <ast.Constant object at 0x7da2054a7790>]]]]]
if compare[name[title] is_not constant[None]] begin[:]
call[name[plt].title, parameter[name[title]]]
call[name[label_saves], parameter[binary_operation[name[name] + constant[.png]]]] | keyword[def] identifier[plot_curves_z] ( identifier[data] , identifier[name] , identifier[title] = keyword[None] ):
literal[string]
identifier[plt] . identifier[figure] ()
keyword[for] identifier[zet] , identifier[c] keyword[in] identifier[zip] ( identifier[data] [ literal[string] ], identifier[data] [ literal[string] ]):
identifier[plt] . identifier[plot] ( identifier[data] [ literal[string] ], identifier[zet] [:, literal[int] ], identifier[label] = literal[string] . identifier[format] ( identifier[str] ( identifier[c] )))
keyword[if] identifier[title] keyword[is] keyword[not] keyword[None] :
identifier[plt] . identifier[title] ( identifier[title] )
identifier[label_saves] ( identifier[name] + literal[string] ) | def plot_curves_z(data, name, title=None):
"""Generates a simple plot of the quasiparticle weight decay curves given
data object with doping setup"""
plt.figure()
for (zet, c) in zip(data['zeta'], data['doping']):
plt.plot(data['u_int'], zet[:, 0], label='$n={}$'.format(str(c))) # depends on [control=['for'], data=[]]
if title is not None:
plt.title(title) # depends on [control=['if'], data=['title']]
label_saves(name + '.png') |
def provider_for_url(self, url):
"""
Find the right provider for a URL
"""
for provider, regex in self.get_registry().items():
if re.match(regex, url) is not None:
return provider
raise OEmbedMissingEndpoint('No endpoint matches URL: %s' % url) | def function[provider_for_url, parameter[self, url]]:
constant[
Find the right provider for a URL
]
for taget[tuple[[<ast.Name object at 0x7da18ede4490>, <ast.Name object at 0x7da18ede46a0>]]] in starred[call[call[name[self].get_registry, parameter[]].items, parameter[]]] begin[:]
if compare[call[name[re].match, parameter[name[regex], name[url]]] is_not constant[None]] begin[:]
return[name[provider]]
<ast.Raise object at 0x7da204346ec0> | keyword[def] identifier[provider_for_url] ( identifier[self] , identifier[url] ):
literal[string]
keyword[for] identifier[provider] , identifier[regex] keyword[in] identifier[self] . identifier[get_registry] (). identifier[items] ():
keyword[if] identifier[re] . identifier[match] ( identifier[regex] , identifier[url] ) keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[provider]
keyword[raise] identifier[OEmbedMissingEndpoint] ( literal[string] % identifier[url] ) | def provider_for_url(self, url):
"""
Find the right provider for a URL
"""
for (provider, regex) in self.get_registry().items():
if re.match(regex, url) is not None:
return provider # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
raise OEmbedMissingEndpoint('No endpoint matches URL: %s' % url) |
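A self-contained sketch of the registry pattern above: providers keyed to URL regexes, first re.match wins. OEmbedMissingEndpoint is swapped for a stdlib LookupError here, and the registered provider is made up:

import re

class ProviderRegistry:
    def __init__(self):
        self._registry = {}  # provider -> URL regex

    def register(self, provider, regex):
        self._registry[provider] = regex

    def get_registry(self):
        return self._registry

    def provider_for_url(self, url):
        for provider, regex in self.get_registry().items():
            if re.match(regex, url) is not None:
                return provider
        raise LookupError('No endpoint matches URL: %s' % url)

reg = ProviderRegistry()
reg.register('youtube', r'https?://(www\.)?youtube\.com/watch.+')
assert reg.provider_for_url('https://www.youtube.com/watch?v=abc') == 'youtube'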
def output_of(*cmd: Optional[str], **kwargs) -> str:
"""Invokes a subprocess and returns its output as a string.
Args:
cmd: Components of the command to execute, e.g. ["echo", "dog"].
**kwargs: Extra arguments for asyncio.create_subprocess_shell, such as
a cwd (current working directory) argument.
Returns:
The captured standard output of the command, as a string with any
trailing newline removed.
Raises:
subprocess.CalledProcessError: The process returned a non-zero error
code and raise_on_fail was set.
"""
result = cast(str, run_cmd(*cmd,
log_run_to_stderr=False,
out=TeeCapture(),
**kwargs).out)
# Strip final newline.
if result.endswith('\n'):
result = result[:-1]
return result | def function[output_of, parameter[]]:
constant[Invokes a subprocess and returns its output as a string.
Args:
cmd: Components of the command to execute, e.g. ["echo", "dog"].
**kwargs: Extra arguments for asyncio.create_subprocess_shell, such as
a cwd (current working directory) argument.
Returns:
The captured standard output of the command, as a string with any
trailing newline removed.
Raises:
subprocess.CalledProcessError: The process returned a non-zero error
code and raise_on_fail was set.
]
variable[result] assign[=] call[name[cast], parameter[name[str], call[name[run_cmd], parameter[<ast.Starred object at 0x7da1b1ce9060>]].out]]
if call[name[result].endswith, parameter[constant[
]]] begin[:]
variable[result] assign[=] call[name[result]][<ast.Slice object at 0x7da1b1ce8340>]
return[name[result]] | keyword[def] identifier[output_of] (* identifier[cmd] : identifier[Optional] [ identifier[str] ],** identifier[kwargs] )-> identifier[str] :
literal[string]
identifier[result] = identifier[cast] ( identifier[str] , identifier[run_cmd] (* identifier[cmd] ,
identifier[log_run_to_stderr] = keyword[False] ,
identifier[out] = identifier[TeeCapture] (),
** identifier[kwargs] ). identifier[out] )
keyword[if] identifier[result] . identifier[endswith] ( literal[string] ):
identifier[result] = identifier[result] [:- literal[int] ]
keyword[return] identifier[result] | def output_of(*cmd: Optional[str], **kwargs) -> str:
"""Invokes a subprocess and returns its output as a string.
Args:
cmd: Components of the command to execute, e.g. ["echo", "dog"].
**kwargs: Extra arguments for asyncio.create_subprocess_shell, such as
a cwd (current working directory) argument.
Returns:
The captured standard output of the command, as a string with any
trailing newline removed.
Raises:
subprocess.CalledProcessError: The process returned a non-zero error
code and raise_on_fail was set.
"""
result = cast(str, run_cmd(*cmd, log_run_to_stderr=False, out=TeeCapture(), **kwargs).out)
# Strip final newline.
if result.endswith('\n'):
result = result[:-1] # depends on [control=['if'], data=[]]
return result |
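run_cmd and TeeCapture come from the same module (not shown): TeeCapture() asks run_cmd to capture stdout, and .out holds the captured text. Assuming those helpers, usage reduces to:

# 'echo' must be on PATH; the trailing newline is stripped from the result.
assert output_of('echo', 'dog') == 'dog'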
def retry_after(cls, response, default=5, _now=time.time):
"""
Parse the Retry-After value from a response.
"""
val = response.headers.getRawHeaders(b'retry-after', [default])[0]
try:
return int(val)
except ValueError:
return http.stringToDatetime(val) - _now() | def function[retry_after, parameter[cls, response, default, _now]]:
constant[
Parse the Retry-After value from a response.
]
variable[val] assign[=] call[call[name[response].headers.getRawHeaders, parameter[constant[b'retry-after'], list[[<ast.Name object at 0x7da18f58f040>]]]]][constant[0]]
<ast.Try object at 0x7da18f58dab0> | keyword[def] identifier[retry_after] ( identifier[cls] , identifier[response] , identifier[default] = literal[int] , identifier[_now] = identifier[time] . identifier[time] ):
literal[string]
identifier[val] = identifier[response] . identifier[headers] . identifier[getRawHeaders] ( literal[string] ,[ identifier[default] ])[ literal[int] ]
keyword[try] :
keyword[return] identifier[int] ( identifier[val] )
keyword[except] identifier[ValueError] :
keyword[return] identifier[http] . identifier[stringToDatetime] ( identifier[val] )- identifier[_now] () | def retry_after(cls, response, default=5, _now=time.time):
"""
Parse the Retry-After value from a response.
"""
val = response.headers.getRawHeaders(b'retry-after', [default])[0]
try:
return int(val) # depends on [control=['try'], data=[]]
except ValueError:
return http.stringToDatetime(val) - _now() # depends on [control=['except'], data=[]] |
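Retry-After carries either delta-seconds or an HTTP-date. A stdlib-only sketch of the same parse, with Twisted's http.stringToDatetime swapped for email.utils (parse_retry_after is a made-up name):

from email.utils import parsedate_to_datetime
import time

def parse_retry_after(value, _now=time.time):
    try:
        return int(value)  # delta-seconds form, e.g. '120'
    except ValueError:
        # HTTP-date form: seconds from now until that instant (may be negative).
        return parsedate_to_datetime(value).timestamp() - _now()

assert parse_retry_after('120') == 120
print(parse_retry_after('Wed, 21 Oct 2015 07:28:00 GMT'))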
def _set_traffic_eng(self, v, load=False):
"""
Setter method for traffic_eng, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/interface_dynamic_bypass/mpls_interface_dynamic_bypass_sub_cmds/traffic_eng (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_traffic_eng is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_traffic_eng() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=traffic_eng.traffic_eng, is_container='container', presence=False, yang_name="traffic-eng", rest_name="traffic-eng", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'interface level dynamic bypass traffic engineering parameters', u'cli-full-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """traffic_eng must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=traffic_eng.traffic_eng, is_container='container', presence=False, yang_name="traffic-eng", rest_name="traffic-eng", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'interface level dynamic bypass traffic engineering parameters', u'cli-full-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
})
self.__traffic_eng = t
if hasattr(self, '_set'):
self._set() | def function[_set_traffic_eng, parameter[self, v, load]]:
constant[
Setter method for traffic_eng, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/interface_dynamic_bypass/mpls_interface_dynamic_bypass_sub_cmds/traffic_eng (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_traffic_eng is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_traffic_eng() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18bccb6a0>
name[self].__traffic_eng assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_traffic_eng] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[traffic_eng] . identifier[traffic_eng] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__traffic_eng] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_traffic_eng(self, v, load=False):
"""
Setter method for traffic_eng, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/interface_dynamic_bypass/mpls_interface_dynamic_bypass_sub_cmds/traffic_eng (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_traffic_eng is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_traffic_eng() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=traffic_eng.traffic_eng, is_container='container', presence=False, yang_name='traffic-eng', rest_name='traffic-eng', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'info': u'interface level dynamic bypass traffic engineering parameters', u'cli-full-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'traffic_eng must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=traffic_eng.traffic_eng, is_container=\'container\', presence=False, yang_name="traffic-eng", rest_name="traffic-eng", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'cli-compact-syntax\': None, u\'info\': u\'interface level dynamic bypass traffic engineering parameters\', u\'cli-full-no\': None, u\'cli-incomplete-command\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-mpls\', defining_module=\'brocade-mpls\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__traffic_eng = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
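The generated setter pattern above can be distilled into a short hypothetical sketch (names are illustrative, not pyangbind API): validate by constructing a typed container, and report failures as a structured ValueError:

def set_checked(obj, attr, value, container_type):
    # Construct the validating container; incompatible input raises.
    try:
        wrapped = container_type(value)
    except (TypeError, ValueError):
        # Mirror the structured error dict used by the generated code.
        raise ValueError({
            'error-string': '%s must be of a type compatible with container' % attr,
            'defined-type': 'container',
        })
    setattr(obj, '_' + attr, wrapped)

class Node:
    pass

set_checked(Node(), 'traffic_eng', {'enabled': True}, dict)  # ok
try:
    set_checked(Node(), 'traffic_eng', 5, dict)              # dict(5) -> TypeError
except ValueError as e:
    print(e.args[0]['error-string'])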
def getv(self, r):
""" Like the above, but returns the <int> value.
"""
v = self.get(r)
if not is_unknown(v):
try:
v = int(v)
except ValueError:
v = None
else:
v = None
return v | def function[getv, parameter[self, r]]:
constant[ Like get() above, but returns the value as an <int> (or None).
]
variable[v] assign[=] call[name[self].get, parameter[name[r]]]
if <ast.UnaryOp object at 0x7da20c6c4cd0> begin[:]
<ast.Try object at 0x7da20c6c4dc0>
return[name[v]] | keyword[def] identifier[getv] ( identifier[self] , identifier[r] ):
literal[string]
identifier[v] = identifier[self] . identifier[get] ( identifier[r] )
keyword[if] keyword[not] identifier[is_unknown] ( identifier[v] ):
keyword[try] :
identifier[v] = identifier[int] ( identifier[v] )
keyword[except] identifier[ValueError] :
identifier[v] = keyword[None]
keyword[else] :
identifier[v] = keyword[None]
keyword[return] identifier[v] | def getv(self, r):
""" Like the above, but returns the <int> value.
"""
v = self.get(r)
if not is_unknown(v):
try:
v = int(v) # depends on [control=['try'], data=[]]
except ValueError:
v = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
v = None
return v |
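A hypothetical self-contained demonstration of the int-or-None contract; `is_unknown` here is an assumed stand-in for the helper used above:

def is_unknown(v):
    # Hypothetical stand-in: treat None or '?' as "unknown".
    return v is None or v == '?'

def to_int_or_none(raw):
    v = raw
    if not is_unknown(v):
        try:
            v = int(v)
        except ValueError:
            v = None
    else:
        v = None
    return v

print(to_int_or_none('42'), to_int_or_none('label'), to_int_or_none('?'))  # 42 None None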
def trim(self, lower=None, upper=None):
"""Trim upper values in accordance with :math:`IC \\leq ICMAX`.
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(5)
>>> icmax(2.0)
>>> states.ic(-1.0, 0.0, 1.0, 2.0, 3.0)
>>> states.ic
ic(0.0, 0.0, 1.0, 2.0, 2.0)
"""
if upper is None:
control = self.subseqs.seqs.model.parameters.control
upper = control.icmax
hland_sequences.State1DSequence.trim(self, lower, upper) | def function[trim, parameter[self, lower, upper]]:
constant[Trim upper values in accordance with :math:`IC \leq ICMAX`.
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(5)
>>> icmax(2.0)
>>> states.ic(-1.0, 0.0, 1.0, 2.0, 3.0)
>>> states.ic
ic(0.0, 0.0, 1.0, 2.0, 2.0)
]
if compare[name[upper] is constant[None]] begin[:]
variable[control] assign[=] name[self].subseqs.seqs.model.parameters.control
variable[upper] assign[=] name[control].icmax
call[name[hland_sequences].State1DSequence.trim, parameter[name[self], name[lower], name[upper]]] | keyword[def] identifier[trim] ( identifier[self] , identifier[lower] = keyword[None] , identifier[upper] = keyword[None] ):
literal[string]
keyword[if] identifier[upper] keyword[is] keyword[None] :
identifier[control] = identifier[self] . identifier[subseqs] . identifier[seqs] . identifier[model] . identifier[parameters] . identifier[control]
identifier[upper] = identifier[control] . identifier[icmax]
identifier[hland_sequences] . identifier[State1DSequence] . identifier[trim] ( identifier[self] , identifier[lower] , identifier[upper] ) | def trim(self, lower=None, upper=None):
"""Trim upper values in accordance with :math:`IC \\leq ICMAX`.
>>> from hydpy.models.hland import *
>>> parameterstep('1d')
>>> nmbzones(5)
>>> icmax(2.0)
>>> states.ic(-1.0, 0.0, 1.0, 2.0, 3.0)
>>> states.ic
ic(0.0, 0.0, 1.0, 2.0, 2.0)
"""
if upper is None:
control = self.subseqs.seqs.model.parameters.control
upper = control.icmax # depends on [control=['if'], data=['upper']]
hland_sequences.State1DSequence.trim(self, lower, upper) |
def proccesser_markdown(lowstate_item, config, **kwargs):
'''
    Takes low state data and returns a dict of processed data
    that is by default used in a jinja template when rendering a markdown highstate_doc.
    Given a lowstate item, this returns a dict like:
    .. code-block:: yaml
        vars: # the raw lowstate_item that was processed
id: # the 'id' of the state.
id_full: # combo of the state type and id "state: id"
state: # name of the salt state module
function: # name of the state function
name: # value of 'name:' passed to the salt state module
state_function: # the state name and function name
markdown: # text data to describe a state
requisites: # requisite like [watch_in, require_in]
details: # state name, parameters and other details like file contents
'''
# TODO: switch or ... ext call.
s = lowstate_item
state_function = '{0}.{1}'.format(s['state'], s['fun'])
id_full = '{0}: {1}'.format(s['state'], s['__id__'])
# TODO: use salt defined STATE_REQUISITE_IN_KEYWORDS
requisites = ''
if s.get('watch'):
requisites += 'run or update after changes in:\n'
for w in s.get('watch', []):
requisites += _format_markdown_requisite(w.items()[0][0], w.items()[0][1])
requisites += '\n'
if s.get('watch_in'):
requisites += 'after changes, run or update:\n'
for w in s.get('watch_in', []):
requisites += _format_markdown_requisite(w.items()[0][0], w.items()[0][1])
requisites += '\n'
    if s.get('require'):
requisites += 'require:\n'
for w in s.get('require', []):
requisites += _format_markdown_requisite(w.items()[0][0], w.items()[0][1])
requisites += '\n'
if s.get('require_in'):
requisites += 'required in:\n'
for w in s.get('require_in', []):
requisites += _format_markdown_requisite(w.items()[0][0], w.items()[0][1])
requisites += '\n'
details = ''
if state_function == 'highstate_doc.note':
if 'contents' in s:
details += '\n{0}\n'.format(s['contents'])
if 'source' in s:
text = __salt__['cp.get_file_str'](s['source'])
if text:
details += '\n{0}\n'.format(text)
else:
details += '\n{0}\n'.format('ERROR: opening {0}'.format(s['source']))
if state_function == 'pkg.installed':
pkgs = s.get('pkgs', s.get('name'))
details += '\n```\ninstall: {0}\n```\n'.format(pkgs)
if state_function == 'file.recurse':
details += '''recurse copy of files\n'''
y = _state_data_to_yaml_string(s)
if y:
details += '```\n{0}\n```\n'.format(y)
if '!doc_recurse' in id_full:
findfiles = __salt__['file.find'](path=s.get('name'), type='f')
if len(findfiles) < 10 or '!doc_recurse_force' in id_full:
for f in findfiles:
details += _format_markdown_system_file(f, config)
else:
details += ''' > Skipping because more than 10 files to display.\n'''
details += ''' > HINT: to force include !doc_recurse_force in state id.\n'''
else:
details += ''' > For more details review logs and Salt state files.\n\n'''
details += ''' > HINT: for improved docs use multiple file.managed states or file.archive, git.latest. etc.\n'''
details += ''' > HINT: to force doc to show all files in path add !doc_recurse .\n'''
if state_function == 'file.blockreplace':
if s.get('content'):
details += 'ensure block of content is in file\n```\n{0}\n```\n'.format(_md_fix(s['content']))
if s.get('source'):
text = '** source: ' + s.get('source')
details += 'ensure block of content is in file\n```\n{0}\n```\n'.format(_md_fix(text))
if state_function == 'file.managed':
details += _format_markdown_system_file(s['name'], config)
# if no state doc is created use default state as yaml
if not details:
y = _state_data_to_yaml_string(s)
if y:
details += '```\n{0}```\n'.format(y)
r = {
'vars': lowstate_item,
'state': s['state'],
'name': s['name'],
'function': s['fun'],
'id': s['__id__'],
'id_full': id_full,
'state_function': state_function,
'markdown': {
'requisites': requisites.decode('utf-8'),
'details': details.decode('utf-8')
}
}
return r | def function[proccesser_markdown, parameter[lowstate_item, config]]:
constant[
    Takes low state data and returns a dict of processed data
    that is by default used in a jinja template when rendering a markdown highstate_doc.
    Given a lowstate item, this returns a dict like:
    .. code-block:: yaml
        vars: # the raw lowstate_item that was processed
id: # the 'id' of the state.
id_full: # combo of the state type and id "state: id"
state: # name of the salt state module
function: # name of the state function
name: # value of 'name:' passed to the salt state module
state_function: # the state name and function name
markdown: # text data to describe a state
requisites: # requisite like [watch_in, require_in]
details: # state name, parameters and other details like file contents
]
variable[s] assign[=] name[lowstate_item]
variable[state_function] assign[=] call[constant[{0}.{1}].format, parameter[call[name[s]][constant[state]], call[name[s]][constant[fun]]]]
variable[id_full] assign[=] call[constant[{0}: {1}].format, parameter[call[name[s]][constant[state]], call[name[s]][constant[__id__]]]]
variable[requisites] assign[=] constant[]
if call[name[s].get, parameter[constant[watch]]] begin[:]
<ast.AugAssign object at 0x7da1b210b2b0>
for taget[name[w]] in starred[call[name[s].get, parameter[constant[watch], list[[]]]]] begin[:]
<ast.AugAssign object at 0x7da1b210b340>
<ast.AugAssign object at 0x7da1b210b8e0>
if call[name[s].get, parameter[constant[watch_in]]] begin[:]
<ast.AugAssign object at 0x7da1b2108490>
for taget[name[w]] in starred[call[name[s].get, parameter[constant[watch_in], list[[]]]]] begin[:]
<ast.AugAssign object at 0x7da1b210a140>
<ast.AugAssign object at 0x7da1b210b310>
if <ast.BoolOp object at 0x7da1b210b130> begin[:]
<ast.AugAssign object at 0x7da1b210a200>
for taget[name[w]] in starred[call[name[s].get, parameter[constant[require], list[[]]]]] begin[:]
<ast.AugAssign object at 0x7da1b2109660>
<ast.AugAssign object at 0x7da1b2109090>
if call[name[s].get, parameter[constant[require_in]]] begin[:]
<ast.AugAssign object at 0x7da1b2108940>
for taget[name[w]] in starred[call[name[s].get, parameter[constant[require_in], list[[]]]]] begin[:]
<ast.AugAssign object at 0x7da1b2108e50>
<ast.AugAssign object at 0x7da1b2108d90>
variable[details] assign[=] constant[]
if compare[name[state_function] equal[==] constant[highstate_doc.note]] begin[:]
if compare[constant[contents] in name[s]] begin[:]
<ast.AugAssign object at 0x7da1b21094b0>
if compare[constant[source] in name[s]] begin[:]
variable[text] assign[=] call[call[name[__salt__]][constant[cp.get_file_str]], parameter[call[name[s]][constant[source]]]]
if name[text] begin[:]
<ast.AugAssign object at 0x7da1b2109f90>
if compare[name[state_function] equal[==] constant[pkg.installed]] begin[:]
variable[pkgs] assign[=] call[name[s].get, parameter[constant[pkgs], call[name[s].get, parameter[constant[name]]]]]
<ast.AugAssign object at 0x7da1b210b730>
if compare[name[state_function] equal[==] constant[file.recurse]] begin[:]
<ast.AugAssign object at 0x7da1b210bdc0>
variable[y] assign[=] call[name[_state_data_to_yaml_string], parameter[name[s]]]
if name[y] begin[:]
<ast.AugAssign object at 0x7da1b210b700>
if compare[constant[!doc_recurse] in name[id_full]] begin[:]
variable[findfiles] assign[=] call[call[name[__salt__]][constant[file.find]], parameter[]]
if <ast.BoolOp object at 0x7da1b210aef0> begin[:]
for taget[name[f]] in starred[name[findfiles]] begin[:]
<ast.AugAssign object at 0x7da1b2109ab0>
if compare[name[state_function] equal[==] constant[file.blockreplace]] begin[:]
if call[name[s].get, parameter[constant[content]]] begin[:]
<ast.AugAssign object at 0x7da1b1f97c70>
if call[name[s].get, parameter[constant[source]]] begin[:]
variable[text] assign[=] binary_operation[constant[** source: ] + call[name[s].get, parameter[constant[source]]]]
<ast.AugAssign object at 0x7da1b1f94760>
if compare[name[state_function] equal[==] constant[file.managed]] begin[:]
<ast.AugAssign object at 0x7da1b1f94d60>
if <ast.UnaryOp object at 0x7da1b1f95b40> begin[:]
variable[y] assign[=] call[name[_state_data_to_yaml_string], parameter[name[s]]]
if name[y] begin[:]
<ast.AugAssign object at 0x7da1b1f947f0>
variable[r] assign[=] dictionary[[<ast.Constant object at 0x7da1b1f95240>, <ast.Constant object at 0x7da1b1f971f0>, <ast.Constant object at 0x7da1b1f970d0>, <ast.Constant object at 0x7da1b1f94d90>, <ast.Constant object at 0x7da1b1f953c0>, <ast.Constant object at 0x7da1b1f94970>, <ast.Constant object at 0x7da1b1f951e0>, <ast.Constant object at 0x7da1b1f94eb0>], [<ast.Name object at 0x7da1b1f96b00>, <ast.Subscript object at 0x7da1b1f952a0>, <ast.Subscript object at 0x7da1b1f97dc0>, <ast.Subscript object at 0x7da1b1f940d0>, <ast.Subscript object at 0x7da1b1f97160>, <ast.Name object at 0x7da1b1f97190>, <ast.Name object at 0x7da1b1f96ef0>, <ast.Dict object at 0x7da1b1f94850>]]
return[name[r]] | keyword[def] identifier[proccesser_markdown] ( identifier[lowstate_item] , identifier[config] ,** identifier[kwargs] ):
literal[string]
identifier[s] = identifier[lowstate_item]
identifier[state_function] = literal[string] . identifier[format] ( identifier[s] [ literal[string] ], identifier[s] [ literal[string] ])
identifier[id_full] = literal[string] . identifier[format] ( identifier[s] [ literal[string] ], identifier[s] [ literal[string] ])
identifier[requisites] = literal[string]
keyword[if] identifier[s] . identifier[get] ( literal[string] ):
identifier[requisites] += literal[string]
keyword[for] identifier[w] keyword[in] identifier[s] . identifier[get] ( literal[string] ,[]):
identifier[requisites] += identifier[_format_markdown_requisite] ( identifier[w] . identifier[items] ()[ literal[int] ][ literal[int] ], identifier[w] . identifier[items] ()[ literal[int] ][ literal[int] ])
identifier[requisites] += literal[string]
keyword[if] identifier[s] . identifier[get] ( literal[string] ):
identifier[requisites] += literal[string]
keyword[for] identifier[w] keyword[in] identifier[s] . identifier[get] ( literal[string] ,[]):
identifier[requisites] += identifier[_format_markdown_requisite] ( identifier[w] . identifier[items] ()[ literal[int] ][ literal[int] ], identifier[w] . identifier[items] ()[ literal[int] ][ literal[int] ])
identifier[requisites] += literal[string]
keyword[if] identifier[s] . identifier[get] ( literal[string] ):
identifier[requisites] += literal[string]
keyword[for] identifier[w] keyword[in] identifier[s] . identifier[get] ( literal[string] ,[]):
identifier[requisites] += identifier[_format_markdown_requisite] ( identifier[w] . identifier[items] ()[ literal[int] ][ literal[int] ], identifier[w] . identifier[items] ()[ literal[int] ][ literal[int] ])
identifier[requisites] += literal[string]
keyword[if] identifier[s] . identifier[get] ( literal[string] ):
identifier[requisites] += literal[string]
keyword[for] identifier[w] keyword[in] identifier[s] . identifier[get] ( literal[string] ,[]):
identifier[requisites] += identifier[_format_markdown_requisite] ( identifier[w] . identifier[items] ()[ literal[int] ][ literal[int] ], identifier[w] . identifier[items] ()[ literal[int] ][ literal[int] ])
identifier[requisites] += literal[string]
identifier[details] = literal[string]
keyword[if] identifier[state_function] == literal[string] :
keyword[if] literal[string] keyword[in] identifier[s] :
identifier[details] += literal[string] . identifier[format] ( identifier[s] [ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[s] :
identifier[text] = identifier[__salt__] [ literal[string] ]( identifier[s] [ literal[string] ])
keyword[if] identifier[text] :
identifier[details] += literal[string] . identifier[format] ( identifier[text] )
keyword[else] :
identifier[details] += literal[string] . identifier[format] ( literal[string] . identifier[format] ( identifier[s] [ literal[string] ]))
keyword[if] identifier[state_function] == literal[string] :
identifier[pkgs] = identifier[s] . identifier[get] ( literal[string] , identifier[s] . identifier[get] ( literal[string] ))
identifier[details] += literal[string] . identifier[format] ( identifier[pkgs] )
keyword[if] identifier[state_function] == literal[string] :
identifier[details] += literal[string]
identifier[y] = identifier[_state_data_to_yaml_string] ( identifier[s] )
keyword[if] identifier[y] :
identifier[details] += literal[string] . identifier[format] ( identifier[y] )
keyword[if] literal[string] keyword[in] identifier[id_full] :
identifier[findfiles] = identifier[__salt__] [ literal[string] ]( identifier[path] = identifier[s] . identifier[get] ( literal[string] ), identifier[type] = literal[string] )
keyword[if] identifier[len] ( identifier[findfiles] )< literal[int] keyword[or] literal[string] keyword[in] identifier[id_full] :
keyword[for] identifier[f] keyword[in] identifier[findfiles] :
identifier[details] += identifier[_format_markdown_system_file] ( identifier[f] , identifier[config] )
keyword[else] :
identifier[details] += literal[string]
identifier[details] += literal[string]
keyword[else] :
identifier[details] += literal[string]
identifier[details] += literal[string]
identifier[details] += literal[string]
keyword[if] identifier[state_function] == literal[string] :
keyword[if] identifier[s] . identifier[get] ( literal[string] ):
identifier[details] += literal[string] . identifier[format] ( identifier[_md_fix] ( identifier[s] [ literal[string] ]))
keyword[if] identifier[s] . identifier[get] ( literal[string] ):
identifier[text] = literal[string] + identifier[s] . identifier[get] ( literal[string] )
identifier[details] += literal[string] . identifier[format] ( identifier[_md_fix] ( identifier[text] ))
keyword[if] identifier[state_function] == literal[string] :
identifier[details] += identifier[_format_markdown_system_file] ( identifier[s] [ literal[string] ], identifier[config] )
keyword[if] keyword[not] identifier[details] :
identifier[y] = identifier[_state_data_to_yaml_string] ( identifier[s] )
keyword[if] identifier[y] :
identifier[details] += literal[string] . identifier[format] ( identifier[y] )
identifier[r] ={
literal[string] : identifier[lowstate_item] ,
literal[string] : identifier[s] [ literal[string] ],
literal[string] : identifier[s] [ literal[string] ],
literal[string] : identifier[s] [ literal[string] ],
literal[string] : identifier[s] [ literal[string] ],
literal[string] : identifier[id_full] ,
literal[string] : identifier[state_function] ,
literal[string] :{
literal[string] : identifier[requisites] . identifier[decode] ( literal[string] ),
literal[string] : identifier[details] . identifier[decode] ( literal[string] )
}
}
keyword[return] identifier[r] | def proccesser_markdown(lowstate_item, config, **kwargs):
"""
    Takes low state data and returns a dict of processed data
    that is by default used in a jinja template when rendering a markdown highstate_doc.
    Given a lowstate item, this returns a dict like:
    .. code-block:: yaml
        vars: # the raw lowstate_item that was processed
id: # the 'id' of the state.
id_full: # combo of the state type and id "state: id"
state: # name of the salt state module
function: # name of the state function
name: # value of 'name:' passed to the salt state module
state_function: # the state name and function name
markdown: # text data to describe a state
requisites: # requisite like [watch_in, require_in]
details: # state name, parameters and other details like file contents
"""
# TODO: switch or ... ext call.
s = lowstate_item
state_function = '{0}.{1}'.format(s['state'], s['fun'])
id_full = '{0}: {1}'.format(s['state'], s['__id__'])
# TODO: use salt defined STATE_REQUISITE_IN_KEYWORDS
requisites = ''
if s.get('watch'):
requisites += 'run or update after changes in:\n'
for w in s.get('watch', []):
requisites += _format_markdown_requisite(w.items()[0][0], w.items()[0][1]) # depends on [control=['for'], data=['w']]
requisites += '\n' # depends on [control=['if'], data=[]]
if s.get('watch_in'):
requisites += 'after changes, run or update:\n'
for w in s.get('watch_in', []):
requisites += _format_markdown_requisite(w.items()[0][0], w.items()[0][1]) # depends on [control=['for'], data=['w']]
requisites += '\n' # depends on [control=['if'], data=[]]
    if s.get('require'):
requisites += 'require:\n'
for w in s.get('require', []):
requisites += _format_markdown_requisite(w.items()[0][0], w.items()[0][1]) # depends on [control=['for'], data=['w']]
requisites += '\n' # depends on [control=['if'], data=[]]
if s.get('require_in'):
requisites += 'required in:\n'
for w in s.get('require_in', []):
requisites += _format_markdown_requisite(w.items()[0][0], w.items()[0][1]) # depends on [control=['for'], data=['w']]
requisites += '\n' # depends on [control=['if'], data=[]]
details = ''
if state_function == 'highstate_doc.note':
if 'contents' in s:
details += '\n{0}\n'.format(s['contents']) # depends on [control=['if'], data=['s']]
if 'source' in s:
text = __salt__['cp.get_file_str'](s['source'])
if text:
details += '\n{0}\n'.format(text) # depends on [control=['if'], data=[]]
else:
details += '\n{0}\n'.format('ERROR: opening {0}'.format(s['source'])) # depends on [control=['if'], data=['s']] # depends on [control=['if'], data=[]]
if state_function == 'pkg.installed':
pkgs = s.get('pkgs', s.get('name'))
details += '\n```\ninstall: {0}\n```\n'.format(pkgs) # depends on [control=['if'], data=[]]
if state_function == 'file.recurse':
details += 'recurse copy of files\n'
y = _state_data_to_yaml_string(s)
if y:
details += '```\n{0}\n```\n'.format(y) # depends on [control=['if'], data=[]]
if '!doc_recurse' in id_full:
findfiles = __salt__['file.find'](path=s.get('name'), type='f')
if len(findfiles) < 10 or '!doc_recurse_force' in id_full:
for f in findfiles:
details += _format_markdown_system_file(f, config) # depends on [control=['for'], data=['f']] # depends on [control=['if'], data=[]]
else:
details += ' > Skipping because more than 10 files to display.\n'
details += ' > HINT: to force include !doc_recurse_force in state id.\n' # depends on [control=['if'], data=['id_full']]
else:
details += ' > For more details review logs and Salt state files.\n\n'
details += ' > HINT: for improved docs use multiple file.managed states or file.archive, git.latest. etc.\n'
details += ' > HINT: to force doc to show all files in path add !doc_recurse .\n' # depends on [control=['if'], data=[]]
if state_function == 'file.blockreplace':
if s.get('content'):
details += 'ensure block of content is in file\n```\n{0}\n```\n'.format(_md_fix(s['content'])) # depends on [control=['if'], data=[]]
if s.get('source'):
text = '** source: ' + s.get('source')
details += 'ensure block of content is in file\n```\n{0}\n```\n'.format(_md_fix(text)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if state_function == 'file.managed':
details += _format_markdown_system_file(s['name'], config) # depends on [control=['if'], data=[]]
# if no state doc is created use default state as yaml
if not details:
y = _state_data_to_yaml_string(s)
if y:
details += '```\n{0}```\n'.format(y) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
r = {'vars': lowstate_item, 'state': s['state'], 'name': s['name'], 'function': s['fun'], 'id': s['__id__'], 'id_full': id_full, 'state_function': state_function, 'markdown': {'requisites': requisites.decode('utf-8'), 'details': details.decode('utf-8')}}
return r |
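The dict this returns is meant to be fed to a Jinja template. A hypothetical sketch of its shape for a simple pkg.installed lowstate item (values illustrative; the `.decode('utf-8')` calls above assume Python-2 byte strings):

expected = {
    'vars': {'state': 'pkg', 'fun': 'installed',
             '__id__': 'install_nginx', 'name': 'nginx'},
    'state': 'pkg',
    'name': 'nginx',
    'function': 'installed',
    'id': 'install_nginx',
    'id_full': 'pkg: install_nginx',
    'state_function': 'pkg.installed',
    'markdown': {
        'requisites': u'',
        'details': u'\n```\ninstall: nginx\n```\n',
    },
}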
def clear_imgs(self) -> None:
"Clear the widget's images preview pane."
self._preview_header.value = self._heading
self._img_pane.children = tuple() | def function[clear_imgs, parameter[self]]:
constant[Clear the widget's images preview pane.]
name[self]._preview_header.value assign[=] name[self]._heading
name[self]._img_pane.children assign[=] call[name[tuple], parameter[]] | keyword[def] identifier[clear_imgs] ( identifier[self] )-> keyword[None] :
literal[string]
identifier[self] . identifier[_preview_header] . identifier[value] = identifier[self] . identifier[_heading]
identifier[self] . identifier[_img_pane] . identifier[children] = identifier[tuple] () | def clear_imgs(self) -> None:
"""Clear the widget's images preview pane."""
self._preview_header.value = self._heading
self._img_pane.children = tuple() |
def get_value(self, sid, dt, field):
"""
Parameters
----------
sid : int
The asset identifier.
        dt : datetime64-like
            Midnight of the day for which data is requested.
        field : string
            The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
        Returns
        -------
        float
            The spot price for field of the given sid on the given day.
            Raises a NoDataOnDate exception if the given day is before
            or after the date range of the equity.
            Returns ``nan`` if the day is within the date range, but the
            price is 0.
"""
ix = self.sid_day_index(sid, dt)
price = self._spot_col(field)[ix]
if field != 'volume':
if price == 0:
return nan
else:
return price * 0.001
else:
return price | def function[get_value, parameter[self, sid, dt, field]]:
constant[
Parameters
----------
sid : int
The asset identifier.
        dt : datetime64-like
            Midnight of the day for which data is requested.
        field : string
            The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
        Returns
        -------
        float
            The spot price for field of the given sid on the given day.
            Raises a NoDataOnDate exception if the given day is before
            or after the date range of the equity.
            Returns ``nan`` if the day is within the date range, but the
            price is 0.
]
variable[ix] assign[=] call[name[self].sid_day_index, parameter[name[sid], name[dt]]]
variable[price] assign[=] call[call[name[self]._spot_col, parameter[name[field]]]][name[ix]]
if compare[name[field] not_equal[!=] constant[volume]] begin[:]
if compare[name[price] equal[==] constant[0]] begin[:]
return[name[nan]] | keyword[def] identifier[get_value] ( identifier[self] , identifier[sid] , identifier[dt] , identifier[field] ):
literal[string]
identifier[ix] = identifier[self] . identifier[sid_day_index] ( identifier[sid] , identifier[dt] )
identifier[price] = identifier[self] . identifier[_spot_col] ( identifier[field] )[ identifier[ix] ]
keyword[if] identifier[field] != literal[string] :
keyword[if] identifier[price] == literal[int] :
keyword[return] identifier[nan]
keyword[else] :
keyword[return] identifier[price] * literal[int]
keyword[else] :
keyword[return] identifier[price] | def get_value(self, sid, dt, field):
"""
Parameters
----------
sid : int
The asset identifier.
        dt : datetime64-like
            Midnight of the day for which data is requested.
        field : string
            The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
        Returns
        -------
        float
            The spot price for field of the given sid on the given day.
            Raises a NoDataOnDate exception if the given day is before
            or after the date range of the equity.
            Returns ``nan`` if the day is within the date range, but the
            price is 0.
"""
ix = self.sid_day_index(sid, dt)
price = self._spot_col(field)[ix]
if field != 'volume':
if price == 0:
return nan # depends on [control=['if'], data=[]]
else:
return price * 0.001 # depends on [control=['if'], data=[]]
else:
return price |
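The price scaling rule can be shown in isolation: prices are stored as integer milli-units (hence the 0.001 factor), volume passes through unscaled, and a stored 0 means "no data" for price fields:

from math import nan

def scale(raw, field):
    if field != 'volume':
        # A stored 0 means "no price for this day".
        return nan if raw == 0 else raw * 0.001
    return raw

print(scale(123450, 'close'))   # ~123.45
print(scale(0, 'close'))        # nan
print(scale(5000, 'volume'))    # 5000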
def coord(self, offset=(0,0)):
'''return lat,lon within a tile given (offsetx,offsety)'''
(tilex, tiley) = self.tile
(offsetx, offsety) = offset
world_tiles = 1<<self.zoom
x = ( tilex + 1.0*offsetx/TILES_WIDTH ) / (world_tiles/2.) - 1
y = ( tiley + 1.0*offsety/TILES_HEIGHT) / (world_tiles/2.) - 1
lon = x * 180.0
y = math.exp(-y*2*math.pi)
e = (y-1)/(y+1)
lat = 180.0/math.pi * math.asin(e)
return (lat, lon) | def function[coord, parameter[self, offset]]:
constant[return lat,lon within a tile given (offsetx,offsety)]
<ast.Tuple object at 0x7da1b1790310> assign[=] name[self].tile
<ast.Tuple object at 0x7da1b17912a0> assign[=] name[offset]
variable[world_tiles] assign[=] binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> name[self].zoom]
variable[x] assign[=] binary_operation[binary_operation[binary_operation[name[tilex] + binary_operation[binary_operation[constant[1.0] * name[offsetx]] / name[TILES_WIDTH]]] / binary_operation[name[world_tiles] / constant[2.0]]] - constant[1]]
variable[y] assign[=] binary_operation[binary_operation[binary_operation[name[tiley] + binary_operation[binary_operation[constant[1.0] * name[offsety]] / name[TILES_HEIGHT]]] / binary_operation[name[world_tiles] / constant[2.0]]] - constant[1]]
variable[lon] assign[=] binary_operation[name[x] * constant[180.0]]
variable[y] assign[=] call[name[math].exp, parameter[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b1792140> * constant[2]] * name[math].pi]]]
variable[e] assign[=] binary_operation[binary_operation[name[y] - constant[1]] / binary_operation[name[y] + constant[1]]]
variable[lat] assign[=] binary_operation[binary_operation[constant[180.0] / name[math].pi] * call[name[math].asin, parameter[name[e]]]]
return[tuple[[<ast.Name object at 0x7da20c6a85e0>, <ast.Name object at 0x7da20c6ab190>]]] | keyword[def] identifier[coord] ( identifier[self] , identifier[offset] =( literal[int] , literal[int] )):
literal[string]
( identifier[tilex] , identifier[tiley] )= identifier[self] . identifier[tile]
( identifier[offsetx] , identifier[offsety] )= identifier[offset]
identifier[world_tiles] = literal[int] << identifier[self] . identifier[zoom]
identifier[x] =( identifier[tilex] + literal[int] * identifier[offsetx] / identifier[TILES_WIDTH] )/( identifier[world_tiles] / literal[int] )- literal[int]
identifier[y] =( identifier[tiley] + literal[int] * identifier[offsety] / identifier[TILES_HEIGHT] )/( identifier[world_tiles] / literal[int] )- literal[int]
identifier[lon] = identifier[x] * literal[int]
identifier[y] = identifier[math] . identifier[exp] (- identifier[y] * literal[int] * identifier[math] . identifier[pi] )
identifier[e] =( identifier[y] - literal[int] )/( identifier[y] + literal[int] )
identifier[lat] = literal[int] / identifier[math] . identifier[pi] * identifier[math] . identifier[asin] ( identifier[e] )
keyword[return] ( identifier[lat] , identifier[lon] ) | def coord(self, offset=(0, 0)):
"""return lat,lon within a tile given (offsetx,offsety)"""
(tilex, tiley) = self.tile
(offsetx, offsety) = offset
world_tiles = 1 << self.zoom
x = (tilex + 1.0 * offsetx / TILES_WIDTH) / (world_tiles / 2.0) - 1
y = (tiley + 1.0 * offsety / TILES_HEIGHT) / (world_tiles / 2.0) - 1
lon = x * 180.0
y = math.exp(-y * 2 * math.pi)
e = (y - 1) / (y + 1)
lat = 180.0 / math.pi * math.asin(e)
return (lat, lon) |
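A stand-alone version of the inverse Web-Mercator math above; the tile size is assumed to be the usual 256 pixels:

import math

TILES_WIDTH = TILES_HEIGHT = 256  # assumed slippy-map tile size

def tile_coord(tilex, tiley, zoom, offset=(0, 0)):
    offsetx, offsety = offset
    world_tiles = 1 << zoom
    x = (tilex + 1.0 * offsetx / TILES_WIDTH) / (world_tiles / 2.0) - 1
    y = (tiley + 1.0 * offsety / TILES_HEIGHT) / (world_tiles / 2.0) - 1
    lon = x * 180.0
    y = math.exp(-y * 2 * math.pi)
    lat = 180.0 / math.pi * math.asin((y - 1) / (y + 1))
    return (lat, lon)

# Top-left corner of a 2x2-tile world: the Mercator clipping latitude.
print(tile_coord(0, 0, 1))  # (~85.05, -180.0)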
def get_proc_outputs(self):
"""
        If a stored procedure has result sets and OUTPUT parameters, use this method
        after you have processed all result sets to get the values of the OUTPUT parameters.
:return: A list of output parameter values.
"""
self._session.complete_rpc()
results = [None] * len(self._session.output_params.items())
for key, param in self._session.output_params.items():
results[key] = param.value
return results | def function[get_proc_outputs, parameter[self]]:
constant[
        If a stored procedure has result sets and OUTPUT parameters, use this method
        after you have processed all result sets to get the values of the OUTPUT parameters.
:return: A list of output parameter values.
]
call[name[self]._session.complete_rpc, parameter[]]
variable[results] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b0578ac0>]] * call[name[len], parameter[call[name[self]._session.output_params.items, parameter[]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0578a90>, <ast.Name object at 0x7da1b057a560>]]] in starred[call[name[self]._session.output_params.items, parameter[]]] begin[:]
call[name[results]][name[key]] assign[=] name[param].value
return[name[results]] | keyword[def] identifier[get_proc_outputs] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_session] . identifier[complete_rpc] ()
identifier[results] =[ keyword[None] ]* identifier[len] ( identifier[self] . identifier[_session] . identifier[output_params] . identifier[items] ())
keyword[for] identifier[key] , identifier[param] keyword[in] identifier[self] . identifier[_session] . identifier[output_params] . identifier[items] ():
identifier[results] [ identifier[key] ]= identifier[param] . identifier[value]
keyword[return] identifier[results] | def get_proc_outputs(self):
"""
        If a stored procedure has result sets and OUTPUT parameters, use this method
        after you have processed all result sets to get the values of the OUTPUT parameters.
:return: A list of output parameter values.
"""
self._session.complete_rpc()
results = [None] * len(self._session.output_params.items())
for (key, param) in self._session.output_params.items():
results[key] = param.value # depends on [control=['for'], data=[]]
return results |
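A hypothetical stand-in for the session object shows how positional OUTPUT parameters are collected once the RPC completes (class and attribute names are assumptions for illustration):

class Param:
    def __init__(self, value):
        self.value = value

class FakeSession:
    # Stand-in: output_params maps positional index -> parameter object.
    def __init__(self):
        self.output_params = {0: Param(42), 1: Param('done')}
    def complete_rpc(self):
        pass  # the real session drains the remaining RPC response here

session = FakeSession()
session.complete_rpc()
results = [None] * len(session.output_params.items())
for key, param in session.output_params.items():
    results[key] = param.value
print(results)  # [42, 'done']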
def getscript(self, name):
"""Download a script from the server
See MANAGESIEVE specifications, section 2.9
:param name: script's name
:rtype: string
        :returns: the script's content on success, None otherwise
"""
code, data, content = self.__send_command(
"GETSCRIPT", [name.encode("utf-8")], withcontent=True)
if code == "OK":
lines = content.splitlines()
if self.__size_expr.match(lines[0]) is not None:
lines = lines[1:]
return u"\n".join([line.decode("utf-8") for line in lines])
return None | def function[getscript, parameter[self, name]]:
constant[Download a script from the server
See MANAGESIEVE specifications, section 2.9
:param name: script's name
:rtype: string
        :returns: the script's content on success, None otherwise
]
<ast.Tuple object at 0x7da2041da500> assign[=] call[name[self].__send_command, parameter[constant[GETSCRIPT], list[[<ast.Call object at 0x7da2041da020>]]]]
if compare[name[code] equal[==] constant[OK]] begin[:]
variable[lines] assign[=] call[name[content].splitlines, parameter[]]
if compare[call[name[self].__size_expr.match, parameter[call[name[lines]][constant[0]]]] is_not constant[None]] begin[:]
variable[lines] assign[=] call[name[lines]][<ast.Slice object at 0x7da2041d86a0>]
return[call[constant[
].join, parameter[<ast.ListComp object at 0x7da2041daf80>]]]
return[constant[None]] | keyword[def] identifier[getscript] ( identifier[self] , identifier[name] ):
literal[string]
identifier[code] , identifier[data] , identifier[content] = identifier[self] . identifier[__send_command] (
literal[string] ,[ identifier[name] . identifier[encode] ( literal[string] )], identifier[withcontent] = keyword[True] )
keyword[if] identifier[code] == literal[string] :
identifier[lines] = identifier[content] . identifier[splitlines] ()
keyword[if] identifier[self] . identifier[__size_expr] . identifier[match] ( identifier[lines] [ literal[int] ]) keyword[is] keyword[not] keyword[None] :
identifier[lines] = identifier[lines] [ literal[int] :]
keyword[return] literal[string] . identifier[join] ([ identifier[line] . identifier[decode] ( literal[string] ) keyword[for] identifier[line] keyword[in] identifier[lines] ])
keyword[return] keyword[None] | def getscript(self, name):
"""Download a script from the server
See MANAGESIEVE specifications, section 2.9
:param name: script's name
:rtype: string
        :returns: the script's content on success, None otherwise
"""
(code, data, content) = self.__send_command('GETSCRIPT', [name.encode('utf-8')], withcontent=True)
if code == 'OK':
lines = content.splitlines()
if self.__size_expr.match(lines[0]) is not None:
lines = lines[1:] # depends on [control=['if'], data=[]]
return u'\n'.join([line.decode('utf-8') for line in lines]) # depends on [control=['if'], data=[]]
return None |
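The response handling can be sketched on its own: the server may prefix the script with a "{size}" literal line, which is dropped before the remaining lines are decoded. The regex is an assumed stand-in for `__size_expr`:

import re

size_expr = re.compile(rb'^\{\d+\+?\}$')  # assumed literal-size pattern

content = b'{26}\nrequire "fileinto";\nkeep;'
lines = content.splitlines()
if size_expr.match(lines[0]) is not None:
    lines = lines[1:]
print(u'\n'.join(line.decode('utf-8') for line in lines))
# require "fileinto";
# keep;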
def dump_cookie(
key,
value="",
max_age=None,
expires=None,
path="/",
domain=None,
secure=False,
httponly=False,
charset="utf-8",
sync_expires=True,
max_size=4093,
samesite=None,
):
"""Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
The parameters are the same as in the cookie Morsel object in the
Python standard library but it accepts unicode data, too.
On Python 3 the return value of this function will be a unicode
string, on Python 2 it will be a native string. In both cases the
return value is usually restricted to ascii as the vast majority of
values are properly escaped, but that is no guarantee. If a unicode
string is returned it's tunneled through latin1 as required by
PEP 3333.
The return value is not ASCII safe if the key contains unicode
characters. This is technically against the specification but
happens in the wild. It's strongly recommended to not use
non-ASCII values for the keys.
:param max_age: should be a number of seconds, or `None` (default) if
the cookie should last only as long as the client's
browser session. Additionally `timedelta` objects
are accepted, too.
:param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, by default it will
span the whole domain.
:param domain: Use this if you want to set a cross-domain cookie. For
example, ``domain=".example.com"`` will set a cookie
that is readable by the domain ``www.example.com``,
``foo.example.com`` etc. Otherwise, a cookie will only
be readable by the domain that set it.
:param secure: The cookie will only be available via HTTPS
:param httponly: disallow JavaScript to access the cookie. This is an
extension to the cookie standard and probably not
supported by all browsers.
:param charset: the encoding for unicode values.
:param sync_expires: automatically set expires if max_age is defined
but expires not.
:param max_size: Warn if the final header value exceeds this size. The
default, 4093, should be safely `supported by most browsers
<cookie_>`_. Set to 0 to disable this check.
:param samesite: Limits the scope of the cookie such that it will only
be attached to requests if those requests are "same-site".
.. _`cookie`: http://browsercookielimits.squawky.net/
"""
key = to_bytes(key, charset)
value = to_bytes(value, charset)
if path is not None:
path = iri_to_uri(path, charset)
domain = _make_cookie_domain(domain)
if isinstance(max_age, timedelta):
max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
if expires is not None:
if not isinstance(expires, string_types):
expires = cookie_date(expires)
elif max_age is not None and sync_expires:
expires = to_bytes(cookie_date(time() + max_age))
samesite = samesite.title() if samesite else None
if samesite not in ("Strict", "Lax", None):
raise ValueError("invalid SameSite value; must be 'Strict', 'Lax' or None")
buf = [key + b"=" + _cookie_quote(value)]
# XXX: In theory all of these parameters that are not marked with `None`
# should be quoted. Because stdlib did not quote it before I did not
# want to introduce quoting there now.
for k, v, q in (
(b"Domain", domain, True),
(b"Expires", expires, False),
(b"Max-Age", max_age, False),
(b"Secure", secure, None),
(b"HttpOnly", httponly, None),
(b"Path", path, False),
(b"SameSite", samesite, False),
):
if q is None:
if v:
buf.append(k)
continue
if v is None:
continue
tmp = bytearray(k)
if not isinstance(v, (bytes, bytearray)):
v = to_bytes(text_type(v), charset)
if q:
v = _cookie_quote(v)
tmp += b"=" + v
buf.append(bytes(tmp))
# The return value will be an incorrectly encoded latin1 header on
# Python 3 for consistency with the headers object and a bytestring
# on Python 2 because that's how the API makes more sense.
rv = b"; ".join(buf)
if not PY2:
rv = rv.decode("latin1")
# Warn if the final value of the cookie is less than the limit. If the
# cookie is too large, then it may be silently ignored, which can be quite
# hard to debug.
cookie_size = len(rv)
if max_size and cookie_size > max_size:
value_size = len(value)
warnings.warn(
'The "{key}" cookie is too large: the value was {value_size} bytes'
" but the header required {extra_size} extra bytes. The final size"
" was {cookie_size} bytes but the limit is {max_size} bytes."
" Browsers may silently ignore cookies larger than this.".format(
key=key,
value_size=value_size,
extra_size=cookie_size - value_size,
cookie_size=cookie_size,
max_size=max_size,
),
stacklevel=2,
)
return rv | def function[dump_cookie, parameter[key, value, max_age, expires, path, domain, secure, httponly, charset, sync_expires, max_size, samesite]]:
constant[Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
The parameters are the same as in the cookie Morsel object in the
Python standard library but it accepts unicode data, too.
On Python 3 the return value of this function will be a unicode
string, on Python 2 it will be a native string. In both cases the
return value is usually restricted to ascii as the vast majority of
values are properly escaped, but that is no guarantee. If a unicode
string is returned it's tunneled through latin1 as required by
PEP 3333.
The return value is not ASCII safe if the key contains unicode
characters. This is technically against the specification but
happens in the wild. It's strongly recommended to not use
non-ASCII values for the keys.
:param max_age: should be a number of seconds, or `None` (default) if
the cookie should last only as long as the client's
browser session. Additionally `timedelta` objects
are accepted, too.
:param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, by default it will
span the whole domain.
:param domain: Use this if you want to set a cross-domain cookie. For
example, ``domain=".example.com"`` will set a cookie
that is readable by the domain ``www.example.com``,
``foo.example.com`` etc. Otherwise, a cookie will only
be readable by the domain that set it.
:param secure: The cookie will only be available via HTTPS
:param httponly: disallow JavaScript to access the cookie. This is an
extension to the cookie standard and probably not
supported by all browsers.
:param charset: the encoding for unicode values.
:param sync_expires: automatically set expires if max_age is defined
but expires not.
:param max_size: Warn if the final header value exceeds this size. The
default, 4093, should be safely `supported by most browsers
<cookie_>`_. Set to 0 to disable this check.
:param samesite: Limits the scope of the cookie such that it will only
be attached to requests if those requests are "same-site".
.. _`cookie`: http://browsercookielimits.squawky.net/
]
variable[key] assign[=] call[name[to_bytes], parameter[name[key], name[charset]]]
variable[value] assign[=] call[name[to_bytes], parameter[name[value], name[charset]]]
if compare[name[path] is_not constant[None]] begin[:]
variable[path] assign[=] call[name[iri_to_uri], parameter[name[path], name[charset]]]
variable[domain] assign[=] call[name[_make_cookie_domain], parameter[name[domain]]]
if call[name[isinstance], parameter[name[max_age], name[timedelta]]] begin[:]
variable[max_age] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[max_age].days * constant[60]] * constant[60]] * constant[24]] + name[max_age].seconds]
if compare[name[expires] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da20c6aa950> begin[:]
variable[expires] assign[=] call[name[cookie_date], parameter[name[expires]]]
variable[samesite] assign[=] <ast.IfExp object at 0x7da20c6a8040>
if compare[name[samesite] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da20c6aa110>, <ast.Constant object at 0x7da20c6a9330>, <ast.Constant object at 0x7da20c6abf10>]]] begin[:]
<ast.Raise object at 0x7da18bccb8b0>
variable[buf] assign[=] list[[<ast.BinOp object at 0x7da18bccafe0>]]
for taget[tuple[[<ast.Name object at 0x7da18bcc86d0>, <ast.Name object at 0x7da18f58dae0>, <ast.Name object at 0x7da18f58fdf0>]]] in starred[tuple[[<ast.Tuple object at 0x7da18f58c7f0>, <ast.Tuple object at 0x7da18f58f970>, <ast.Tuple object at 0x7da18f58ec20>, <ast.Tuple object at 0x7da18f58fd30>, <ast.Tuple object at 0x7da18f58e530>, <ast.Tuple object at 0x7da18f58de70>, <ast.Tuple object at 0x7da18f58d7e0>]]] begin[:]
if compare[name[q] is constant[None]] begin[:]
if name[v] begin[:]
call[name[buf].append, parameter[name[k]]]
continue
if compare[name[v] is constant[None]] begin[:]
continue
variable[tmp] assign[=] call[name[bytearray], parameter[name[k]]]
if <ast.UnaryOp object at 0x7da20c6a9180> begin[:]
variable[v] assign[=] call[name[to_bytes], parameter[call[name[text_type], parameter[name[v]]], name[charset]]]
if name[q] begin[:]
variable[v] assign[=] call[name[_cookie_quote], parameter[name[v]]]
<ast.AugAssign object at 0x7da20c6aac20>
call[name[buf].append, parameter[call[name[bytes], parameter[name[tmp]]]]]
variable[rv] assign[=] call[constant[b'; '].join, parameter[name[buf]]]
if <ast.UnaryOp object at 0x7da20c6aab00> begin[:]
variable[rv] assign[=] call[name[rv].decode, parameter[constant[latin1]]]
variable[cookie_size] assign[=] call[name[len], parameter[name[rv]]]
if <ast.BoolOp object at 0x7da20c6a8220> begin[:]
variable[value_size] assign[=] call[name[len], parameter[name[value]]]
call[name[warnings].warn, parameter[call[constant[The "{key}" cookie is too large: the value was {value_size} bytes but the header required {extra_size} extra bytes. The final size was {cookie_size} bytes but the limit is {max_size} bytes. Browsers may silently ignore cookies larger than this.].format, parameter[]]]]
return[name[rv]] | keyword[def] identifier[dump_cookie] (
identifier[key] ,
identifier[value] = literal[string] ,
identifier[max_age] = keyword[None] ,
identifier[expires] = keyword[None] ,
identifier[path] = literal[string] ,
identifier[domain] = keyword[None] ,
identifier[secure] = keyword[False] ,
identifier[httponly] = keyword[False] ,
identifier[charset] = literal[string] ,
identifier[sync_expires] = keyword[True] ,
identifier[max_size] = literal[int] ,
identifier[samesite] = keyword[None] ,
):
literal[string]
identifier[key] = identifier[to_bytes] ( identifier[key] , identifier[charset] )
identifier[value] = identifier[to_bytes] ( identifier[value] , identifier[charset] )
keyword[if] identifier[path] keyword[is] keyword[not] keyword[None] :
identifier[path] = identifier[iri_to_uri] ( identifier[path] , identifier[charset] )
identifier[domain] = identifier[_make_cookie_domain] ( identifier[domain] )
keyword[if] identifier[isinstance] ( identifier[max_age] , identifier[timedelta] ):
identifier[max_age] =( identifier[max_age] . identifier[days] * literal[int] * literal[int] * literal[int] )+ identifier[max_age] . identifier[seconds]
keyword[if] identifier[expires] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[expires] , identifier[string_types] ):
identifier[expires] = identifier[cookie_date] ( identifier[expires] )
keyword[elif] identifier[max_age] keyword[is] keyword[not] keyword[None] keyword[and] identifier[sync_expires] :
identifier[expires] = identifier[to_bytes] ( identifier[cookie_date] ( identifier[time] ()+ identifier[max_age] ))
identifier[samesite] = identifier[samesite] . identifier[title] () keyword[if] identifier[samesite] keyword[else] keyword[None]
keyword[if] identifier[samesite] keyword[not] keyword[in] ( literal[string] , literal[string] , keyword[None] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[buf] =[ identifier[key] + literal[string] + identifier[_cookie_quote] ( identifier[value] )]
keyword[for] identifier[k] , identifier[v] , identifier[q] keyword[in] (
( literal[string] , identifier[domain] , keyword[True] ),
( literal[string] , identifier[expires] , keyword[False] ),
( literal[string] , identifier[max_age] , keyword[False] ),
( literal[string] , identifier[secure] , keyword[None] ),
( literal[string] , identifier[httponly] , keyword[None] ),
( literal[string] , identifier[path] , keyword[False] ),
( literal[string] , identifier[samesite] , keyword[False] ),
):
keyword[if] identifier[q] keyword[is] keyword[None] :
keyword[if] identifier[v] :
identifier[buf] . identifier[append] ( identifier[k] )
keyword[continue]
keyword[if] identifier[v] keyword[is] keyword[None] :
keyword[continue]
identifier[tmp] = identifier[bytearray] ( identifier[k] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[v] ,( identifier[bytes] , identifier[bytearray] )):
identifier[v] = identifier[to_bytes] ( identifier[text_type] ( identifier[v] ), identifier[charset] )
keyword[if] identifier[q] :
identifier[v] = identifier[_cookie_quote] ( identifier[v] )
identifier[tmp] += literal[string] + identifier[v]
identifier[buf] . identifier[append] ( identifier[bytes] ( identifier[tmp] ))
identifier[rv] = literal[string] . identifier[join] ( identifier[buf] )
keyword[if] keyword[not] identifier[PY2] :
identifier[rv] = identifier[rv] . identifier[decode] ( literal[string] )
identifier[cookie_size] = identifier[len] ( identifier[rv] )
keyword[if] identifier[max_size] keyword[and] identifier[cookie_size] > identifier[max_size] :
identifier[value_size] = identifier[len] ( identifier[value] )
identifier[warnings] . identifier[warn] (
literal[string]
literal[string]
literal[string]
literal[string] . identifier[format] (
identifier[key] = identifier[key] ,
identifier[value_size] = identifier[value_size] ,
identifier[extra_size] = identifier[cookie_size] - identifier[value_size] ,
identifier[cookie_size] = identifier[cookie_size] ,
identifier[max_size] = identifier[max_size] ,
),
identifier[stacklevel] = literal[int] ,
)
keyword[return] identifier[rv] | def dump_cookie(key, value='', max_age=None, expires=None, path='/', domain=None, secure=False, httponly=False, charset='utf-8', sync_expires=True, max_size=4093, samesite=None):
"""Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
The parameters are the same as in the cookie Morsel object in the
Python standard library but it accepts unicode data, too.
On Python 3 the return value of this function will be a unicode
string, on Python 2 it will be a native string. In both cases the
return value is usually restricted to ascii as the vast majority of
values are properly escaped, but that is no guarantee. If a unicode
string is returned it's tunneled through latin1 as required by
PEP 3333.
The return value is not ASCII safe if the key contains unicode
characters. This is technically against the specification but
happens in the wild. It's strongly recommended to not use
non-ASCII values for the keys.
:param max_age: should be a number of seconds, or `None` (default) if
the cookie should last only as long as the client's
browser session. Additionally `timedelta` objects
are accepted, too.
:param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, by default it will
span the whole domain.
:param domain: Use this if you want to set a cross-domain cookie. For
example, ``domain=".example.com"`` will set a cookie
that is readable by the domain ``www.example.com``,
``foo.example.com`` etc. Otherwise, a cookie will only
be readable by the domain that set it.
:param secure: The cookie will only be available via HTTPS
:param httponly: disallow JavaScript to access the cookie. This is an
extension to the cookie standard and probably not
supported by all browsers.
:param charset: the encoding for unicode values.
:param sync_expires: automatically set expires if max_age is defined
but expires not.
:param max_size: Warn if the final header value exceeds this size. The
default, 4093, should be safely `supported by most browsers
<cookie_>`_. Set to 0 to disable this check.
:param samesite: Limits the scope of the cookie such that it will only
be attached to requests if those requests are "same-site".
.. _`cookie`: http://browsercookielimits.squawky.net/
"""
key = to_bytes(key, charset)
value = to_bytes(value, charset)
if path is not None:
path = iri_to_uri(path, charset) # depends on [control=['if'], data=['path']]
domain = _make_cookie_domain(domain)
if isinstance(max_age, timedelta):
max_age = max_age.days * 60 * 60 * 24 + max_age.seconds # depends on [control=['if'], data=[]]
if expires is not None:
if not isinstance(expires, string_types):
expires = cookie_date(expires) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['expires']]
elif max_age is not None and sync_expires:
expires = to_bytes(cookie_date(time() + max_age)) # depends on [control=['if'], data=[]]
samesite = samesite.title() if samesite else None
if samesite not in ('Strict', 'Lax', None):
raise ValueError("invalid SameSite value; must be 'Strict', 'Lax' or None") # depends on [control=['if'], data=[]]
buf = [key + b'=' + _cookie_quote(value)]
# XXX: In theory all of these parameters that are not marked with `None`
# should be quoted. Because stdlib did not quote it before I did not
# want to introduce quoting there now.
for (k, v, q) in ((b'Domain', domain, True), (b'Expires', expires, False), (b'Max-Age', max_age, False), (b'Secure', secure, None), (b'HttpOnly', httponly, None), (b'Path', path, False), (b'SameSite', samesite, False)):
if q is None:
if v:
buf.append(k) # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=[]]
if v is None:
continue # depends on [control=['if'], data=[]]
tmp = bytearray(k)
if not isinstance(v, (bytes, bytearray)):
v = to_bytes(text_type(v), charset) # depends on [control=['if'], data=[]]
if q:
v = _cookie_quote(v) # depends on [control=['if'], data=[]]
tmp += b'=' + v
buf.append(bytes(tmp)) # depends on [control=['for'], data=[]]
# The return value will be an incorrectly encoded latin1 header on
# Python 3 for consistency with the headers object and a bytestring
# on Python 2 because that's how the API makes more sense.
rv = b'; '.join(buf)
if not PY2:
rv = rv.decode('latin1') # depends on [control=['if'], data=[]]
    # Warn if the final value of the cookie is larger than the limit. If the
# cookie is too large, then it may be silently ignored, which can be quite
# hard to debug.
cookie_size = len(rv)
if max_size and cookie_size > max_size:
value_size = len(value)
warnings.warn('The "{key}" cookie is too large: the value was {value_size} bytes but the header required {extra_size} extra bytes. The final size was {cookie_size} bytes but the limit is {max_size} bytes. Browsers may silently ignore cookies larger than this.'.format(key=key, value_size=value_size, extra_size=cookie_size - value_size, cookie_size=cookie_size, max_size=max_size), stacklevel=2) # depends on [control=['if'], data=[]]
return rv |
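
A hedged usage sketch for the dump_cookie row above. In Werkzeug this helper has lived in werkzeug.http (exact signatures vary across versions), so the import path is an assumption; the keyword arguments follow the parameters documented in the docstring.

from datetime import timedelta
from werkzeug.http import dump_cookie  # assumed import path

# One-hour HTTPS-only session cookie scoped to /app; a timedelta max_age is
# converted to seconds internally, and Expires is synced from max_age.
header = dump_cookie(
    "session", "abc123",
    max_age=timedelta(hours=1),
    path="/app",
    secure=True,
    httponly=True,
    samesite="Lax",
)
print(header)  # session=abc123; Expires=...; Max-Age=3600; Secure; HttpOnly; Path=/app; SameSite=Lax
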
def _set_up_reference_fields(new_class):
"""Walk through relation fields and setup shadow attributes"""
if new_class.meta_.declared_fields:
for _, field in new_class.meta_.declared_fields.items():
if isinstance(field, Reference):
shadow_field_name, shadow_field = field.get_shadow_field()
setattr(new_class, shadow_field_name, shadow_field)
shadow_field.__set_name__(new_class, shadow_field_name) | def function[_set_up_reference_fields, parameter[new_class]]:
constant[Walk through relation fields and setup shadow attributes]
if name[new_class].meta_.declared_fields begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c76d570>, <ast.Name object at 0x7da20c76d2a0>]]] in starred[call[name[new_class].meta_.declared_fields.items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[field], name[Reference]]] begin[:]
<ast.Tuple object at 0x7da20c76f400> assign[=] call[name[field].get_shadow_field, parameter[]]
call[name[setattr], parameter[name[new_class], name[shadow_field_name], name[shadow_field]]]
call[name[shadow_field].__set_name__, parameter[name[new_class], name[shadow_field_name]]] | keyword[def] identifier[_set_up_reference_fields] ( identifier[new_class] ):
literal[string]
keyword[if] identifier[new_class] . identifier[meta_] . identifier[declared_fields] :
keyword[for] identifier[_] , identifier[field] keyword[in] identifier[new_class] . identifier[meta_] . identifier[declared_fields] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[field] , identifier[Reference] ):
identifier[shadow_field_name] , identifier[shadow_field] = identifier[field] . identifier[get_shadow_field] ()
identifier[setattr] ( identifier[new_class] , identifier[shadow_field_name] , identifier[shadow_field] )
identifier[shadow_field] . identifier[__set_name__] ( identifier[new_class] , identifier[shadow_field_name] ) | def _set_up_reference_fields(new_class):
"""Walk through relation fields and setup shadow attributes"""
if new_class.meta_.declared_fields:
for (_, field) in new_class.meta_.declared_fields.items():
if isinstance(field, Reference):
(shadow_field_name, shadow_field) = field.get_shadow_field()
setattr(new_class, shadow_field_name, shadow_field)
shadow_field.__set_name__(new_class, shadow_field_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] |
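
A minimal standalone sketch of the shadow-field pattern that _set_up_reference_fields applies. Reference and ShadowField here are simplified stand-ins, not the real framework classes, and the "_id" naming is an illustrative assumption.

class ShadowField:
    # Descriptor that will hold the raw identifier value.
    def __set_name__(self, owner, name):
        self.name = name

class Reference:
    # Simplified stand-in for the framework's Reference field.
    def __init__(self, to):
        self.to = to

    def get_shadow_field(self):
        # e.g. a Reference("Author") shadows as "author_id" (assumed naming)
        return "{0}_id".format(self.to.lower()), ShadowField()

class Post:
    author = Reference("Author")

# What _set_up_reference_fields does for each declared Reference field:
shadow_name, shadow = Post.author.get_shadow_field()
setattr(Post, shadow_name, shadow)
shadow.__set_name__(Post, shadow_name)
print(shadow_name, isinstance(getattr(Post, shadow_name), ShadowField))  # author_id True
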
def _read_mode_route(self, size, kind):
"""Read options with route data.
Positional arguments:
* size - int, length of option
* kind - int, 7/131/137 (RR/LSR/SSR)
Returns:
* dict -- extracted option with route data
Structure of these options:
* [RFC 791] Loose Source Route
+--------+--------+--------+---------//--------+
|10000011| length | pointer| route data |
+--------+--------+--------+---------//--------+
* [RFC 791] Strict Source Route
+--------+--------+--------+---------//--------+
|10001001| length | pointer| route data |
+--------+--------+--------+---------//--------+
* [RFC 791] Record Route
+--------+--------+--------+---------//--------+
|00000111| length | pointer| route data |
+--------+--------+--------+---------//--------+
Octets Bits Name Description
0 0 ip.opt.kind Kind (7/131/137)
0 0 ip.opt.type.copy Copied Flag (0)
0 1 ip.opt.type.class Option Class (0/1)
0 3 ip.opt.type.number Option Number (3/7/9)
1 8 ip.opt.length Length
2 16 ip.opt.pointer Pointer (≥4)
3 24 ip.opt.data Route Data
"""
if size < 3 or (size - 3) % 4 != 0:
raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')
_rptr = self._read_unpack(1)
if _rptr < 4:
raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')
data = dict(
kind=kind,
type=self._read_opt_type(kind),
length=size,
pointer=_rptr,
)
counter = 4
address = list()
endpoint = min(_rptr, size)
while counter < endpoint:
counter += 4
address.append(self._read_ipv4_addr())
data['ip'] = address or None
return data | def function[_read_mode_route, parameter[self, size, kind]]:
constant[Read options with route data.
Positional arguments:
* size - int, length of option
* kind - int, 7/131/137 (RR/LSR/SSR)
Returns:
* dict -- extracted option with route data
Structure of these options:
* [RFC 791] Loose Source Route
+--------+--------+--------+---------//--------+
|10000011| length | pointer| route data |
+--------+--------+--------+---------//--------+
* [RFC 791] Strict Source Route
+--------+--------+--------+---------//--------+
|10001001| length | pointer| route data |
+--------+--------+--------+---------//--------+
* [RFC 791] Record Route
+--------+--------+--------+---------//--------+
|00000111| length | pointer| route data |
+--------+--------+--------+---------//--------+
Octets Bits Name Description
0 0 ip.opt.kind Kind (7/131/137)
0 0 ip.opt.type.copy Copied Flag (0)
0 1 ip.opt.type.class Option Class (0/1)
0 3 ip.opt.type.number Option Number (3/7/9)
1 8 ip.opt.length Length
2 16 ip.opt.pointer Pointer (≥4)
3 24 ip.opt.data Route Data
]
if <ast.BoolOp object at 0x7da20c6c67a0> begin[:]
<ast.Raise object at 0x7da1b07d0670>
variable[_rptr] assign[=] call[name[self]._read_unpack, parameter[constant[1]]]
if compare[name[_rptr] less[<] constant[4]] begin[:]
<ast.Raise object at 0x7da1b07d1120>
variable[data] assign[=] call[name[dict], parameter[]]
variable[counter] assign[=] constant[4]
variable[address] assign[=] call[name[list], parameter[]]
variable[endpoint] assign[=] call[name[min], parameter[name[_rptr], name[size]]]
while compare[name[counter] less[<] name[endpoint]] begin[:]
<ast.AugAssign object at 0x7da1b07d1c60>
call[name[address].append, parameter[call[name[self]._read_ipv4_addr, parameter[]]]]
call[name[data]][constant[ip]] assign[=] <ast.BoolOp object at 0x7da1b07d1000>
return[name[data]] | keyword[def] identifier[_read_mode_route] ( identifier[self] , identifier[size] , identifier[kind] ):
literal[string]
keyword[if] identifier[size] < literal[int] keyword[or] ( identifier[size] - literal[int] )% literal[int] != literal[int] :
keyword[raise] identifier[ProtocolError] ( literal[string] )
identifier[_rptr] = identifier[self] . identifier[_read_unpack] ( literal[int] )
keyword[if] identifier[_rptr] < literal[int] :
keyword[raise] identifier[ProtocolError] ( literal[string] )
identifier[data] = identifier[dict] (
identifier[kind] = identifier[kind] ,
identifier[type] = identifier[self] . identifier[_read_opt_type] ( identifier[kind] ),
identifier[length] = identifier[size] ,
identifier[pointer] = identifier[_rptr] ,
)
identifier[counter] = literal[int]
identifier[address] = identifier[list] ()
identifier[endpoint] = identifier[min] ( identifier[_rptr] , identifier[size] )
keyword[while] identifier[counter] < identifier[endpoint] :
identifier[counter] += literal[int]
identifier[address] . identifier[append] ( identifier[self] . identifier[_read_ipv4_addr] ())
identifier[data] [ literal[string] ]= identifier[address] keyword[or] keyword[None]
keyword[return] identifier[data] | def _read_mode_route(self, size, kind):
"""Read options with route data.
Positional arguments:
* size - int, length of option
* kind - int, 7/131/137 (RR/LSR/SSR)
Returns:
* dict -- extracted option with route data
Structure of these options:
* [RFC 791] Loose Source Route
+--------+--------+--------+---------//--------+
|10000011| length | pointer| route data |
+--------+--------+--------+---------//--------+
* [RFC 791] Strict Source Route
+--------+--------+--------+---------//--------+
|10001001| length | pointer| route data |
+--------+--------+--------+---------//--------+
* [RFC 791] Record Route
+--------+--------+--------+---------//--------+
|00000111| length | pointer| route data |
+--------+--------+--------+---------//--------+
Octets Bits Name Description
0 0 ip.opt.kind Kind (7/131/137)
0 0 ip.opt.type.copy Copied Flag (0)
0 1 ip.opt.type.class Option Class (0/1)
0 3 ip.opt.type.number Option Number (3/7/9)
1 8 ip.opt.length Length
2 16 ip.opt.pointer Pointer (≥4)
3 24 ip.opt.data Route Data
"""
if size < 3 or (size - 3) % 4 != 0:
raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format') # depends on [control=['if'], data=[]]
_rptr = self._read_unpack(1)
if _rptr < 4:
raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format') # depends on [control=['if'], data=[]]
data = dict(kind=kind, type=self._read_opt_type(kind), length=size, pointer=_rptr)
counter = 4
address = list()
endpoint = min(_rptr, size)
while counter < endpoint:
counter += 4
address.append(self._read_ipv4_addr()) # depends on [control=['while'], data=['counter']]
data['ip'] = address or None
return data |
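
A self-contained sketch of the RR/LSR/SSR wire layout that _read_mode_route parses, reading straight from a bytes object instead of the protocol reader's stream helpers.

def read_record_route(option):
    # kind (1 byte) | length (1 byte) | pointer (1 byte) | 4-byte IPv4 addresses
    kind, length, pointer = option[0], option[1], option[2]
    if length < 3 or (length - 3) % 4 != 0 or pointer < 4:
        raise ValueError("malformed route option")
    addresses = []
    counter = 4
    while counter < min(pointer, length):
        start = counter - 1  # route data begins right after the 3-byte header
        addresses.append(".".join(str(b) for b in option[start:start + 4]))
        counter += 4
    return {"kind": kind, "length": length, "pointer": pointer, "ip": addresses or None}

# Record Route (kind 7), length 11, pointer 12, two recorded hops.
opt = bytes([7, 11, 12, 10, 0, 0, 1, 192, 168, 0, 1])
print(read_record_route(opt))  # ip: ['10.0.0.1', '192.168.0.1']
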
def fit_model(y, x, yMaxLag, xMaxLag, includesOriginalX=True, noIntercept=False, sc=None):
"""
Fit an autoregressive model with additional exogenous variables. The model predicts a value
at time t of a dependent variable, Y, as a function of previous values of Y, and a combination
of previous values of exogenous regressors X_i, and current values of exogenous regressors X_i.
This is a generalization of an AR model, which is simply an ARX with no exogenous regressors.
The fitting procedure here is the same, using least squares. Note that all lags up to the
maxlag are included. In the case of the dependent variable the max lag is 'yMaxLag', while
for the exogenous variables the max lag is 'xMaxLag', with which each column in the original
matrix provided is lagged accordingly.
Parameters
----------
y:
the dependent variable, time series as a Numpy array
x:
a matrix of exogenous variables as a Numpy array
yMaxLag:
the maximum lag order for the dependent variable
xMaxLag:
the maximum lag order for exogenous variables
includesOriginalX:
a boolean flag indicating if the non-lagged exogenous variables should
be included. Default is true
noIntercept:
a boolean flag indicating if the intercept should be dropped. Default is
false
Returns an ARXModel, which is an autoregressive model with exogenous variables.
"""
assert sc != None, "Missing SparkContext"
jvm = sc._jvm
jmodel = jvm.com.cloudera.sparkts.models.AutoregressionX.fitModel(_nparray2breezevector(sc, y.toArray()), _nparray2breezematrix(sc, x.toArray()), yMaxLag, xMaxLag, includesOriginalX, noIntercept)
return ARXModel(jmodel=jmodel, sc=sc) | def function[fit_model, parameter[y, x, yMaxLag, xMaxLag, includesOriginalX, noIntercept, sc]]:
constant[
Fit an autoregressive model with additional exogenous variables. The model predicts a value
at time t of a dependent variable, Y, as a function of previous values of Y, and a combination
of previous values of exogenous regressors X_i, and current values of exogenous regressors X_i.
This is a generalization of an AR model, which is simply an ARX with no exogenous regressors.
The fitting procedure here is the same, using least squares. Note that all lags up to the
maxlag are included. In the case of the dependent variable the max lag is 'yMaxLag', while
for the exogenous variables the max lag is 'xMaxLag', with which each column in the original
matrix provided is lagged accordingly.
Parameters
----------
y:
the dependent variable, time series as a Numpy array
x:
a matrix of exogenous variables as a Numpy array
yMaxLag:
the maximum lag order for the dependent variable
xMaxLag:
the maximum lag order for exogenous variables
includesOriginalX:
a boolean flag indicating if the non-lagged exogenous variables should
be included. Default is true
noIntercept:
a boolean flag indicating if the intercept should be dropped. Default is
false
Returns an ARXModel, which is an autoregressive model with exogenous variables.
]
assert[compare[name[sc] not_equal[!=] constant[None]]]
variable[jvm] assign[=] name[sc]._jvm
variable[jmodel] assign[=] call[name[jvm].com.cloudera.sparkts.models.AutoregressionX.fitModel, parameter[call[name[_nparray2breezevector], parameter[name[sc], call[name[y].toArray, parameter[]]]], call[name[_nparray2breezematrix], parameter[name[sc], call[name[x].toArray, parameter[]]]], name[yMaxLag], name[xMaxLag], name[includesOriginalX], name[noIntercept]]]
return[call[name[ARXModel], parameter[]]] | keyword[def] identifier[fit_model] ( identifier[y] , identifier[x] , identifier[yMaxLag] , identifier[xMaxLag] , identifier[includesOriginalX] = keyword[True] , identifier[noIntercept] = keyword[False] , identifier[sc] = keyword[None] ):
literal[string]
keyword[assert] identifier[sc] != keyword[None] , literal[string]
identifier[jvm] = identifier[sc] . identifier[_jvm]
identifier[jmodel] = identifier[jvm] . identifier[com] . identifier[cloudera] . identifier[sparkts] . identifier[models] . identifier[AutoregressionX] . identifier[fitModel] ( identifier[_nparray2breezevector] ( identifier[sc] , identifier[y] . identifier[toArray] ()), identifier[_nparray2breezematrix] ( identifier[sc] , identifier[x] . identifier[toArray] ()), identifier[yMaxLag] , identifier[xMaxLag] , identifier[includesOriginalX] , identifier[noIntercept] )
keyword[return] identifier[ARXModel] ( identifier[jmodel] = identifier[jmodel] , identifier[sc] = identifier[sc] ) | def fit_model(y, x, yMaxLag, xMaxLag, includesOriginalX=True, noIntercept=False, sc=None):
"""
Fit an autoregressive model with additional exogenous variables. The model predicts a value
at time t of a dependent variable, Y, as a function of previous values of Y, and a combination
of previous values of exogenous regressors X_i, and current values of exogenous regressors X_i.
This is a generalization of an AR model, which is simply an ARX with no exogenous regressors.
The fitting procedure here is the same, using least squares. Note that all lags up to the
maxlag are included. In the case of the dependent variable the max lag is 'yMaxLag', while
for the exogenous variables the max lag is 'xMaxLag', with which each column in the original
matrix provided is lagged accordingly.
Parameters
----------
y:
the dependent variable, time series as a Numpy array
x:
a matrix of exogenous variables as a Numpy array
yMaxLag:
the maximum lag order for the dependent variable
xMaxLag:
the maximum lag order for exogenous variables
includesOriginalX:
a boolean flag indicating if the non-lagged exogenous variables should
be included. Default is true
noIntercept:
a boolean flag indicating if the intercept should be dropped. Default is
false
Returns an ARXModel, which is an autoregressive model with exogenous variables.
"""
assert sc != None, 'Missing SparkContext'
jvm = sc._jvm
jmodel = jvm.com.cloudera.sparkts.models.AutoregressionX.fitModel(_nparray2breezevector(sc, y.toArray()), _nparray2breezematrix(sc, x.toArray()), yMaxLag, xMaxLag, includesOriginalX, noIntercept)
return ARXModel(jmodel=jmodel, sc=sc) |
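
An illustrative call for the fit_model row above. It assumes a live SparkContext and the spark-ts Python bindings; the inputs must be pyspark.mllib linalg types because the function calls .toArray() on them. Where fit_model is imported from depends on your install.

from pyspark import SparkContext
from pyspark.mllib.linalg import Vectors, Matrices

sc = SparkContext(appName="arx-demo")
y = Vectors.dense([1.0, 2.0, 3.0, 4.0, 5.0])         # dependent series Y
x = Matrices.dense(5, 1, [0.1, 0.2, 0.3, 0.4, 0.5])  # one exogenous column X
model = fit_model(y, x, yMaxLag=1, xMaxLag=1, sc=sc)  # function shown above
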
def pg2df(res):
'''
takes a getlog requests result
returns a table as df
'''
# parse res
soup = BeautifulSoup(res.text)
if u'Pas de r\xe9ponse pour cette recherche.' in soup.text:
pass # <-- don't pass !
else:
params = urlparse.parse_qs(urlparse.urlsplit(res.url)[-2])
tb = soup.find_all('table')[0]
data = [
[col.text for col in row.find_all('td')] + [params['dep'][0], params['mod'][0]]
for row in tb.find_all('tr')[1:]] # <-- escape header row
data = [dict(zip(
[u'depcom', u'date', u'obs', u'dep', u'mod'], lst)) for lst in data]
return pd.DataFrame(data) | def function[pg2df, parameter[res]]:
constant[
takes a getlog requests result
returns a table as df
]
variable[soup] assign[=] call[name[BeautifulSoup], parameter[name[res].text]]
if compare[constant[Pas de réponse pour cette recherche.] in name[soup].text] begin[:]
pass | keyword[def] identifier[pg2df] ( identifier[res] ):
literal[string]
identifier[soup] = identifier[BeautifulSoup] ( identifier[res] . identifier[text] )
keyword[if] literal[string] keyword[in] identifier[soup] . identifier[text] :
keyword[pass]
keyword[else] :
identifier[params] = identifier[urlparse] . identifier[parse_qs] ( identifier[urlparse] . identifier[urlsplit] ( identifier[res] . identifier[url] )[- literal[int] ])
identifier[tb] = identifier[soup] . identifier[find_all] ( literal[string] )[ literal[int] ]
identifier[data] =[
[ identifier[col] . identifier[text] keyword[for] identifier[col] keyword[in] identifier[row] . identifier[find_all] ( literal[string] )]+[ identifier[params] [ literal[string] ][ literal[int] ], identifier[params] [ literal[string] ][ literal[int] ]]
keyword[for] identifier[row] keyword[in] identifier[tb] . identifier[find_all] ( literal[string] )[ literal[int] :]]
identifier[data] =[ identifier[dict] ( identifier[zip] (
[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ], identifier[lst] )) keyword[for] identifier[lst] keyword[in] identifier[data] ]
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[data] ) | def pg2df(res):
"""
takes a getlog requests result
returns a table as df
"""
# parse res
soup = BeautifulSoup(res.text)
if u'Pas de réponse pour cette recherche.' in soup.text:
pass # <-- don't pass ! # depends on [control=['if'], data=[]]
else:
params = urlparse.parse_qs(urlparse.urlsplit(res.url)[-2])
tb = soup.find_all('table')[0]
data = [[col.text for col in row.find_all('td')] + [params['dep'][0], params['mod'][0]] for row in tb.find_all('tr')[1:]] # <-- escape header row
data = [dict(zip([u'depcom', u'date', u'obs', u'dep', u'mod'], lst)) for lst in data]
return pd.DataFrame(data) |
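
The page pg2df scrapes is not reproducible here, so this standalone sketch exercises the same BeautifulSoup-to-DataFrame pattern on inline HTML (the dep/mod query parameters are omitted).

import pandas as pd
from bs4 import BeautifulSoup

html = '''
<table>
  <tr><th>depcom</th><th>date</th><th>obs</th></tr>
  <tr><td>75056</td><td>2015-01-01</td><td>ok</td></tr>
  <tr><td>13055</td><td>2015-02-01</td><td>ko</td></tr>
</table>
'''
soup = BeautifulSoup(html, "html.parser")
tb = soup.find_all("table")[0]
rows = [[td.text for td in tr.find_all("td")]
        for tr in tb.find_all("tr")[1:]]  # skip the header row
df = pd.DataFrame([dict(zip([u"depcom", u"date", u"obs"], r)) for r in rows])
print(df)
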
def method_already_there(object_type, method_name, this_class_only=False):
"""
Returns True if method `method_name` is already implemented by object_type, that is, its implementation differs from
the one in `object`.
:param object_type:
:param method_name:
:param this_class_only:
:return:
"""
if this_class_only:
return method_name in vars(object_type) # or object_type.__dict__
else:
try:
method = getattr(object_type, method_name)
except AttributeError:
return False
else:
return method is not None and method is not getattr(object, method_name, None) | def function[method_already_there, parameter[object_type, method_name, this_class_only]]:
constant[
Returns True if method `method_name` is already implemented by object_type, that is, its implementation differs from
the one in `object`.
:param object_type:
:param method_name:
:param this_class_only:
:return:
]
if name[this_class_only] begin[:]
return[compare[name[method_name] in call[name[vars], parameter[name[object_type]]]]] | keyword[def] identifier[method_already_there] ( identifier[object_type] , identifier[method_name] , identifier[this_class_only] = keyword[False] ):
literal[string]
keyword[if] identifier[this_class_only] :
keyword[return] identifier[method_name] keyword[in] identifier[vars] ( identifier[object_type] )
keyword[else] :
keyword[try] :
identifier[method] = identifier[getattr] ( identifier[object_type] , identifier[method_name] )
keyword[except] identifier[AttributeError] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] identifier[method] keyword[is] keyword[not] keyword[None] keyword[and] identifier[method] keyword[is] keyword[not] identifier[getattr] ( identifier[object] , identifier[method_name] , keyword[None] ) | def method_already_there(object_type, method_name, this_class_only=False):
"""
Returns True if method `method_name` is already implemented by object_type, that is, its implementation differs from
the one in `object`.
:param object_type:
:param method_name:
:param this_class_only:
:return:
"""
if this_class_only:
return method_name in vars(object_type) # or object_type.__dict__ # depends on [control=['if'], data=[]]
else:
try:
method = getattr(object_type, method_name) # depends on [control=['try'], data=[]]
except AttributeError:
return False # depends on [control=['except'], data=[]]
else:
return method is not None and method is not getattr(object, method_name, None) |
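
A quick check of method_already_there on a tiny hierarchy, assuming the function above is in scope; it relies only on behavior visible in the row.

class Base:
    def greet(self):
        return "base"

class Child(Base):
    pass

print(method_already_there(Base, "greet"))                         # True: differs from object
print(method_already_there(Child, "greet"))                        # True: inherited override still counts
print(method_already_there(Child, "greet", this_class_only=True))  # False: not in Child's own __dict__
print(method_already_there(Child, "__repr__"))                     # False: same as object.__repr__
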
def _check_parameter_range(s_min, s_max):
r"""Performs a final check on a clipped parameter range.
.. note::
This is a helper for :func:`clip_range`.
If both values are unchanged from the "unset" default, this returns
the whole interval :math:`\left[0.0, 1.0\right]`.
If only one of the values is set to some parameter :math:`s`, this
returns the "degenerate" interval :math:`\left[s, s\right]`. (We rely
on the fact that ``s_min`` must be the only set value, based on how
:func:`_update_parameters` works.)
Otherwise, this simply returns ``[s_min, s_max]``.
Args:
s_min (float): Current start of clipped interval. If "unset", this
value will be ``DEFAULT_S_MIN``.
s_max (float): Current end of clipped interval. If "unset", this
value will be ``DEFAULT_S_MAX``.
Returns:
Tuple[float, float]: The (possibly updated) start and end
of the clipped parameter range.
"""
if s_min == DEFAULT_S_MIN:
# Based on the way ``_update_parameters`` works, we know
# both parameters must be unset if ``s_min``.
return 0.0, 1.0
if s_max == DEFAULT_S_MAX:
return s_min, s_min
return s_min, s_max | def function[_check_parameter_range, parameter[s_min, s_max]]:
constant[Performs a final check on a clipped parameter range.
.. note::
This is a helper for :func:`clip_range`.
If both values are unchanged from the "unset" default, this returns
the whole interval :math:`\left[0.0, 1.0\right]`.
If only one of the values is set to some parameter :math:`s`, this
returns the "degenerate" interval :math:`\left[s, s\right]`. (We rely
on the fact that ``s_min`` must be the only set value, based on how
:func:`_update_parameters` works.)
Otherwise, this simply returns ``[s_min, s_max]``.
Args:
s_min (float): Current start of clipped interval. If "unset", this
value will be ``DEFAULT_S_MIN``.
s_max (float): Current end of clipped interval. If "unset", this
value will be ``DEFAULT_S_MAX``.
Returns:
Tuple[float, float]: The (possibly updated) start and end
of the clipped parameter range.
]
if compare[name[s_min] equal[==] name[DEFAULT_S_MIN]] begin[:]
return[tuple[[<ast.Constant object at 0x7da204620550>, <ast.Constant object at 0x7da204622fe0>]]]
if compare[name[s_max] equal[==] name[DEFAULT_S_MAX]] begin[:]
return[tuple[[<ast.Name object at 0x7da204621b70>, <ast.Name object at 0x7da18bc71e10>]]]
return[tuple[[<ast.Name object at 0x7da18bc731f0>, <ast.Name object at 0x7da18bc70070>]]] | keyword[def] identifier[_check_parameter_range] ( identifier[s_min] , identifier[s_max] ):
literal[string]
keyword[if] identifier[s_min] == identifier[DEFAULT_S_MIN] :
keyword[return] literal[int] , literal[int]
keyword[if] identifier[s_max] == identifier[DEFAULT_S_MAX] :
keyword[return] identifier[s_min] , identifier[s_min]
keyword[return] identifier[s_min] , identifier[s_max] | def _check_parameter_range(s_min, s_max):
"""Performs a final check on a clipped parameter range.
.. note::
This is a helper for :func:`clip_range`.
If both values are unchanged from the "unset" default, this returns
the whole interval :math:`\\left[0.0, 1.0\\right]`.
If only one of the values is set to some parameter :math:`s`, this
returns the "degenerate" interval :math:`\\left[s, s\\right]`. (We rely
on the fact that ``s_min`` must be the only set value, based on how
:func:`_update_parameters` works.)
Otherwise, this simply returns ``[s_min, s_max]``.
Args:
s_min (float): Current start of clipped interval. If "unset", this
value will be ``DEFAULT_S_MIN``.
s_max (float): Current end of clipped interval. If "unset", this
value will be ``DEFAULT_S_MAX``.
Returns:
Tuple[float, float]: The (possibly updated) start and end
of the clipped parameter range.
"""
if s_min == DEFAULT_S_MIN:
# Based on the way ``_update_parameters`` works, we know
# both parameters must be unset if ``s_min``.
return (0.0, 1.0) # depends on [control=['if'], data=[]]
if s_max == DEFAULT_S_MAX:
return (s_min, s_min) # depends on [control=['if'], data=[]]
return (s_min, s_max) |
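
A self-contained demo of _check_parameter_range. The real module defines its own DEFAULT_S_MIN/DEFAULT_S_MAX sentinels, whose values are not shown in this row, so the ones below are illustrative placeholders defined before the function body is pasted in.

DEFAULT_S_MIN = -1.0  # illustrative sentinel, not the library's value
DEFAULT_S_MAX = 2.0   # illustrative sentinel, not the library's value

def _check_parameter_range(s_min, s_max):
    if s_min == DEFAULT_S_MIN:
        return 0.0, 1.0
    if s_max == DEFAULT_S_MAX:
        return s_min, s_min
    return s_min, s_max

print(_check_parameter_range(DEFAULT_S_MIN, DEFAULT_S_MAX))  # (0.0, 1.0): nothing was clipped
print(_check_parameter_range(0.25, DEFAULT_S_MAX))           # (0.25, 0.25): degenerate interval
print(_check_parameter_range(0.25, 0.75))                    # (0.25, 0.75): both ends set
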
def _remove_overlaps(in_file, out_dir, data):
"""Remove regions that overlap with next region, these result in issues with PureCN.
"""
out_file = os.path.join(out_dir, "%s-nooverlaps%s" % utils.splitext_plus(os.path.basename(in_file)))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
with open(in_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
prev_line = None
for line in in_handle:
if prev_line:
pchrom, pstart, pend = prev_line.split("\t", 4)[:3]
cchrom, cstart, cend = line.split("\t", 4)[:3]
# Skip if chromosomes match and end overlaps start
if pchrom == cchrom and int(pend) > int(cstart):
pass
else:
out_handle.write(prev_line)
prev_line = line
out_handle.write(prev_line)
return out_file | def function[_remove_overlaps, parameter[in_file, out_dir, data]]:
constant[Remove regions that overlap with next region, these result in issues with PureCN.
]
variable[out_file] assign[=] call[name[os].path.join, parameter[name[out_dir], binary_operation[constant[%s-nooverlaps%s] <ast.Mod object at 0x7da2590d6920> call[name[utils].splitext_plus, parameter[call[name[os].path.basename, parameter[name[in_file]]]]]]]]
if <ast.UnaryOp object at 0x7da1b1846ec0> begin[:]
with call[name[file_transaction], parameter[name[data], name[out_file]]] begin[:]
with call[name[open], parameter[name[in_file]]] begin[:]
with call[name[open], parameter[name[tx_out_file], constant[w]]] begin[:]
variable[prev_line] assign[=] constant[None]
for taget[name[line]] in starred[name[in_handle]] begin[:]
if name[prev_line] begin[:]
<ast.Tuple object at 0x7da1b18467a0> assign[=] call[call[name[prev_line].split, parameter[constant[ ], constant[4]]]][<ast.Slice object at 0x7da1b1846470>]
<ast.Tuple object at 0x7da1b1846110> assign[=] call[call[name[line].split, parameter[constant[ ], constant[4]]]][<ast.Slice object at 0x7da1b1847b50>]
if <ast.BoolOp object at 0x7da1b18473d0> begin[:]
pass
variable[prev_line] assign[=] name[line]
call[name[out_handle].write, parameter[name[prev_line]]]
return[name[out_file]] | keyword[def] identifier[_remove_overlaps] ( identifier[in_file] , identifier[out_dir] , identifier[data] ):
literal[string]
identifier[out_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[out_dir] , literal[string] % identifier[utils] . identifier[splitext_plus] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[in_file] )))
keyword[if] keyword[not] identifier[utils] . identifier[file_uptodate] ( identifier[out_file] , identifier[in_file] ):
keyword[with] identifier[file_transaction] ( identifier[data] , identifier[out_file] ) keyword[as] identifier[tx_out_file] :
keyword[with] identifier[open] ( identifier[in_file] ) keyword[as] identifier[in_handle] :
keyword[with] identifier[open] ( identifier[tx_out_file] , literal[string] ) keyword[as] identifier[out_handle] :
identifier[prev_line] = keyword[None]
keyword[for] identifier[line] keyword[in] identifier[in_handle] :
keyword[if] identifier[prev_line] :
identifier[pchrom] , identifier[pstart] , identifier[pend] = identifier[prev_line] . identifier[split] ( literal[string] , literal[int] )[: literal[int] ]
identifier[cchrom] , identifier[cstart] , identifier[cend] = identifier[line] . identifier[split] ( literal[string] , literal[int] )[: literal[int] ]
keyword[if] identifier[pchrom] == identifier[cchrom] keyword[and] identifier[int] ( identifier[pend] )> identifier[int] ( identifier[cstart] ):
keyword[pass]
keyword[else] :
identifier[out_handle] . identifier[write] ( identifier[prev_line] )
identifier[prev_line] = identifier[line]
identifier[out_handle] . identifier[write] ( identifier[prev_line] )
keyword[return] identifier[out_file] | def _remove_overlaps(in_file, out_dir, data):
"""Remove regions that overlap with next region, these result in issues with PureCN.
"""
out_file = os.path.join(out_dir, '%s-nooverlaps%s' % utils.splitext_plus(os.path.basename(in_file)))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
with open(in_file) as in_handle:
with open(tx_out_file, 'w') as out_handle:
prev_line = None
for line in in_handle:
if prev_line:
(pchrom, pstart, pend) = prev_line.split('\t', 4)[:3]
(cchrom, cstart, cend) = line.split('\t', 4)[:3]
# Skip if chromosomes match and end overlaps start
if pchrom == cchrom and int(pend) > int(cstart):
pass # depends on [control=['if'], data=[]]
else:
out_handle.write(prev_line) # depends on [control=['if'], data=[]]
prev_line = line # depends on [control=['for'], data=['line']]
out_handle.write(prev_line) # depends on [control=['with'], data=['out_handle']] # depends on [control=['with'], data=['open', 'in_handle']] # depends on [control=['with'], data=['tx_out_file']] # depends on [control=['if'], data=[]]
return out_file |
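
A standalone sketch of the overlap filter on in-memory BED lines; the original wraps this loop in bcbio's file_transaction machinery, which is elided here.

lines = [
    "chr1\t100\t200\tr1\n",
    "chr1\t150\t250\tr2\n",  # starts before the previous region ends
    "chr1\t300\t400\tr3\n",
]
kept, prev = [], None
for line in lines:
    if prev:
        pchrom, pstart, pend = prev.split("\t", 4)[:3]
        cchrom, cstart, cend = line.split("\t", 4)[:3]
        if not (pchrom == cchrom and int(pend) > int(cstart)):
            kept.append(prev)  # keep prev only when it does not overlap
    prev = line
kept.append(prev)
print("".join(kept), end="")  # r1 is dropped: its end (200) overlaps r2's start (150)
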
def new(conf):
"""Factory to create hash functions from configuration section. If an
algorithm takes custom parameters, you can separate them by a colon like
this: pbkdf2:arg1:arg2:arg3."""
algorithm = conf.get("algorithm")
salt = conf.get("salt").encode("utf-8")
if algorithm == "none":
return Hash(salt, None)
elif algorithm.startswith("pbkdf2"):
kwargs = {}
tail = algorithm.partition(":")[2]
for func, key in ((int, "iterations"), (int, "dklen"), (str, "func")):
head, _, tail = tail.partition(":")
if not head:
break
kwargs[key] = func(head)
return PBKDF2(salt, **kwargs)
else:
return Hash(salt, algorithm) | def function[new, parameter[conf]]:
constant[Factory to create hash functions from configuration section. If an
algorithm takes custom parameters, you can separate them by a colon like
this: pbkdf2:arg1:arg2:arg3.]
variable[algorithm] assign[=] call[name[conf].get, parameter[constant[algorithm]]]
variable[salt] assign[=] call[call[name[conf].get, parameter[constant[salt]]].encode, parameter[constant[utf-8]]]
if compare[name[algorithm] equal[==] constant[none]] begin[:]
return[call[name[Hash], parameter[name[salt], constant[None]]]] | keyword[def] identifier[new] ( identifier[conf] ):
literal[string]
identifier[algorithm] = identifier[conf] . identifier[get] ( literal[string] )
identifier[salt] = identifier[conf] . identifier[get] ( literal[string] ). identifier[encode] ( literal[string] )
keyword[if] identifier[algorithm] == literal[string] :
keyword[return] identifier[Hash] ( identifier[salt] , keyword[None] )
keyword[elif] identifier[algorithm] . identifier[startswith] ( literal[string] ):
identifier[kwargs] ={}
identifier[tail] = identifier[algorithm] . identifier[partition] ( literal[string] )[ literal[int] ]
keyword[for] identifier[func] , identifier[key] keyword[in] (( identifier[int] , literal[string] ),( identifier[int] , literal[string] ),( identifier[str] , literal[string] )):
identifier[head] , identifier[_] , identifier[tail] = identifier[tail] . identifier[partition] ( literal[string] )
keyword[if] keyword[not] identifier[head] :
keyword[break]
identifier[kwargs] [ identifier[key] ]= identifier[func] ( identifier[head] )
keyword[return] identifier[PBKDF2] ( identifier[salt] ,** identifier[kwargs] )
keyword[else] :
keyword[return] identifier[Hash] ( identifier[salt] , identifier[algorithm] ) | def new(conf):
"""Factory to create hash functions from configuration section. If an
algorithm takes custom parameters, you can separate them by a colon like
this: pbkdf2:arg1:arg2:arg3."""
algorithm = conf.get('algorithm')
salt = conf.get('salt').encode('utf-8')
if algorithm == 'none':
return Hash(salt, None) # depends on [control=['if'], data=[]]
elif algorithm.startswith('pbkdf2'):
kwargs = {}
tail = algorithm.partition(':')[2]
for (func, key) in ((int, 'iterations'), (int, 'dklen'), (str, 'func')):
(head, _, tail) = tail.partition(':')
if not head:
break # depends on [control=['if'], data=[]]
kwargs[key] = func(head) # depends on [control=['for'], data=[]]
return PBKDF2(salt, **kwargs) # depends on [control=['if'], data=[]]
else:
return Hash(salt, algorithm) |
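
A runnable sketch of just the colon-separated argument parsing the factory performs for pbkdf2 strings; Hash and PBKDF2 themselves come from the surrounding module and are not reimplemented here.

def parse_pbkdf2(algorithm):
    # Mirrors the unpacking of "pbkdf2:iterations:dklen:func" shown above.
    kwargs = {}
    tail = algorithm.partition(":")[2]
    for func, key in ((int, "iterations"), (int, "dklen"), (str, "func")):
        head, _, tail = tail.partition(":")
        if not head:
            break
        kwargs[key] = func(head)
    return kwargs

print(parse_pbkdf2("pbkdf2"))              # {}
print(parse_pbkdf2("pbkdf2:1000"))         # {'iterations': 1000}
print(parse_pbkdf2("pbkdf2:1000:6:sha1"))  # {'iterations': 1000, 'dklen': 6, 'func': 'sha1'}
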
async def handle_action(self, action: str, request_id: str, **kwargs):
"""
run the action.
"""
try:
await self.check_permissions(action, **kwargs)
if action not in self.actions:
raise MethodNotAllowed(method=action)
content, status = await self.call_view(
action=action,
**kwargs
)
await self.reply(
action=action,
request_id=request_id,
data=content,
status=status
)
except Exception as exc:
await self.handle_exception(
exc,
action=action,
request_id=request_id
) | <ast.AsyncFunctionDef object at 0x7da204623cd0> | keyword[async] keyword[def] identifier[handle_action] ( identifier[self] , identifier[action] : identifier[str] , identifier[request_id] : identifier[str] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[await] identifier[self] . identifier[check_permissions] ( identifier[action] ,** identifier[kwargs] )
keyword[if] identifier[action] keyword[not] keyword[in] identifier[self] . identifier[actions] :
keyword[raise] identifier[MethodNotAllowed] ( identifier[method] = identifier[action] )
identifier[content] , identifier[status] = keyword[await] identifier[self] . identifier[call_view] (
identifier[action] = identifier[action] ,
** identifier[kwargs]
)
keyword[await] identifier[self] . identifier[reply] (
identifier[action] = identifier[action] ,
identifier[request_id] = identifier[request_id] ,
identifier[data] = identifier[content] ,
identifier[status] = identifier[status]
)
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
keyword[await] identifier[self] . identifier[handle_exception] (
identifier[exc] ,
identifier[action] = identifier[action] ,
identifier[request_id] = identifier[request_id]
) | async def handle_action(self, action: str, request_id: str, **kwargs):
"""
run the action.
"""
try:
await self.check_permissions(action, **kwargs)
if action not in self.actions:
raise MethodNotAllowed(method=action) # depends on [control=['if'], data=['action']]
(content, status) = await self.call_view(action=action, **kwargs)
await self.reply(action=action, request_id=request_id, data=content, status=status) # depends on [control=['try'], data=[]]
except Exception as exc:
await self.handle_exception(exc, action=action, request_id=request_id) # depends on [control=['except'], data=['exc']] |
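
A self-contained harness for the dispatch flow above. MiniConsumer reimplements handle_action verbatim with stubbed check_permissions/call_view/reply hooks, whereas the real consumer would send the reply back over a websocket.

import asyncio

class MethodNotAllowed(Exception):
    def __init__(self, method):
        super().__init__("action not allowed: {0}".format(method))

class MiniConsumer:
    actions = {"echo"}

    async def check_permissions(self, action, **kwargs):
        pass  # permit everything in this sketch

    async def call_view(self, action, **kwargs):
        return {"echoed": kwargs.get("text")}, 200

    async def reply(self, action, request_id, data, status):
        print(action, request_id, status, data)

    async def handle_exception(self, exc, action, request_id):
        print("error:", exc)

    async def handle_action(self, action, request_id, **kwargs):
        try:
            await self.check_permissions(action, **kwargs)
            if action not in self.actions:
                raise MethodNotAllowed(method=action)
            content, status = await self.call_view(action=action, **kwargs)
            await self.reply(action=action, request_id=request_id,
                             data=content, status=status)
        except Exception as exc:
            await self.handle_exception(exc, action=action, request_id=request_id)

asyncio.run(MiniConsumer().handle_action("echo", "req-1", text="hi"))
# echo req-1 200 {'echoed': 'hi'}
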
def margin(self):
"""
        [float] total margin
"""
return sum(position.margin for position in six.itervalues(self._positions)) | def function[margin, parameter[self]]:
constant[
        [float] total margin
]
return[call[name[sum], parameter[<ast.GeneratorExp object at 0x7da1b21782b0>]]] | keyword[def] identifier[margin] ( identifier[self] ):
literal[string]
keyword[return] identifier[sum] ( identifier[position] . identifier[margin] keyword[for] identifier[position] keyword[in] identifier[six] . identifier[itervalues] ( identifier[self] . identifier[_positions] )) | def margin(self):
"""
        [float] total margin
"""
return sum((position.margin for position in six.itervalues(self._positions))) |
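
A tiny sketch of the margin property's aggregation, with SimpleNamespace standing in for real position objects.

from types import SimpleNamespace

positions = {
    "IF2106": SimpleNamespace(margin=12000.0),
    "AU2112": SimpleNamespace(margin=8500.0),
}
total_margin = sum(p.margin for p in positions.values())
print(total_margin)  # 20500.0
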
def restore_layout(self, name, *args):
"""
Restores given layout.
:param name: Layout name.
:type name: unicode
:param \*args: Arguments.
:type \*args: \*
:return: Method success.
:rtype: bool
"""
layout = self.__layouts.get(name)
if not layout:
raise umbra.exceptions.LayoutExistError("{0} | '{1}' layout isn't registered!".format(
self.__class__.__name__, name))
LOGGER.debug("> Restoring layout '{0}'.".format(name))
for component, profile in self.__container.components_manager:
if profile.category == "QWidget" and component not in self.__container.visible_components:
interface = self.__container.components_manager.get_interface(component)
interface and interface.hide()
self.__current_layout = name
self.__container.centralWidget().setVisible(
self.__settings.get_key("Layouts", "{0}_central_widget".format(name)).toBool())
self.__container.restoreState(
self.__settings.get_key("Layouts", "{0}_window_state".format(name)).toByteArray())
self.__restore_geometry_on_layout_change and \
self.__container.restoreGeometry(
self.__settings.get_key("Layouts", "{0}_geometry".format(name)).toByteArray())
self.layout_restored.emit(self.__current_layout)
return True | def function[restore_layout, parameter[self, name]]:
constant[
Restores given layout.
:param name: Layout name.
:type name: unicode
:param \*args: Arguments.
:type \*args: \*
:return: Method success.
:rtype: bool
]
variable[layout] assign[=] call[name[self].__layouts.get, parameter[name[name]]]
if <ast.UnaryOp object at 0x7da1b09d03a0> begin[:]
<ast.Raise object at 0x7da1b09d27d0>
call[name[LOGGER].debug, parameter[call[constant[> Restoring layout '{0}'.].format, parameter[name[name]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b09d0160>, <ast.Name object at 0x7da1b09d38b0>]]] in starred[name[self].__container.components_manager] begin[:]
if <ast.BoolOp object at 0x7da1b09d27a0> begin[:]
variable[interface] assign[=] call[name[self].__container.components_manager.get_interface, parameter[name[component]]]
<ast.BoolOp object at 0x7da1b09d07f0>
name[self].__current_layout assign[=] name[name]
call[call[name[self].__container.centralWidget, parameter[]].setVisible, parameter[call[call[name[self].__settings.get_key, parameter[constant[Layouts], call[constant[{0}_central_widget].format, parameter[name[name]]]]].toBool, parameter[]]]]
call[name[self].__container.restoreState, parameter[call[call[name[self].__settings.get_key, parameter[constant[Layouts], call[constant[{0}_window_state].format, parameter[name[name]]]]].toByteArray, parameter[]]]]
<ast.BoolOp object at 0x7da1b0965690>
call[name[self].layout_restored.emit, parameter[name[self].__current_layout]]
return[constant[True]] | keyword[def] identifier[restore_layout] ( identifier[self] , identifier[name] ,* identifier[args] ):
literal[string]
identifier[layout] = identifier[self] . identifier[__layouts] . identifier[get] ( identifier[name] )
keyword[if] keyword[not] identifier[layout] :
keyword[raise] identifier[umbra] . identifier[exceptions] . identifier[LayoutExistError] ( literal[string] . identifier[format] (
identifier[self] . identifier[__class__] . identifier[__name__] , identifier[name] ))
identifier[LOGGER] . identifier[debug] ( literal[string] . identifier[format] ( identifier[name] ))
keyword[for] identifier[component] , identifier[profile] keyword[in] identifier[self] . identifier[__container] . identifier[components_manager] :
keyword[if] identifier[profile] . identifier[category] == literal[string] keyword[and] identifier[component] keyword[not] keyword[in] identifier[self] . identifier[__container] . identifier[visible_components] :
identifier[interface] = identifier[self] . identifier[__container] . identifier[components_manager] . identifier[get_interface] ( identifier[component] )
identifier[interface] keyword[and] identifier[interface] . identifier[hide] ()
identifier[self] . identifier[__current_layout] = identifier[name]
identifier[self] . identifier[__container] . identifier[centralWidget] (). identifier[setVisible] (
identifier[self] . identifier[__settings] . identifier[get_key] ( literal[string] , literal[string] . identifier[format] ( identifier[name] )). identifier[toBool] ())
identifier[self] . identifier[__container] . identifier[restoreState] (
identifier[self] . identifier[__settings] . identifier[get_key] ( literal[string] , literal[string] . identifier[format] ( identifier[name] )). identifier[toByteArray] ())
identifier[self] . identifier[__restore_geometry_on_layout_change] keyword[and] identifier[self] . identifier[__container] . identifier[restoreGeometry] (
identifier[self] . identifier[__settings] . identifier[get_key] ( literal[string] , literal[string] . identifier[format] ( identifier[name] )). identifier[toByteArray] ())
identifier[self] . identifier[layout_restored] . identifier[emit] ( identifier[self] . identifier[__current_layout] )
keyword[return] keyword[True] | def restore_layout(self, name, *args):
"""
Restores given layout.
:param name: Layout name.
:type name: unicode
:param \\*args: Arguments.
:type \\*args: \\*
:return: Method success.
:rtype: bool
"""
layout = self.__layouts.get(name)
if not layout:
raise umbra.exceptions.LayoutExistError("{0} | '{1}' layout isn't registered!".format(self.__class__.__name__, name)) # depends on [control=['if'], data=[]]
LOGGER.debug("> Restoring layout '{0}'.".format(name))
for (component, profile) in self.__container.components_manager:
if profile.category == 'QWidget' and component not in self.__container.visible_components:
interface = self.__container.components_manager.get_interface(component)
interface and interface.hide() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
self.__current_layout = name
self.__container.centralWidget().setVisible(self.__settings.get_key('Layouts', '{0}_central_widget'.format(name)).toBool())
self.__container.restoreState(self.__settings.get_key('Layouts', '{0}_window_state'.format(name)).toByteArray())
self.__restore_geometry_on_layout_change and self.__container.restoreGeometry(self.__settings.get_key('Layouts', '{0}_geometry'.format(name)).toByteArray())
self.layout_restored.emit(self.__current_layout)
return True |
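
A small sketch of the (section, key) pairs restore_layout reads from settings for a given layout name; the Qt calls (restoreState/restoreGeometry) are elided since they need a running QMainWindow.

def layout_keys(name):
    # (section, key) pairs passed to settings.get_key in restore_layout
    return [
        ("Layouts", "{0}_central_widget".format(name)),  # bool: central widget visibility
        ("Layouts", "{0}_window_state".format(name)),    # byte array for restoreState
        ("Layouts", "{0}_geometry".format(name)),        # byte array for restoreGeometry
    ]

for section, key in layout_keys("edit"):
    print(section, key)
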