code (stringlengths 75-104k) | code_sememe (stringlengths 47-309k) | token_type (stringlengths 215-214k) | code_dependency (stringlengths 75-155k)
---|---|---|---|
def _do_sse_request(self, path, params=None):
"""Query Marathon server for events."""
urls = [''.join([server.rstrip('/'), path]) for server in self.servers]
while urls:
url = urls.pop()
try:
            # Requests does not re-send the original Authorization header on
            # cross-origin redirects. If we set allow_redirects=True we may get
            # a 401 response.
response = self.sse_session.get(
url,
params=params,
stream=True,
headers={'Accept': 'text/event-stream'},
auth=self.auth,
verify=self.verify,
allow_redirects=False
)
except Exception as e:
            marathon.log.error(
                'Error while calling %s: %s', url, e)  # e.message is Python 2 only
else:
if response.is_redirect and response.next:
urls.append(response.next.url)
marathon.log.debug("Got redirect to {}".format(response.next.url))
elif response.ok:
return response.iter_lines()
raise MarathonError('No remaining Marathon servers to try') | def function[_do_sse_request, parameter[self, path, params]]:
constant[Query Marathon server for events.]
variable[urls] assign[=] <ast.ListComp object at 0x7da1b0f580a0>
while name[urls] begin[:]
variable[url] assign[=] call[name[urls].pop, parameter[]]
<ast.Try object at 0x7da1b0f59120>
<ast.Raise object at 0x7da1b0f5bdc0> | keyword[def] identifier[_do_sse_request] ( identifier[self] , identifier[path] , identifier[params] = keyword[None] ):
literal[string]
identifier[urls] =[ literal[string] . identifier[join] ([ identifier[server] . identifier[rstrip] ( literal[string] ), identifier[path] ]) keyword[for] identifier[server] keyword[in] identifier[self] . identifier[servers] ]
keyword[while] identifier[urls] :
identifier[url] = identifier[urls] . identifier[pop] ()
keyword[try] :
identifier[response] = identifier[self] . identifier[sse_session] . identifier[get] (
identifier[url] ,
identifier[params] = identifier[params] ,
identifier[stream] = keyword[True] ,
identifier[headers] ={ literal[string] : literal[string] },
identifier[auth] = identifier[self] . identifier[auth] ,
identifier[verify] = identifier[self] . identifier[verify] ,
identifier[allow_redirects] = keyword[False]
)
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[marathon] . identifier[log] . identifier[error] (
literal[string] , identifier[url] , identifier[e] . identifier[message] )
keyword[else] :
keyword[if] identifier[response] . identifier[is_redirect] keyword[and] identifier[response] . identifier[next] :
identifier[urls] . identifier[append] ( identifier[response] . identifier[next] . identifier[url] )
identifier[marathon] . identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[response] . identifier[next] . identifier[url] ))
keyword[elif] identifier[response] . identifier[ok] :
keyword[return] identifier[response] . identifier[iter_lines] ()
keyword[raise] identifier[MarathonError] ( literal[string] ) | def _do_sse_request(self, path, params=None):
"""Query Marathon server for events."""
urls = [''.join([server.rstrip('/'), path]) for server in self.servers]
while urls:
url = urls.pop()
try:
# Requests does not set the original Authorization header on cross origin
# redirects. If set allow_redirects=True we may get a 401 response.
response = self.sse_session.get(url, params=params, stream=True, headers={'Accept': 'text/event-stream'}, auth=self.auth, verify=self.verify, allow_redirects=False) # depends on [control=['try'], data=[]]
except Exception as e:
marathon.log.error('Error while calling %s: %s', url, e.message) # depends on [control=['except'], data=['e']]
else:
if response.is_redirect and response.next:
urls.append(response.next.url)
marathon.log.debug('Got redirect to {}'.format(response.next.url)) # depends on [control=['if'], data=[]]
elif response.ok:
return response.iter_lines() # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
raise MarathonError('No remaining Marathon servers to try') |
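The redirect handling above is a reusable pattern: Requests strips the Authorization header on cross-origin redirects, so the client disables automatic redirects and follows `response.next` by hand. A minimal standalone sketch of the same idea (the function name and the `max_hops` guard are illustrative, not from the source; assumes a Requests version that exposes `Response.next`):

```python
import requests

def get_preserving_auth(session, url, auth, max_hops=5):
    # Follow redirects manually so our auth header is re-sent on every hop,
    # instead of letting Requests drop it on a cross-origin redirect.
    for _ in range(max_hops):
        response = session.get(url, auth=auth, allow_redirects=False)
        if response.is_redirect and response.next is not None:
            url = response.next.url  # the prepared follow-up request
            continue
        return response
    raise requests.TooManyRedirects('gave up after %d hops' % max_hops)
```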
def get_filename_extensions(url='https://www.webopedia.com/quick_ref/fileextensionsfull.asp'):
""" Load a DataFrame of filename extensions from the indicated url
>>> df = get_filename_extensions('https://www.openoffice.org/dev_docs/source/file_extensions.html')
>>> df.head(2)
ext description
0 .a UNIX static library file.
1 .asm Non-UNIX assembler source file.
"""
df = get_longest_table(url)
columns = list(df.columns)
columns[0] = 'ext'
columns[1] = 'description'
if len(columns) > 2:
columns[2] = 'details'
df.columns = columns
return df | def function[get_filename_extensions, parameter[url]]:
constant[ Load a DataFrame of filename extensions from the indicated url
>>> df = get_filename_extensions('https://www.openoffice.org/dev_docs/source/file_extensions.html')
>>> df.head(2)
ext description
0 .a UNIX static library file.
1 .asm Non-UNIX assembler source file.
]
variable[df] assign[=] call[name[get_longest_table], parameter[name[url]]]
variable[columns] assign[=] call[name[list], parameter[name[df].columns]]
call[name[columns]][constant[0]] assign[=] constant[ext]
call[name[columns]][constant[1]] assign[=] constant[description]
if compare[call[name[len], parameter[name[columns]]] greater[>] constant[2]] begin[:]
call[name[columns]][constant[2]] assign[=] constant[details]
name[df].columns assign[=] name[columns]
return[name[df]] | keyword[def] identifier[get_filename_extensions] ( identifier[url] = literal[string] ):
literal[string]
identifier[df] = identifier[get_longest_table] ( identifier[url] )
identifier[columns] = identifier[list] ( identifier[df] . identifier[columns] )
identifier[columns] [ literal[int] ]= literal[string]
identifier[columns] [ literal[int] ]= literal[string]
keyword[if] identifier[len] ( identifier[columns] )> literal[int] :
identifier[columns] [ literal[int] ]= literal[string]
identifier[df] . identifier[columns] = identifier[columns]
keyword[return] identifier[df] | def get_filename_extensions(url='https://www.webopedia.com/quick_ref/fileextensionsfull.asp'):
""" Load a DataFrame of filename extensions from the indicated url
>>> df = get_filename_extensions('https://www.openoffice.org/dev_docs/source/file_extensions.html')
>>> df.head(2)
ext description
0 .a UNIX static library file.
1 .asm Non-UNIX assembler source file.
"""
df = get_longest_table(url)
columns = list(df.columns)
columns[0] = 'ext'
columns[1] = 'description'
if len(columns) > 2:
columns[2] = 'details' # depends on [control=['if'], data=[]]
df.columns = columns
return df |
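`get_longest_table()` is assumed from the surrounding module and not shown here; a plausible minimal implementation, if one takes "longest" to mean most rows and uses pandas, would be:

```python
import pandas as pd

def get_longest_table(url):
    # read_html parses every <table> on the page into a DataFrame;
    # the one with the most rows is usually the data table we want.
    tables = pd.read_html(url)
    return max(tables, key=len)
```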
def compose_ants_transforms(transform_list):
"""
Compose multiple ANTsTransform's together
ANTsR function: `composeAntsrTransforms`
Arguments
---------
transform_list : list/tuple of ANTsTransform object
list of transforms to compose together
Returns
-------
ANTsTransform
one transform that contains all given transforms
Example
-------
>>> import ants
>>> img = ants.image_read(ants.get_ants_data("r16")).clone('float')
>>> tx = ants.new_ants_transform(dimension=2)
>>> tx.set_parameters((0.9,0,0,1.1,10,11))
>>> inv_tx = tx.invert()
>>> single_tx = ants.compose_ants_transforms([tx, inv_tx])
>>> img_orig = single_tx.apply_to_image(img, img)
>>> rRotGenerator = ants.contrib.RandomRotate2D( ( 0, 40 ), reference=img )
>>> rShearGenerator=ants.contrib.RandomShear2D( (0,50), reference=img )
>>> tx1 = rRotGenerator.transform()
>>> tx2 = rShearGenerator.transform()
>>> rSrR = ants.compose_ants_transforms([tx1, tx2])
>>> rSrR.apply_to_image( img )
"""
precision = transform_list[0].precision
dimension = transform_list[0].dimension
for tx in transform_list:
if precision != tx.precision:
raise ValueError('All transforms must have the same precision')
if dimension != tx.dimension:
raise ValueError('All transforms must have the same dimension')
tx_ptr_list = list(reversed([tf.pointer for tf in transform_list]))
libfn = utils.get_lib_fn('composeTransforms%s' % (transform_list[0]._libsuffix))
itk_composed_tx = libfn(tx_ptr_list, precision, dimension)
return ANTsTransform(precision=precision, dimension=dimension,
transform_type='CompositeTransform', pointer=itk_composed_tx) | def function[compose_ants_transforms, parameter[transform_list]]:
constant[
Compose multiple ANTsTransform's together
ANTsR function: `composeAntsrTransforms`
Arguments
---------
transform_list : list/tuple of ANTsTransform object
list of transforms to compose together
Returns
-------
ANTsTransform
one transform that contains all given transforms
Example
-------
>>> import ants
>>> img = ants.image_read(ants.get_ants_data("r16")).clone('float')
>>> tx = ants.new_ants_transform(dimension=2)
>>> tx.set_parameters((0.9,0,0,1.1,10,11))
>>> inv_tx = tx.invert()
>>> single_tx = ants.compose_ants_transforms([tx, inv_tx])
>>> img_orig = single_tx.apply_to_image(img, img)
>>> rRotGenerator = ants.contrib.RandomRotate2D( ( 0, 40 ), reference=img )
>>> rShearGenerator=ants.contrib.RandomShear2D( (0,50), reference=img )
>>> tx1 = rRotGenerator.transform()
>>> tx2 = rShearGenerator.transform()
>>> rSrR = ants.compose_ants_transforms([tx1, tx2])
>>> rSrR.apply_to_image( img )
]
variable[precision] assign[=] call[name[transform_list]][constant[0]].precision
variable[dimension] assign[=] call[name[transform_list]][constant[0]].dimension
for taget[name[tx]] in starred[name[transform_list]] begin[:]
if compare[name[precision] not_equal[!=] name[tx].precision] begin[:]
<ast.Raise object at 0x7da1b151a320>
if compare[name[dimension] not_equal[!=] name[tx].dimension] begin[:]
<ast.Raise object at 0x7da1b151b790>
variable[tx_ptr_list] assign[=] call[name[list], parameter[call[name[reversed], parameter[<ast.ListComp object at 0x7da1b151b2e0>]]]]
variable[libfn] assign[=] call[name[utils].get_lib_fn, parameter[binary_operation[constant[composeTransforms%s] <ast.Mod object at 0x7da2590d6920> call[name[transform_list]][constant[0]]._libsuffix]]]
variable[itk_composed_tx] assign[=] call[name[libfn], parameter[name[tx_ptr_list], name[precision], name[dimension]]]
return[call[name[ANTsTransform], parameter[]]] | keyword[def] identifier[compose_ants_transforms] ( identifier[transform_list] ):
literal[string]
identifier[precision] = identifier[transform_list] [ literal[int] ]. identifier[precision]
identifier[dimension] = identifier[transform_list] [ literal[int] ]. identifier[dimension]
keyword[for] identifier[tx] keyword[in] identifier[transform_list] :
keyword[if] identifier[precision] != identifier[tx] . identifier[precision] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[dimension] != identifier[tx] . identifier[dimension] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[tx_ptr_list] = identifier[list] ( identifier[reversed] ([ identifier[tf] . identifier[pointer] keyword[for] identifier[tf] keyword[in] identifier[transform_list] ]))
identifier[libfn] = identifier[utils] . identifier[get_lib_fn] ( literal[string] %( identifier[transform_list] [ literal[int] ]. identifier[_libsuffix] ))
identifier[itk_composed_tx] = identifier[libfn] ( identifier[tx_ptr_list] , identifier[precision] , identifier[dimension] )
keyword[return] identifier[ANTsTransform] ( identifier[precision] = identifier[precision] , identifier[dimension] = identifier[dimension] ,
identifier[transform_type] = literal[string] , identifier[pointer] = identifier[itk_composed_tx] ) | def compose_ants_transforms(transform_list):
"""
Compose multiple ANTsTransform's together
ANTsR function: `composeAntsrTransforms`
Arguments
---------
transform_list : list/tuple of ANTsTransform object
list of transforms to compose together
Returns
-------
ANTsTransform
one transform that contains all given transforms
Example
-------
>>> import ants
>>> img = ants.image_read(ants.get_ants_data("r16")).clone('float')
>>> tx = ants.new_ants_transform(dimension=2)
>>> tx.set_parameters((0.9,0,0,1.1,10,11))
>>> inv_tx = tx.invert()
>>> single_tx = ants.compose_ants_transforms([tx, inv_tx])
>>> img_orig = single_tx.apply_to_image(img, img)
>>> rRotGenerator = ants.contrib.RandomRotate2D( ( 0, 40 ), reference=img )
>>> rShearGenerator=ants.contrib.RandomShear2D( (0,50), reference=img )
>>> tx1 = rRotGenerator.transform()
>>> tx2 = rShearGenerator.transform()
>>> rSrR = ants.compose_ants_transforms([tx1, tx2])
>>> rSrR.apply_to_image( img )
"""
precision = transform_list[0].precision
dimension = transform_list[0].dimension
for tx in transform_list:
if precision != tx.precision:
raise ValueError('All transforms must have the same precision') # depends on [control=['if'], data=[]]
if dimension != tx.dimension:
raise ValueError('All transforms must have the same dimension') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tx']]
tx_ptr_list = list(reversed([tf.pointer for tf in transform_list]))
libfn = utils.get_lib_fn('composeTransforms%s' % transform_list[0]._libsuffix)
itk_composed_tx = libfn(tx_ptr_list, precision, dimension)
return ANTsTransform(precision=precision, dimension=dimension, transform_type='CompositeTransform', pointer=itk_composed_tx) |
def biopython_to_partials(alignment, datatype):
""" Generate a partials dictionary from a treeCl.Alignment """
partials_dict = {}
for seq in alignment:
partials_dict[seq.name] = seq_to_partials(seq, datatype)
return partials_dict | def function[biopython_to_partials, parameter[alignment, datatype]]:
constant[ Generate a partials dictionary from a treeCl.Alignment ]
variable[partials_dict] assign[=] dictionary[[], []]
for taget[name[seq]] in starred[name[alignment]] begin[:]
call[name[partials_dict]][name[seq].name] assign[=] call[name[seq_to_partials], parameter[name[seq], name[datatype]]]
return[name[partials_dict]] | keyword[def] identifier[biopython_to_partials] ( identifier[alignment] , identifier[datatype] ):
literal[string]
identifier[partials_dict] ={}
keyword[for] identifier[seq] keyword[in] identifier[alignment] :
identifier[partials_dict] [ identifier[seq] . identifier[name] ]= identifier[seq_to_partials] ( identifier[seq] , identifier[datatype] )
keyword[return] identifier[partials_dict] | def biopython_to_partials(alignment, datatype):
""" Generate a partials dictionary from a treeCl.Alignment """
partials_dict = {}
for seq in alignment:
partials_dict[seq.name] = seq_to_partials(seq, datatype) # depends on [control=['for'], data=['seq']]
return partials_dict |
async def debug_text(self, texts: Union[str, list], positions: Union[list, set], color=(0, 255, 0), size_px=16):
""" Deprecated, may be removed soon """
if isinstance(positions, (set, list)):
if not positions:
return
if isinstance(texts, str):
texts = [texts] * len(positions)
assert len(texts) == len(positions)
await self._execute(
debug=sc_pb.RequestDebug(
debug=[
debug_pb.DebugCommand(
draw=debug_pb.DebugDraw(
text=[
debug_pb.DebugText(
text=t,
color=debug_pb.Color(r=color[0], g=color[1], b=color[2]),
world_pos=common_pb.Point(x=p.x, y=p.y, z=getattr(p, "z", 10)),
size=size_px,
)
for t, p in zip(texts, positions)
]
)
)
]
)
)
else:
        await self.debug_text([texts], [positions], color, size_px) | <ast.AsyncFunctionDef object at 0x7da18bc71120>
literal[string]
keyword[if] identifier[isinstance] ( identifier[positions] ,( identifier[set] , identifier[list] )):
keyword[if] keyword[not] identifier[positions] :
keyword[return]
keyword[if] identifier[isinstance] ( identifier[texts] , identifier[str] ):
identifier[texts] =[ identifier[texts] ]* identifier[len] ( identifier[positions] )
keyword[assert] identifier[len] ( identifier[texts] )== identifier[len] ( identifier[positions] )
keyword[await] identifier[self] . identifier[_execute] (
identifier[debug] = identifier[sc_pb] . identifier[RequestDebug] (
identifier[debug] =[
identifier[debug_pb] . identifier[DebugCommand] (
identifier[draw] = identifier[debug_pb] . identifier[DebugDraw] (
identifier[text] =[
identifier[debug_pb] . identifier[DebugText] (
identifier[text] = identifier[t] ,
identifier[color] = identifier[debug_pb] . identifier[Color] ( identifier[r] = identifier[color] [ literal[int] ], identifier[g] = identifier[color] [ literal[int] ], identifier[b] = identifier[color] [ literal[int] ]),
identifier[world_pos] = identifier[common_pb] . identifier[Point] ( identifier[x] = identifier[p] . identifier[x] , identifier[y] = identifier[p] . identifier[y] , identifier[z] = identifier[getattr] ( identifier[p] , literal[string] , literal[int] )),
identifier[size] = identifier[size_px] ,
)
keyword[for] identifier[t] , identifier[p] keyword[in] identifier[zip] ( identifier[texts] , identifier[positions] )
]
)
)
]
)
)
keyword[else] :
keyword[await] identifier[self] . identifier[debug_text] ([ identifier[texts] ],[ identifier[positions] ], identifier[color] ) | async def debug_text(self, texts: Union[str, list], positions: Union[list, set], color=(0, 255, 0), size_px=16):
""" Deprecated, may be removed soon """
if isinstance(positions, (set, list)):
if not positions:
return # depends on [control=['if'], data=[]]
if isinstance(texts, str):
texts = [texts] * len(positions) # depends on [control=['if'], data=[]]
assert len(texts) == len(positions)
await self._execute(debug=sc_pb.RequestDebug(debug=[debug_pb.DebugCommand(draw=debug_pb.DebugDraw(text=[debug_pb.DebugText(text=t, color=debug_pb.Color(r=color[0], g=color[1], b=color[2]), world_pos=common_pb.Point(x=p.x, y=p.y, z=getattr(p, 'z', 10)), size=size_px) for (t, p) in zip(texts, positions)]))])) # depends on [control=['if'], data=[]]
else:
await self.debug_text([texts], [positions], color) |
def is_binary(self, component):
"""
especially useful for constraints
tells whether any component (star, envelope) is part of a binary
by checking its parent
"""
if component not in self._is_binary.keys():
self._update_cache()
return self._is_binary.get(component) | def function[is_binary, parameter[self, component]]:
constant[
especially useful for constraints
tells whether any component (star, envelope) is part of a binary
by checking its parent
]
if compare[name[component] <ast.NotIn object at 0x7da2590d7190> call[name[self]._is_binary.keys, parameter[]]] begin[:]
call[name[self]._update_cache, parameter[]]
return[call[name[self]._is_binary.get, parameter[name[component]]]] | keyword[def] identifier[is_binary] ( identifier[self] , identifier[component] ):
literal[string]
keyword[if] identifier[component] keyword[not] keyword[in] identifier[self] . identifier[_is_binary] . identifier[keys] ():
identifier[self] . identifier[_update_cache] ()
keyword[return] identifier[self] . identifier[_is_binary] . identifier[get] ( identifier[component] ) | def is_binary(self, component):
"""
especially useful for constraints
tells whether any component (star, envelope) is part of a binary
by checking its parent
"""
if component not in self._is_binary.keys():
self._update_cache() # depends on [control=['if'], data=[]]
return self._is_binary.get(component) |
def decipher(self,string,keep_punct=False):
"""Decipher string using Simple Substitution cipher according to initialised key.
Example::
plaintext = SimpleSubstitution('AJPCZWRLFBDKOTYUQGENHXMIVS').decipher(ciphertext)
:param string: The string to decipher.
:param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False.
:returns: The deciphered string.
"""
# if we have not yet calculated the inverse key, calculate it now
if self.invkey == '':
for i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
self.invkey += self.i2a(self.key.index(i))
if not keep_punct: string = self.remove_punctuation(string)
ret = ''
for c in string.upper():
if c.isalpha(): ret += self.invkey[self.a2i(c)]
else: ret += c
return ret | def function[decipher, parameter[self, string, keep_punct]]:
constant[Decipher string using Simple Substitution cipher according to initialised key.
Example::
plaintext = SimpleSubstitution('AJPCZWRLFBDKOTYUQGENHXMIVS').decipher(ciphertext)
:param string: The string to decipher.
:param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False.
:returns: The deciphered string.
]
if compare[name[self].invkey equal[==] constant[]] begin[:]
for taget[name[i]] in starred[constant[ABCDEFGHIJKLMNOPQRSTUVWXYZ]] begin[:]
<ast.AugAssign object at 0x7da1b065b940>
if <ast.UnaryOp object at 0x7da1b065b1c0> begin[:]
variable[string] assign[=] call[name[self].remove_punctuation, parameter[name[string]]]
variable[ret] assign[=] constant[]
for taget[name[c]] in starred[call[name[string].upper, parameter[]]] begin[:]
if call[name[c].isalpha, parameter[]] begin[:]
<ast.AugAssign object at 0x7da1b065b8e0>
return[name[ret]] | keyword[def] identifier[decipher] ( identifier[self] , identifier[string] , identifier[keep_punct] = keyword[False] ):
literal[string]
keyword[if] identifier[self] . identifier[invkey] == literal[string] :
keyword[for] identifier[i] keyword[in] literal[string] :
identifier[self] . identifier[invkey] += identifier[self] . identifier[i2a] ( identifier[self] . identifier[key] . identifier[index] ( identifier[i] ))
keyword[if] keyword[not] identifier[keep_punct] : identifier[string] = identifier[self] . identifier[remove_punctuation] ( identifier[string] )
identifier[ret] = literal[string]
keyword[for] identifier[c] keyword[in] identifier[string] . identifier[upper] ():
keyword[if] identifier[c] . identifier[isalpha] (): identifier[ret] += identifier[self] . identifier[invkey] [ identifier[self] . identifier[a2i] ( identifier[c] )]
keyword[else] : identifier[ret] += identifier[c]
keyword[return] identifier[ret] | def decipher(self, string, keep_punct=False):
"""Decipher string using Simple Substitution cipher according to initialised key.
Example::
plaintext = SimpleSubstitution('AJPCZWRLFBDKOTYUQGENHXMIVS').decipher(ciphertext)
:param string: The string to decipher.
:param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False.
:returns: The deciphered string.
"""
# if we have not yet calculated the inverse key, calculate it now
if self.invkey == '':
for i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
self.invkey += self.i2a(self.key.index(i)) # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
if not keep_punct:
string = self.remove_punctuation(string) # depends on [control=['if'], data=[]]
ret = ''
for c in string.upper():
if c.isalpha():
ret += self.invkey[self.a2i(c)] # depends on [control=['if'], data=[]]
else:
ret += c # depends on [control=['for'], data=['c']]
return ret |
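The lazily computed `invkey` is just the inverse permutation of the key; a standalone sketch of the same computation (assuming, as the class implies, that `i2a`/`a2i` map indices 0-25 to the letters A-Z and back):

```python
ALPHA = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

def invert_key(key):
    # invkey is built so that invkey[position of c in ALPHA] recovers the
    # plaintext letter for cipher letter c, i.e. the inverse permutation.
    return ''.join(ALPHA[key.index(c)] for c in ALPHA)

invert_key('AJPCZWRLFBDKOTYUQGENHXMIVS')  # inverse of the docstring's key
```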
def signature(obj):
'''Get a signature object for the passed callable.'''
if not callable(obj):
raise TypeError('{0!r} is not a callable object'.format(obj))
if isinstance(obj, types.MethodType):
sig = signature(obj.__func__)
if obj.__self__ is None:
# Unbound method: the first parameter becomes positional-only
if sig.parameters:
first = sig.parameters.values()[0].replace(
kind=_POSITIONAL_ONLY)
return sig.replace(
parameters=(first,) + tuple(sig.parameters.values())[1:])
else:
return sig
else:
# In this case we skip the first parameter of the underlying
# function (usually `self` or `cls`).
return sig.replace(parameters=tuple(sig.parameters.values())[1:])
try:
sig = obj.__signature__
except AttributeError:
pass
else:
if sig is not None:
return sig
try:
# Was this function wrapped by a decorator?
wrapped = obj.__wrapped__
except AttributeError:
pass
else:
return signature(wrapped)
if isinstance(obj, types.FunctionType):
return Signature.from_function(obj)
if isinstance(obj, functools.partial):
sig = signature(obj.func)
new_params = OrderedDict(sig.parameters.items())
partial_args = obj.args or ()
partial_keywords = obj.keywords or {}
try:
ba = sig.bind_partial(*partial_args, **partial_keywords)
except TypeError:
msg = 'partial object {0!r} has incorrect arguments'.format(obj)
raise ValueError(msg)
for arg_name, arg_value in ba.arguments.items():
param = new_params[arg_name]
if arg_name in partial_keywords:
# We set a new default value, because the following code
# is correct:
#
# >>> def foo(a): print(a)
# >>> print(partial(partial(foo, a=10), a=20)())
# 20
# >>> print(partial(partial(foo, a=10), a=20)(a=30))
# 30
#
# So, with 'partial' objects, passing a keyword argument is
# like setting a new default value for the corresponding
# parameter
#
# We also mark this parameter with '_partial_kwarg'
# flag. Later, in '_bind', the 'default' value of this
# parameter will be added to 'kwargs', to simulate
# the 'functools.partial' real call.
new_params[arg_name] = param.replace(default=arg_value,
_partial_kwarg=True)
elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL)
and not param._partial_kwarg):
new_params.pop(arg_name)
return sig.replace(parameters=new_params.values())
sig = None
if isinstance(obj, type):
# obj is a class or a metaclass
# First, let's see if it has an overloaded __call__ defined
# in its metaclass
call = _get_user_defined_method(type(obj), '__call__')
if call is not None:
sig = signature(call)
else:
# Now we check if the 'obj' class has a '__new__' method
new = _get_user_defined_method(obj, '__new__')
if new is not None:
sig = signature(new)
else:
# Finally, we should have at least __init__ implemented
init = _get_user_defined_method(obj, '__init__')
if init is not None:
sig = signature(init)
elif not isinstance(obj, _NonUserDefinedCallables):
# An object with __call__
# We also check that the 'obj' is not an instance of
# _WrapperDescriptor or _MethodWrapper to avoid
# infinite recursion (and even potential segfault)
call = _get_user_defined_method(type(obj), '__call__', 'im_func')
if call is not None:
sig = signature(call)
if sig is not None:
# For classes and objects we skip the first parameter of their
# __call__, __new__, or __init__ methods
return sig.replace(parameters=tuple(sig.parameters.values())[1:])
if isinstance(obj, types.BuiltinFunctionType):
# Raise a nicer error message for builtins
msg = 'no signature found for builtin function {0!r}'.format(obj)
raise ValueError(msg)
raise ValueError(
'callable {0!r} is not supported by signature'.format(obj)) | def function[signature, parameter[obj]]:
constant[Get a signature object for the passed callable.]
if <ast.UnaryOp object at 0x7da1b1817e50> begin[:]
<ast.Raise object at 0x7da1b1817d90>
if call[name[isinstance], parameter[name[obj], name[types].MethodType]] begin[:]
variable[sig] assign[=] call[name[signature], parameter[name[obj].__func__]]
if compare[name[obj].__self__ is constant[None]] begin[:]
if name[sig].parameters begin[:]
variable[first] assign[=] call[call[call[name[sig].parameters.values, parameter[]]][constant[0]].replace, parameter[]]
return[call[name[sig].replace, parameter[]]]
<ast.Try object at 0x7da1b1814ee0>
<ast.Try object at 0x7da1b18151e0>
if call[name[isinstance], parameter[name[obj], name[types].FunctionType]] begin[:]
return[call[name[Signature].from_function, parameter[name[obj]]]]
if call[name[isinstance], parameter[name[obj], name[functools].partial]] begin[:]
variable[sig] assign[=] call[name[signature], parameter[name[obj].func]]
variable[new_params] assign[=] call[name[OrderedDict], parameter[call[name[sig].parameters.items, parameter[]]]]
variable[partial_args] assign[=] <ast.BoolOp object at 0x7da1b1815ae0>
variable[partial_keywords] assign[=] <ast.BoolOp object at 0x7da1b1815c00>
<ast.Try object at 0x7da1b1815cc0>
for taget[tuple[[<ast.Name object at 0x7da1b1816a10>, <ast.Name object at 0x7da1b18169e0>]]] in starred[call[name[ba].arguments.items, parameter[]]] begin[:]
variable[param] assign[=] call[name[new_params]][name[arg_name]]
if compare[name[arg_name] in name[partial_keywords]] begin[:]
call[name[new_params]][name[arg_name]] assign[=] call[name[param].replace, parameter[]]
return[call[name[sig].replace, parameter[]]]
variable[sig] assign[=] constant[None]
if call[name[isinstance], parameter[name[obj], name[type]]] begin[:]
variable[call] assign[=] call[name[_get_user_defined_method], parameter[call[name[type], parameter[name[obj]]], constant[__call__]]]
if compare[name[call] is_not constant[None]] begin[:]
variable[sig] assign[=] call[name[signature], parameter[name[call]]]
if compare[name[sig] is_not constant[None]] begin[:]
return[call[name[sig].replace, parameter[]]]
if call[name[isinstance], parameter[name[obj], name[types].BuiltinFunctionType]] begin[:]
variable[msg] assign[=] call[constant[no signature found for builtin function {0!r}].format, parameter[name[obj]]]
<ast.Raise object at 0x7da1b18dd1e0>
<ast.Raise object at 0x7da1b18def20> | keyword[def] identifier[signature] ( identifier[obj] ):
literal[string]
keyword[if] keyword[not] identifier[callable] ( identifier[obj] ):
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[obj] ))
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[types] . identifier[MethodType] ):
identifier[sig] = identifier[signature] ( identifier[obj] . identifier[__func__] )
keyword[if] identifier[obj] . identifier[__self__] keyword[is] keyword[None] :
keyword[if] identifier[sig] . identifier[parameters] :
identifier[first] = identifier[sig] . identifier[parameters] . identifier[values] ()[ literal[int] ]. identifier[replace] (
identifier[kind] = identifier[_POSITIONAL_ONLY] )
keyword[return] identifier[sig] . identifier[replace] (
identifier[parameters] =( identifier[first] ,)+ identifier[tuple] ( identifier[sig] . identifier[parameters] . identifier[values] ())[ literal[int] :])
keyword[else] :
keyword[return] identifier[sig]
keyword[else] :
keyword[return] identifier[sig] . identifier[replace] ( identifier[parameters] = identifier[tuple] ( identifier[sig] . identifier[parameters] . identifier[values] ())[ literal[int] :])
keyword[try] :
identifier[sig] = identifier[obj] . identifier[__signature__]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[else] :
keyword[if] identifier[sig] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[sig]
keyword[try] :
identifier[wrapped] = identifier[obj] . identifier[__wrapped__]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[else] :
keyword[return] identifier[signature] ( identifier[wrapped] )
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[types] . identifier[FunctionType] ):
keyword[return] identifier[Signature] . identifier[from_function] ( identifier[obj] )
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[functools] . identifier[partial] ):
identifier[sig] = identifier[signature] ( identifier[obj] . identifier[func] )
identifier[new_params] = identifier[OrderedDict] ( identifier[sig] . identifier[parameters] . identifier[items] ())
identifier[partial_args] = identifier[obj] . identifier[args] keyword[or] ()
identifier[partial_keywords] = identifier[obj] . identifier[keywords] keyword[or] {}
keyword[try] :
identifier[ba] = identifier[sig] . identifier[bind_partial] (* identifier[partial_args] ,** identifier[partial_keywords] )
keyword[except] identifier[TypeError] :
identifier[msg] = literal[string] . identifier[format] ( identifier[obj] )
keyword[raise] identifier[ValueError] ( identifier[msg] )
keyword[for] identifier[arg_name] , identifier[arg_value] keyword[in] identifier[ba] . identifier[arguments] . identifier[items] ():
identifier[param] = identifier[new_params] [ identifier[arg_name] ]
keyword[if] identifier[arg_name] keyword[in] identifier[partial_keywords] :
identifier[new_params] [ identifier[arg_name] ]= identifier[param] . identifier[replace] ( identifier[default] = identifier[arg_value] ,
identifier[_partial_kwarg] = keyword[True] )
keyword[elif] ( identifier[param] . identifier[kind] keyword[not] keyword[in] ( identifier[_VAR_KEYWORD] , identifier[_VAR_POSITIONAL] )
keyword[and] keyword[not] identifier[param] . identifier[_partial_kwarg] ):
identifier[new_params] . identifier[pop] ( identifier[arg_name] )
keyword[return] identifier[sig] . identifier[replace] ( identifier[parameters] = identifier[new_params] . identifier[values] ())
identifier[sig] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[type] ):
identifier[call] = identifier[_get_user_defined_method] ( identifier[type] ( identifier[obj] ), literal[string] )
keyword[if] identifier[call] keyword[is] keyword[not] keyword[None] :
identifier[sig] = identifier[signature] ( identifier[call] )
keyword[else] :
identifier[new] = identifier[_get_user_defined_method] ( identifier[obj] , literal[string] )
keyword[if] identifier[new] keyword[is] keyword[not] keyword[None] :
identifier[sig] = identifier[signature] ( identifier[new] )
keyword[else] :
identifier[init] = identifier[_get_user_defined_method] ( identifier[obj] , literal[string] )
keyword[if] identifier[init] keyword[is] keyword[not] keyword[None] :
identifier[sig] = identifier[signature] ( identifier[init] )
keyword[elif] keyword[not] identifier[isinstance] ( identifier[obj] , identifier[_NonUserDefinedCallables] ):
identifier[call] = identifier[_get_user_defined_method] ( identifier[type] ( identifier[obj] ), literal[string] , literal[string] )
keyword[if] identifier[call] keyword[is] keyword[not] keyword[None] :
identifier[sig] = identifier[signature] ( identifier[call] )
keyword[if] identifier[sig] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[sig] . identifier[replace] ( identifier[parameters] = identifier[tuple] ( identifier[sig] . identifier[parameters] . identifier[values] ())[ literal[int] :])
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[types] . identifier[BuiltinFunctionType] ):
identifier[msg] = literal[string] . identifier[format] ( identifier[obj] )
keyword[raise] identifier[ValueError] ( identifier[msg] )
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] ( identifier[obj] )) | def signature(obj):
"""Get a signature object for the passed callable."""
if not callable(obj):
raise TypeError('{0!r} is not a callable object'.format(obj)) # depends on [control=['if'], data=[]]
if isinstance(obj, types.MethodType):
sig = signature(obj.__func__)
if obj.__self__ is None:
# Unbound method: the first parameter becomes positional-only
if sig.parameters:
first = sig.parameters.values()[0].replace(kind=_POSITIONAL_ONLY)
return sig.replace(parameters=(first,) + tuple(sig.parameters.values())[1:]) # depends on [control=['if'], data=[]]
else:
return sig # depends on [control=['if'], data=[]]
else:
# In this case we skip the first parameter of the underlying
# function (usually `self` or `cls`).
return sig.replace(parameters=tuple(sig.parameters.values())[1:]) # depends on [control=['if'], data=[]]
try:
sig = obj.__signature__ # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
else:
if sig is not None:
return sig # depends on [control=['if'], data=['sig']]
try:
# Was this function wrapped by a decorator?
wrapped = obj.__wrapped__ # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
else:
return signature(wrapped)
if isinstance(obj, types.FunctionType):
return Signature.from_function(obj) # depends on [control=['if'], data=[]]
if isinstance(obj, functools.partial):
sig = signature(obj.func)
new_params = OrderedDict(sig.parameters.items())
partial_args = obj.args or ()
partial_keywords = obj.keywords or {}
try:
ba = sig.bind_partial(*partial_args, **partial_keywords) # depends on [control=['try'], data=[]]
except TypeError:
msg = 'partial object {0!r} has incorrect arguments'.format(obj)
raise ValueError(msg) # depends on [control=['except'], data=[]]
for (arg_name, arg_value) in ba.arguments.items():
param = new_params[arg_name]
if arg_name in partial_keywords:
# We set a new default value, because the following code
# is correct:
#
# >>> def foo(a): print(a)
# >>> print(partial(partial(foo, a=10), a=20)())
# 20
# >>> print(partial(partial(foo, a=10), a=20)(a=30))
# 30
#
# So, with 'partial' objects, passing a keyword argument is
# like setting a new default value for the corresponding
# parameter
#
# We also mark this parameter with '_partial_kwarg'
# flag. Later, in '_bind', the 'default' value of this
# parameter will be added to 'kwargs', to simulate
# the 'functools.partial' real call.
new_params[arg_name] = param.replace(default=arg_value, _partial_kwarg=True) # depends on [control=['if'], data=['arg_name']]
elif param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and (not param._partial_kwarg):
new_params.pop(arg_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return sig.replace(parameters=new_params.values()) # depends on [control=['if'], data=[]]
sig = None
if isinstance(obj, type):
# obj is a class or a metaclass
# First, let's see if it has an overloaded __call__ defined
# in its metaclass
call = _get_user_defined_method(type(obj), '__call__')
if call is not None:
sig = signature(call) # depends on [control=['if'], data=['call']]
else:
# Now we check if the 'obj' class has a '__new__' method
new = _get_user_defined_method(obj, '__new__')
if new is not None:
sig = signature(new) # depends on [control=['if'], data=['new']]
else:
# Finally, we should have at least __init__ implemented
init = _get_user_defined_method(obj, '__init__')
if init is not None:
sig = signature(init) # depends on [control=['if'], data=['init']] # depends on [control=['if'], data=[]]
elif not isinstance(obj, _NonUserDefinedCallables):
# An object with __call__
# We also check that the 'obj' is not an instance of
# _WrapperDescriptor or _MethodWrapper to avoid
# infinite recursion (and even potential segfault)
call = _get_user_defined_method(type(obj), '__call__', 'im_func')
if call is not None:
sig = signature(call) # depends on [control=['if'], data=['call']] # depends on [control=['if'], data=[]]
if sig is not None:
# For classes and objects we skip the first parameter of their
# __call__, __new__, or __init__ methods
return sig.replace(parameters=tuple(sig.parameters.values())[1:]) # depends on [control=['if'], data=['sig']]
if isinstance(obj, types.BuiltinFunctionType):
# Raise a nicer error message for builtins
msg = 'no signature found for builtin function {0!r}'.format(obj)
raise ValueError(msg) # depends on [control=['if'], data=[]]
raise ValueError('callable {0!r} is not supported by signature'.format(obj)) |
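A hedged usage sketch of the `functools.partial` branch (the `greet` function is illustrative): positionally bound parameters disappear from the reported signature, while keyword bindings stay visible with a new default:

```python
import functools

def greet(greeting, name, punctuation='!'):
    return '{0}, {1}{2}'.format(greeting, name, punctuation)

print(signature(functools.partial(greet, 'Hello')))
# -> (name, punctuation='!')           positional binding removes `greeting`
print(signature(functools.partial(greet, punctuation='?')))
# -> (greeting, name, punctuation='?') keyword binding becomes a new default
```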
def h(values):
"""
Function calculates entropy.
values: list of integers
"""
ent = np.true_divide(values, np.sum(values))
return -np.sum(np.multiply(ent, np.log2(ent))) | def function[h, parameter[values]]:
constant[
Function calculates entropy.
values: list of integers
]
variable[ent] assign[=] call[name[np].true_divide, parameter[name[values], call[name[np].sum, parameter[name[values]]]]]
return[<ast.UnaryOp object at 0x7da1b2368e20>] | keyword[def] identifier[h] ( identifier[values] ):
literal[string]
identifier[ent] = identifier[np] . identifier[true_divide] ( identifier[values] , identifier[np] . identifier[sum] ( identifier[values] ))
keyword[return] - identifier[np] . identifier[sum] ( identifier[np] . identifier[multiply] ( identifier[ent] , identifier[np] . identifier[log2] ( identifier[ent] ))) | def h(values):
"""
Function calculates entropy.
values: list of integers
"""
ent = np.true_divide(values, np.sum(values))
return -np.sum(np.multiply(ent, np.log2(ent))) |
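Since the counts are normalized with `np.true_divide`, `h` returns entropy in bits. A quick sanity check of the function defined above:

```python
print(h([1, 1]))        # 1.0  (a fair coin carries one bit)
print(h([8, 4, 2, 2]))  # 1.75 (0.5*1 + 0.25*2 + 0.125*3 + 0.125*3)
```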
def index(method=None, delete=False):
"""Decorator to update index.
:param method: Function wrapped. (Default: ``None``)
    :param delete: If ``True``, delete the indexed record. (Default: ``False``)
"""
if method is None:
return partial(index, delete=delete)
@wraps(method)
def wrapper(self_or_cls, *args, **kwargs):
"""Send record for indexing."""
result = method(self_or_cls, *args, **kwargs)
try:
if delete:
self_or_cls.indexer.delete(result)
else:
self_or_cls.indexer.index(result)
except RequestError:
current_app.logger.exception('Could not index {0}.'.format(result))
return result
return wrapper | def function[index, parameter[method, delete]]:
constant[Decorator to update index.
:param method: Function wrapped. (Default: ``None``)
:param delete: If `True` delete the indexed record. (Default: ``None``)
]
if compare[name[method] is constant[None]] begin[:]
return[call[name[partial], parameter[name[index]]]]
def function[wrapper, parameter[self_or_cls]]:
constant[Send record for indexing.]
variable[result] assign[=] call[name[method], parameter[name[self_or_cls], <ast.Starred object at 0x7da1aff1d120>]]
<ast.Try object at 0x7da1aff1c880>
return[name[result]]
return[name[wrapper]] | keyword[def] identifier[index] ( identifier[method] = keyword[None] , identifier[delete] = keyword[False] ):
literal[string]
keyword[if] identifier[method] keyword[is] keyword[None] :
keyword[return] identifier[partial] ( identifier[index] , identifier[delete] = identifier[delete] )
@ identifier[wraps] ( identifier[method] )
keyword[def] identifier[wrapper] ( identifier[self_or_cls] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[result] = identifier[method] ( identifier[self_or_cls] ,* identifier[args] ,** identifier[kwargs] )
keyword[try] :
keyword[if] identifier[delete] :
identifier[self_or_cls] . identifier[indexer] . identifier[delete] ( identifier[result] )
keyword[else] :
identifier[self_or_cls] . identifier[indexer] . identifier[index] ( identifier[result] )
keyword[except] identifier[RequestError] :
identifier[current_app] . identifier[logger] . identifier[exception] ( literal[string] . identifier[format] ( identifier[result] ))
keyword[return] identifier[result]
keyword[return] identifier[wrapper] | def index(method=None, delete=False):
"""Decorator to update index.
:param method: Function wrapped. (Default: ``None``)
:param delete: If `True` delete the indexed record. (Default: ``None``)
"""
if method is None:
return partial(index, delete=delete) # depends on [control=['if'], data=[]]
@wraps(method)
def wrapper(self_or_cls, *args, **kwargs):
"""Send record for indexing."""
result = method(self_or_cls, *args, **kwargs)
try:
if delete:
self_or_cls.indexer.delete(result) # depends on [control=['if'], data=[]]
else:
self_or_cls.indexer.index(result) # depends on [control=['try'], data=[]]
except RequestError:
current_app.logger.exception('Could not index {0}.'.format(result)) # depends on [control=['except'], data=[]]
return result
return wrapper |
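A hedged usage sketch: the wrapper only requires the decorated method's owner to expose an `indexer` with `index()`/`delete()` methods, so a stand-in indexer is enough to exercise both decorator forms (all names below are illustrative, not from the source):

```python
class LogIndexer(object):
    # Stand-in for a real search indexer; the decorator only calls
    # index() on normal updates and delete() when delete=True.
    def index(self, record):
        print('indexing %r' % record)

    def delete(self, record):
        print('de-indexing %r' % record)

class MyRecord(object):
    indexer = LogIndexer()

    @index
    def commit(self):
        return self  # the return value is what gets indexed

    @index(delete=True)
    def remove(self):
        return self  # the return value is what gets de-indexed
```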
def elfhash(s):
"""
:param string: bytes
>>> import base64
>>> s = base64.b64encode(b'hello world')
>>> elfhash(s)
224648685
"""
hash = 0
x = 0
for c in s:
hash = (hash << 4) + c
x = hash & 0xF0000000
if x:
hash ^= (x >> 24)
hash &= ~x
return (hash & 0x7FFFFFFF) | def function[elfhash, parameter[s]]:
constant[
:param string: bytes
>>> import base64
>>> s = base64.b64encode(b'hello world')
>>> elfhash(s)
224648685
]
variable[hash] assign[=] constant[0]
variable[x] assign[=] constant[0]
for taget[name[c]] in starred[name[s]] begin[:]
variable[hash] assign[=] binary_operation[binary_operation[name[hash] <ast.LShift object at 0x7da2590d69e0> constant[4]] + name[c]]
variable[x] assign[=] binary_operation[name[hash] <ast.BitAnd object at 0x7da2590d6b60> constant[4026531840]]
if name[x] begin[:]
<ast.AugAssign object at 0x7da18c4ccb50>
<ast.AugAssign object at 0x7da1b23463e0>
return[binary_operation[name[hash] <ast.BitAnd object at 0x7da2590d6b60> constant[2147483647]]] | keyword[def] identifier[elfhash] ( identifier[s] ):
literal[string]
identifier[hash] = literal[int]
identifier[x] = literal[int]
keyword[for] identifier[c] keyword[in] identifier[s] :
identifier[hash] =( identifier[hash] << literal[int] )+ identifier[c]
identifier[x] = identifier[hash] & literal[int]
keyword[if] identifier[x] :
identifier[hash] ^=( identifier[x] >> literal[int] )
identifier[hash] &=~ identifier[x]
keyword[return] ( identifier[hash] & literal[int] ) | def elfhash(s):
"""
:param string: bytes
>>> import base64
>>> s = base64.b64encode(b'hello world')
>>> elfhash(s)
224648685
"""
hash = 0
x = 0
for c in s:
hash = (hash << 4) + c
x = hash & 4026531840
if x:
hash ^= x >> 24
hash &= ~x # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']]
return hash & 2147483647 |
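The loop relies on iterating `bytes`, which in Python 3 yields integers; for `str` input the same hash needs `ord()` per character. A hedged variant:

```python
def elfhash_str(s):
    # Same ELF hash as elfhash() above, but accepts str by applying
    # ord() to each character instead of relying on bytes iteration.
    h = 0
    for ch in s:
        h = (h << 4) + ord(ch)
        x = h & 0xF0000000
        if x:
            h ^= (x >> 24)
            h &= ~x
    return h & 0x7FFFFFFF
```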
def divideSeries(requestContext, dividendSeriesList, divisorSeriesList):
"""
Takes a dividend metric and a divisor metric and draws the division result.
A constant may *not* be passed. To divide by a constant, use the scale()
function (which is essentially a multiplication operation) and use the
    inverse of the divisor. (Division by 8 = multiplication by 1/8 or 0.125)
Example::
&target=divideSeries(Series.dividends,Series.divisors)
"""
if len(divisorSeriesList) == 0:
for series in dividendSeriesList:
series.name = "divideSeries(%s,MISSING)" % series.name
series.pathExpression = series.name
for i in range(len(series)):
series[i] = None
return dividendSeriesList
if len(divisorSeriesList) > 1:
raise ValueError(
"divideSeries second argument must reference exactly 1 series"
" (got {0})".format(len(divisorSeriesList)))
[divisorSeries] = divisorSeriesList
results = []
for dividendSeries in dividendSeriesList:
name = "divideSeries(%s,%s)" % (dividendSeries.name,
divisorSeries.name)
bothSeries = (dividendSeries, divisorSeries)
step = reduce(lcm, [s.step for s in bothSeries])
for s in bothSeries:
s.consolidate(step / s.step)
start = min([s.start for s in bothSeries])
end = max([s.end for s in bothSeries])
end -= (end - start) % step
values = (safeDiv(v1, v2) for v1, v2 in zip_longest(*bothSeries))
quotientSeries = TimeSeries(name, start, end, step, values)
quotientSeries.pathExpression = name
results.append(quotientSeries)
return results | def function[divideSeries, parameter[requestContext, dividendSeriesList, divisorSeriesList]]:
constant[
Takes a dividend metric and a divisor metric and draws the division result.
A constant may *not* be passed. To divide by a constant, use the scale()
function (which is essentially a multiplication operation) and use the
inverse of the dividend. (Division by 8 = multiplication by 1/8 or 0.125)
Example::
&target=divideSeries(Series.dividends,Series.divisors)
]
if compare[call[name[len], parameter[name[divisorSeriesList]]] equal[==] constant[0]] begin[:]
for taget[name[series]] in starred[name[dividendSeriesList]] begin[:]
name[series].name assign[=] binary_operation[constant[divideSeries(%s,MISSING)] <ast.Mod object at 0x7da2590d6920> name[series].name]
name[series].pathExpression assign[=] name[series].name
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[series]]]]]] begin[:]
call[name[series]][name[i]] assign[=] constant[None]
return[name[dividendSeriesList]]
if compare[call[name[len], parameter[name[divisorSeriesList]]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da1b06dc730>
<ast.List object at 0x7da1b06de410> assign[=] name[divisorSeriesList]
variable[results] assign[=] list[[]]
for taget[name[dividendSeries]] in starred[name[dividendSeriesList]] begin[:]
variable[name] assign[=] binary_operation[constant[divideSeries(%s,%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b06deb30>, <ast.Attribute object at 0x7da1b06dc910>]]]
variable[bothSeries] assign[=] tuple[[<ast.Name object at 0x7da1b06df5b0>, <ast.Name object at 0x7da1b06de5f0>]]
variable[step] assign[=] call[name[reduce], parameter[name[lcm], <ast.ListComp object at 0x7da1b06df250>]]
for taget[name[s]] in starred[name[bothSeries]] begin[:]
call[name[s].consolidate, parameter[binary_operation[name[step] / name[s].step]]]
variable[start] assign[=] call[name[min], parameter[<ast.ListComp object at 0x7da1b06de650>]]
variable[end] assign[=] call[name[max], parameter[<ast.ListComp object at 0x7da1b06dd510>]]
<ast.AugAssign object at 0x7da204620190>
variable[values] assign[=] <ast.GeneratorExp object at 0x7da2046232e0>
variable[quotientSeries] assign[=] call[name[TimeSeries], parameter[name[name], name[start], name[end], name[step], name[values]]]
name[quotientSeries].pathExpression assign[=] name[name]
call[name[results].append, parameter[name[quotientSeries]]]
return[name[results]] | keyword[def] identifier[divideSeries] ( identifier[requestContext] , identifier[dividendSeriesList] , identifier[divisorSeriesList] ):
literal[string]
keyword[if] identifier[len] ( identifier[divisorSeriesList] )== literal[int] :
keyword[for] identifier[series] keyword[in] identifier[dividendSeriesList] :
identifier[series] . identifier[name] = literal[string] % identifier[series] . identifier[name]
identifier[series] . identifier[pathExpression] = identifier[series] . identifier[name]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[series] )):
identifier[series] [ identifier[i] ]= keyword[None]
keyword[return] identifier[dividendSeriesList]
keyword[if] identifier[len] ( identifier[divisorSeriesList] )> literal[int] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] ( identifier[len] ( identifier[divisorSeriesList] )))
[ identifier[divisorSeries] ]= identifier[divisorSeriesList]
identifier[results] =[]
keyword[for] identifier[dividendSeries] keyword[in] identifier[dividendSeriesList] :
identifier[name] = literal[string] %( identifier[dividendSeries] . identifier[name] ,
identifier[divisorSeries] . identifier[name] )
identifier[bothSeries] =( identifier[dividendSeries] , identifier[divisorSeries] )
identifier[step] = identifier[reduce] ( identifier[lcm] ,[ identifier[s] . identifier[step] keyword[for] identifier[s] keyword[in] identifier[bothSeries] ])
keyword[for] identifier[s] keyword[in] identifier[bothSeries] :
identifier[s] . identifier[consolidate] ( identifier[step] / identifier[s] . identifier[step] )
identifier[start] = identifier[min] ([ identifier[s] . identifier[start] keyword[for] identifier[s] keyword[in] identifier[bothSeries] ])
identifier[end] = identifier[max] ([ identifier[s] . identifier[end] keyword[for] identifier[s] keyword[in] identifier[bothSeries] ])
identifier[end] -=( identifier[end] - identifier[start] )% identifier[step]
identifier[values] =( identifier[safeDiv] ( identifier[v1] , identifier[v2] ) keyword[for] identifier[v1] , identifier[v2] keyword[in] identifier[zip_longest] (* identifier[bothSeries] ))
identifier[quotientSeries] = identifier[TimeSeries] ( identifier[name] , identifier[start] , identifier[end] , identifier[step] , identifier[values] )
identifier[quotientSeries] . identifier[pathExpression] = identifier[name]
identifier[results] . identifier[append] ( identifier[quotientSeries] )
keyword[return] identifier[results] | def divideSeries(requestContext, dividendSeriesList, divisorSeriesList):
"""
Takes a dividend metric and a divisor metric and draws the division result.
A constant may *not* be passed. To divide by a constant, use the scale()
function (which is essentially a multiplication operation) and use the
inverse of the dividend. (Division by 8 = multiplication by 1/8 or 0.125)
Example::
&target=divideSeries(Series.dividends,Series.divisors)
"""
if len(divisorSeriesList) == 0:
for series in dividendSeriesList:
series.name = 'divideSeries(%s,MISSING)' % series.name
series.pathExpression = series.name
for i in range(len(series)):
series[i] = None # depends on [control=['for'], data=['i']]
return dividendSeriesList # depends on [control=['for'], data=['series']] # depends on [control=['if'], data=[]]
if len(divisorSeriesList) > 1:
raise ValueError('divideSeries second argument must reference exactly 1 series (got {0})'.format(len(divisorSeriesList))) # depends on [control=['if'], data=[]]
[divisorSeries] = divisorSeriesList
results = []
for dividendSeries in dividendSeriesList:
name = 'divideSeries(%s,%s)' % (dividendSeries.name, divisorSeries.name)
bothSeries = (dividendSeries, divisorSeries)
step = reduce(lcm, [s.step for s in bothSeries])
for s in bothSeries:
s.consolidate(step / s.step) # depends on [control=['for'], data=['s']]
start = min([s.start for s in bothSeries])
end = max([s.end for s in bothSeries])
end -= (end - start) % step
values = (safeDiv(v1, v2) for (v1, v2) in zip_longest(*bothSeries))
quotientSeries = TimeSeries(name, start, end, step, values)
quotientSeries.pathExpression = name
results.append(quotientSeries) # depends on [control=['for'], data=['dividendSeries']]
return results |
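`safeDiv()` is assumed from the surrounding library; in Graphite it propagates missing datapoints and guards against division by zero, roughly:

```python
def safeDiv(a, b):
    # None marks a missing datapoint: propagate it, and treat division
    # by zero as missing rather than raising ZeroDivisionError.
    if a is None or b is None or b == 0:
        return None
    return float(a) / b
```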
def runSearchContinuousSets(self, request):
"""
Returns a SearchContinuousSetsResponse for the specified
SearchContinuousSetsRequest object.
"""
return self.runSearchRequest(
request, protocol.SearchContinuousSetsRequest,
protocol.SearchContinuousSetsResponse,
self.continuousSetsGenerator) | def function[runSearchContinuousSets, parameter[self, request]]:
constant[
Returns a SearchContinuousSetsResponse for the specified
SearchContinuousSetsRequest object.
]
return[call[name[self].runSearchRequest, parameter[name[request], name[protocol].SearchContinuousSetsRequest, name[protocol].SearchContinuousSetsResponse, name[self].continuousSetsGenerator]]] | keyword[def] identifier[runSearchContinuousSets] ( identifier[self] , identifier[request] ):
literal[string]
keyword[return] identifier[self] . identifier[runSearchRequest] (
identifier[request] , identifier[protocol] . identifier[SearchContinuousSetsRequest] ,
identifier[protocol] . identifier[SearchContinuousSetsResponse] ,
identifier[self] . identifier[continuousSetsGenerator] ) | def runSearchContinuousSets(self, request):
"""
Returns a SearchContinuousSetsResponse for the specified
SearchContinuousSetsRequest object.
"""
return self.runSearchRequest(request, protocol.SearchContinuousSetsRequest, protocol.SearchContinuousSetsResponse, self.continuousSetsGenerator) |
def set_orders(self, object_pks):
"""
Perform a mass update of sort_orders across the full queryset.
Accepts a list, object_pks, of the intended order for the objects.
Works as follows:
- Compile a list of all sort orders in the queryset. Leave out anything that
isn't in the object_pks list - this deals with pagination and any
inconsistencies.
- Get the maximum among all model object sort orders. Update the queryset to add
it to all the existing sort order values. This lifts them 'out of the way' of
unique_together clashes when setting the intended sort orders.
- Set the sort order on each object. Use only sort_order values that the objects
had before calling this method, so they get rearranged in place.
Performs O(n) queries.
"""
objects_to_sort = self.filter(pk__in=object_pks)
max_value = self.model.objects.all().aggregate(
models.Max('sort_order')
)['sort_order__max']
# Call list() on the values right away, so they don't get affected by the
# update() later (since values_list() is lazy).
orders = list(objects_to_sort.values_list('sort_order', flat=True))
# Check there are no unrecognised entries in the object_pks list. If so,
# throw an error. We only have to check that they're the same length because
# orders is built using only entries in object_pks, and all the pks are unique,
# so if their lengths are the same, the elements must match up exactly.
if len(orders) != len(object_pks):
pks = set(objects_to_sort.values_list('pk', flat=True))
message = 'The following object_pks are not in this queryset: {}'.format(
[pk for pk in object_pks if pk not in pks]
)
raise TypeError(message)
with transaction.atomic():
objects_to_sort.update(sort_order=models.F('sort_order') + max_value)
for pk, order in zip(object_pks, orders):
# Use update() to save a query per item and dodge the insertion sort
# code in save().
self.filter(pk=pk).update(sort_order=order)
# Return the operated-on queryset for convenience.
return objects_to_sort | def function[set_orders, parameter[self, object_pks]]:
constant[
Perform a mass update of sort_orders across the full queryset.
Accepts a list, object_pks, of the intended order for the objects.
Works as follows:
- Compile a list of all sort orders in the queryset. Leave out anything that
isn't in the object_pks list - this deals with pagination and any
inconsistencies.
- Get the maximum among all model object sort orders. Update the queryset to add
it to all the existing sort order values. This lifts them 'out of the way' of
unique_together clashes when setting the intended sort orders.
- Set the sort order on each object. Use only sort_order values that the objects
had before calling this method, so they get rearranged in place.
Performs O(n) queries.
]
variable[objects_to_sort] assign[=] call[name[self].filter, parameter[]]
variable[max_value] assign[=] call[call[call[name[self].model.objects.all, parameter[]].aggregate, parameter[call[name[models].Max, parameter[constant[sort_order]]]]]][constant[sort_order__max]]
variable[orders] assign[=] call[name[list], parameter[call[name[objects_to_sort].values_list, parameter[constant[sort_order]]]]]
if compare[call[name[len], parameter[name[orders]]] not_equal[!=] call[name[len], parameter[name[object_pks]]]] begin[:]
variable[pks] assign[=] call[name[set], parameter[call[name[objects_to_sort].values_list, parameter[constant[pk]]]]]
variable[message] assign[=] call[constant[The following object_pks are not in this queryset: {}].format, parameter[<ast.ListComp object at 0x7da20c991a80>]]
<ast.Raise object at 0x7da20c993760>
with call[name[transaction].atomic, parameter[]] begin[:]
call[name[objects_to_sort].update, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20c993c70>, <ast.Name object at 0x7da20c9917b0>]]] in starred[call[name[zip], parameter[name[object_pks], name[orders]]]] begin[:]
call[call[name[self].filter, parameter[]].update, parameter[]]
return[name[objects_to_sort]] | keyword[def] identifier[set_orders] ( identifier[self] , identifier[object_pks] ):
literal[string]
identifier[objects_to_sort] = identifier[self] . identifier[filter] ( identifier[pk__in] = identifier[object_pks] )
identifier[max_value] = identifier[self] . identifier[model] . identifier[objects] . identifier[all] (). identifier[aggregate] (
identifier[models] . identifier[Max] ( literal[string] )
)[ literal[string] ]
identifier[orders] = identifier[list] ( identifier[objects_to_sort] . identifier[values_list] ( literal[string] , identifier[flat] = keyword[True] ))
keyword[if] identifier[len] ( identifier[orders] )!= identifier[len] ( identifier[object_pks] ):
identifier[pks] = identifier[set] ( identifier[objects_to_sort] . identifier[values_list] ( literal[string] , identifier[flat] = keyword[True] ))
identifier[message] = literal[string] . identifier[format] (
[ identifier[pk] keyword[for] identifier[pk] keyword[in] identifier[object_pks] keyword[if] identifier[pk] keyword[not] keyword[in] identifier[pks] ]
)
keyword[raise] identifier[TypeError] ( identifier[message] )
keyword[with] identifier[transaction] . identifier[atomic] ():
identifier[objects_to_sort] . identifier[update] ( identifier[sort_order] = identifier[models] . identifier[F] ( literal[string] )+ identifier[max_value] )
keyword[for] identifier[pk] , identifier[order] keyword[in] identifier[zip] ( identifier[object_pks] , identifier[orders] ):
identifier[self] . identifier[filter] ( identifier[pk] = identifier[pk] ). identifier[update] ( identifier[sort_order] = identifier[order] )
keyword[return] identifier[objects_to_sort] | def set_orders(self, object_pks):
"""
Perform a mass update of sort_orders across the full queryset.
Accepts a list, object_pks, of the intended order for the objects.
Works as follows:
- Compile a list of all sort orders in the queryset. Leave out anything that
isn't in the object_pks list - this deals with pagination and any
inconsistencies.
- Get the maximum among all model object sort orders. Update the queryset to add
it to all the existing sort order values. This lifts them 'out of the way' of
unique_together clashes when setting the intended sort orders.
- Set the sort order on each object. Use only sort_order values that the objects
had before calling this method, so they get rearranged in place.
Performs O(n) queries.
"""
objects_to_sort = self.filter(pk__in=object_pks)
max_value = self.model.objects.all().aggregate(models.Max('sort_order'))['sort_order__max']
# Call list() on the values right away, so they don't get affected by the
# update() later (since values_list() is lazy).
orders = list(objects_to_sort.values_list('sort_order', flat=True))
# Check there are no unrecognised entries in the object_pks list. If so,
# throw an error. We only have to check that they're the same length because
# orders is built using only entries in object_pks, and all the pks are unique,
# so if their lengths are the same, the elements must match up exactly.
if len(orders) != len(object_pks):
pks = set(objects_to_sort.values_list('pk', flat=True))
message = 'The following object_pks are not in this queryset: {}'.format([pk for pk in object_pks if pk not in pks])
raise TypeError(message) # depends on [control=['if'], data=[]]
with transaction.atomic():
objects_to_sort.update(sort_order=models.F('sort_order') + max_value)
for (pk, order) in zip(object_pks, orders):
# Use update() to save a query per item and dodge the insertion sort
# code in save().
self.filter(pk=pk).update(sort_order=order) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=[]]
# Return the operated-on queryset for convenience.
return objects_to_sort |
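A hedged usage sketch (not from the source): Page is a hypothetical model with a sort_order field whose default manager returns this queryset class, and the pk lists are illustrative.

# Suppose pks [1, 2, 3] currently carry sort_order values [10, 20, 30].
pages = Page.objects.all()
# Reverse the display order in place; the existing sort_order values are
# recycled, so pks 1, 2, 3 end up with [30, 20, 10] respectively.
pages.set_orders([3, 2, 1])
# A pk that is not in the queryset raises TypeError, so typos fail loudly.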
def check_spam(self, ip=None, email=None, name=None, login=None, realname=None,
subject=None, body=None, subject_type='plain', body_type='plain'):
""" http://api.yandex.ru/cleanweb/doc/dg/concepts/check-spam.xml
subject_type = plain|html|bbcode
body_type = plain|html|bbcode
"""
data = {'ip': ip, 'email': email, 'name': name, 'login': login, 'realname': realname,
'body-%s' % body_type: body, 'subject-%s' % subject_type: subject}
r = self.request('post', 'http://cleanweb-api.yandex.ru/1.0/check-spam', data=data)
root = ET.fromstring(r.content)
return {
'id': root.findtext('id'),
'spam_flag': yesnobool(root.find('text').attrib['spam-flag']),
'links': [(link.attrib['href'], yesnobool(link.attrib['spam-flag'])) for link in root.findall('./links/link')]
} | def function[check_spam, parameter[self, ip, email, name, login, realname, subject, body, subject_type, body_type]]:
constant[ http://api.yandex.ru/cleanweb/doc/dg/concepts/check-spam.xml
subject_type = plain|html|bbcode
body_type = plain|html|bbcode
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da18bcc8ca0>, <ast.Constant object at 0x7da18bccbca0>, <ast.Constant object at 0x7da18bcc9960>, <ast.Constant object at 0x7da18bccb4c0>, <ast.Constant object at 0x7da18bcc9450>, <ast.BinOp object at 0x7da18bcc87c0>, <ast.BinOp object at 0x7da18bcc81c0>], [<ast.Name object at 0x7da18bcc91b0>, <ast.Name object at 0x7da18bcc8370>, <ast.Name object at 0x7da18bccb430>, <ast.Name object at 0x7da18bccbe50>, <ast.Name object at 0x7da18bccb640>, <ast.Name object at 0x7da18bcca140>, <ast.Name object at 0x7da18bcca2f0>]]
variable[r] assign[=] call[name[self].request, parameter[constant[post], constant[http://cleanweb-api.yandex.ru/1.0/check-spam]]]
variable[root] assign[=] call[name[ET].fromstring, parameter[name[r].content]]
return[dictionary[[<ast.Constant object at 0x7da18bcc9270>, <ast.Constant object at 0x7da18bccbfa0>, <ast.Constant object at 0x7da18bccb160>], [<ast.Call object at 0x7da18bccabf0>, <ast.Call object at 0x7da18bcc94e0>, <ast.ListComp object at 0x7da18bcc8be0>]]] | keyword[def] identifier[check_spam] ( identifier[self] , identifier[ip] = keyword[None] , identifier[email] = keyword[None] , identifier[name] = keyword[None] , identifier[login] = keyword[None] , identifier[realname] = keyword[None] ,
identifier[subject] = keyword[None] , identifier[body] = keyword[None] , identifier[subject_type] = literal[string] , identifier[body_type] = literal[string] ):
literal[string]
identifier[data] ={ literal[string] : identifier[ip] , literal[string] : identifier[email] , literal[string] : identifier[name] , literal[string] : identifier[login] , literal[string] : identifier[realname] ,
literal[string] % identifier[body_type] : identifier[body] , literal[string] % identifier[subject_type] : identifier[subject] }
identifier[r] = identifier[self] . identifier[request] ( literal[string] , literal[string] , identifier[data] = identifier[data] )
identifier[root] = identifier[ET] . identifier[fromstring] ( identifier[r] . identifier[content] )
keyword[return] {
literal[string] : identifier[root] . identifier[findtext] ( literal[string] ),
literal[string] : identifier[yesnobool] ( identifier[root] . identifier[find] ( literal[string] ). identifier[attrib] [ literal[string] ]),
literal[string] :[( identifier[link] . identifier[attrib] [ literal[string] ], identifier[yesnobool] ( identifier[link] . identifier[attrib] [ literal[string] ])) keyword[for] identifier[link] keyword[in] identifier[root] . identifier[findall] ( literal[string] )]
} | def check_spam(self, ip=None, email=None, name=None, login=None, realname=None, subject=None, body=None, subject_type='plain', body_type='plain'):
""" http://api.yandex.ru/cleanweb/doc/dg/concepts/check-spam.xml
subject_type = plain|html|bbcode
body_type = plain|html|bbcode
"""
data = {'ip': ip, 'email': email, 'name': name, 'login': login, 'realname': realname, 'body-%s' % body_type: body, 'subject-%s' % subject_type: subject}
r = self.request('post', 'http://cleanweb-api.yandex.ru/1.0/check-spam', data=data)
root = ET.fromstring(r.content)
return {'id': root.findtext('id'), 'spam_flag': yesnobool(root.find('text').attrib['spam-flag']), 'links': [(link.attrib['href'], yesnobool(link.attrib['spam-flag'])) for link in root.findall('./links/link')]} |
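For illustration, a hedged sketch of calling check_spam; the client object is an assumption (only the method body above is given by the source), and the dict keys match the return value built above.

# `cw` is assumed to be an authenticated API client exposing check_spam.
result = cw.check_spam(
    ip='127.0.0.1',
    email='user@example.com',
    body='Buy cheap pills at http://spam.example',
    body_type='plain',
)
if result['spam_flag']:
    print('looks like spam, request id:', result['id'])
for href, is_spam in result['links']:
    print(href, 'spam' if is_spam else 'clean')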
def update_attribute(self, attr, value):
"""Set the value of a workspace attribute."""
update = [fapi._attr_up(attr, value)]
r = fapi.update_workspace_attributes(self.namespace, self.name,
update, self.api_url)
fapi._check_response_code(r, 200) | def function[update_attribute, parameter[self, attr, value]]:
constant[Set the value of a workspace attribute.]
variable[update] assign[=] list[[<ast.Call object at 0x7da1b1ba8070>]]
variable[r] assign[=] call[name[fapi].update_workspace_attributes, parameter[name[self].namespace, name[self].name, name[update], name[self].api_url]]
call[name[fapi]._check_response_code, parameter[name[r], constant[200]]] | keyword[def] identifier[update_attribute] ( identifier[self] , identifier[attr] , identifier[value] ):
literal[string]
identifier[update] =[ identifier[fapi] . identifier[_attr_up] ( identifier[attr] , identifier[value] )]
identifier[r] = identifier[fapi] . identifier[update_workspace_attributes] ( identifier[self] . identifier[namespace] , identifier[self] . identifier[name] ,
identifier[update] , identifier[self] . identifier[api_url] )
identifier[fapi] . identifier[_check_response_code] ( identifier[r] , literal[int] ) | def update_attribute(self, attr, value):
"""Set the value of a workspace attribute."""
update = [fapi._attr_up(attr, value)]
r = fapi.update_workspace_attributes(self.namespace, self.name, update, self.api_url)
fapi._check_response_code(r, 200) |
def cursor_pagedown(self, stats):
"""Set next page."""
if self._current_page + 1 < self._page_max:
self._current_page += 1
else:
self._current_page = 0
self.cursor_position = 0 | def function[cursor_pagedown, parameter[self, stats]]:
constant[Set next page.]
if compare[binary_operation[name[self]._current_page + constant[1]] less[<] name[self]._page_max] begin[:]
<ast.AugAssign object at 0x7da18eb547c0>
name[self].cursor_position assign[=] constant[0] | keyword[def] identifier[cursor_pagedown] ( identifier[self] , identifier[stats] ):
literal[string]
keyword[if] identifier[self] . identifier[_current_page] + literal[int] < identifier[self] . identifier[_page_max] :
identifier[self] . identifier[_current_page] += literal[int]
keyword[else] :
identifier[self] . identifier[_current_page] = literal[int]
identifier[self] . identifier[cursor_position] = literal[int] | def cursor_pagedown(self, stats):
"""Set next page."""
if self._current_page + 1 < self._page_max:
self._current_page += 1 # depends on [control=['if'], data=[]]
else:
self._current_page = 0
self.cursor_position = 0 |
def create_parser(arg_parser: ArgumentParser = None) -> ArgumentParser:
"""
Creates an argument parser populated with the arg formats for the server
command.
"""
parser = arg_parser or ArgumentParser()
parser.description = 'Cauldron kernel server'
parser.add_argument(
'-p', '--port',
dest='port',
type=int,
default=5010
)
parser.add_argument(
'-d', '--debug',
dest='debug',
default=False,
action='store_true'
)
parser.add_argument(
'-v', '--version',
dest='version',
default=False,
action='store_true'
)
parser.add_argument(
'-c', '--code',
dest='authentication_code',
type=str,
default=''
)
parser.add_argument(
'-n', '--name',
dest='host',
type=str,
default=None
)
return parser | def function[create_parser, parameter[arg_parser]]:
constant[
Creates an argument parser populated with the arg formats for the server
command.
]
variable[parser] assign[=] <ast.BoolOp object at 0x7da18c4ce7a0>
name[parser].description assign[=] constant[Cauldron kernel server]
call[name[parser].add_argument, parameter[constant[-p], constant[--port]]]
call[name[parser].add_argument, parameter[constant[-d], constant[--debug]]]
call[name[parser].add_argument, parameter[constant[-v], constant[--version]]]
call[name[parser].add_argument, parameter[constant[-c], constant[--code]]]
call[name[parser].add_argument, parameter[constant[-n], constant[--name]]]
return[name[parser]] | keyword[def] identifier[create_parser] ( identifier[arg_parser] : identifier[ArgumentParser] = keyword[None] )-> identifier[ArgumentParser] :
literal[string]
identifier[parser] = identifier[arg_parser] keyword[or] identifier[ArgumentParser] ()
identifier[parser] . identifier[description] = literal[string]
identifier[parser] . identifier[add_argument] (
literal[string] , literal[string] ,
identifier[dest] = literal[string] ,
identifier[type] = identifier[int] ,
identifier[default] = literal[int]
)
identifier[parser] . identifier[add_argument] (
literal[string] , literal[string] ,
identifier[dest] = literal[string] ,
identifier[default] = keyword[False] ,
identifier[action] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , literal[string] ,
identifier[dest] = literal[string] ,
identifier[default] = keyword[False] ,
identifier[action] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , literal[string] ,
identifier[dest] = literal[string] ,
identifier[type] = identifier[str] ,
identifier[default] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , literal[string] ,
identifier[dest] = literal[string] ,
identifier[type] = identifier[str] ,
identifier[default] = keyword[None]
)
keyword[return] identifier[parser] | def create_parser(arg_parser: ArgumentParser=None) -> ArgumentParser:
"""
Creates an argument parser populated with the arg formats for the server
command.
"""
parser = arg_parser or ArgumentParser()
parser.description = 'Cauldron kernel server'
parser.add_argument('-p', '--port', dest='port', type=int, default=5010)
parser.add_argument('-d', '--debug', dest='debug', default=False, action='store_true')
parser.add_argument('-v', '--version', dest='version', default=False, action='store_true')
parser.add_argument('-c', '--code', dest='authentication_code', type=str, default='')
parser.add_argument('-n', '--name', dest='host', type=str, default=None)
return parser |
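A small self-contained check of the flag handling and defaults above; the argv list is illustrative.

args = create_parser().parse_args(['--port', '5050', '--debug'])
assert args.port == 5050 and args.debug and not args.version
assert args.authentication_code == '' and args.host is None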
def select_eps(xmrs, nodeid=None, iv=None, label=None, pred=None):
"""
Return the list of matching elementary predications in *xmrs*.
:class:`~delphin.mrs.components.ElementaryPredication` objects for
*xmrs* match if their `nodeid` matches *nodeid*,
`intrinsic_variable` matches *iv*, `label` matches *label*, and
    `pred` matches *pred*. The *nodeid*, *iv*, *label*, and *pred* filters
are ignored if they are `None`.
Args:
xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to
query
nodeid (optional): nodeid to match
iv (str, optional): intrinsic variable to match
label (str, optional): label to match
pred (str, :class:`~delphin.mrs.components.Pred`, optional):
predicate to match
Returns:
list: matching elementary predications
"""
epmatch = lambda n: ((nodeid is None or n.nodeid == nodeid) and
(iv is None or n.iv == iv) and
(label is None or n.label == label) and
(pred is None or n.pred == pred))
return list(filter(epmatch, xmrs.eps())) | def function[select_eps, parameter[xmrs, nodeid, iv, label, pred]]:
constant[
Return the list of matching elementary predications in *xmrs*.
:class:`~delphin.mrs.components.ElementaryPredication` objects for
*xmrs* match if their `nodeid` matches *nodeid*,
`intrinsic_variable` matches *iv*, `label` matches *label*, and
    `pred` matches *pred*. The *nodeid*, *iv*, *label*, and *pred* filters
are ignored if they are `None`.
Args:
xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to
query
nodeid (optional): nodeid to match
iv (str, optional): intrinsic variable to match
label (str, optional): label to match
pred (str, :class:`~delphin.mrs.components.Pred`, optional):
predicate to match
Returns:
list: matching elementary predications
]
variable[epmatch] assign[=] <ast.Lambda object at 0x7da1b033e020>
return[call[name[list], parameter[call[name[filter], parameter[name[epmatch], call[name[xmrs].eps, parameter[]]]]]]] | keyword[def] identifier[select_eps] ( identifier[xmrs] , identifier[nodeid] = keyword[None] , identifier[iv] = keyword[None] , identifier[label] = keyword[None] , identifier[pred] = keyword[None] ):
literal[string]
identifier[epmatch] = keyword[lambda] identifier[n] :(( identifier[nodeid] keyword[is] keyword[None] keyword[or] identifier[n] . identifier[nodeid] == identifier[nodeid] ) keyword[and]
( identifier[iv] keyword[is] keyword[None] keyword[or] identifier[n] . identifier[iv] == identifier[iv] ) keyword[and]
( identifier[label] keyword[is] keyword[None] keyword[or] identifier[n] . identifier[label] == identifier[label] ) keyword[and]
( identifier[pred] keyword[is] keyword[None] keyword[or] identifier[n] . identifier[pred] == identifier[pred] ))
keyword[return] identifier[list] ( identifier[filter] ( identifier[epmatch] , identifier[xmrs] . identifier[eps] ())) | def select_eps(xmrs, nodeid=None, iv=None, label=None, pred=None):
"""
Return the list of matching elementary predications in *xmrs*.
:class:`~delphin.mrs.components.ElementaryPredication` objects for
*xmrs* match if their `nodeid` matches *nodeid*,
`intrinsic_variable` matches *iv*, `label` matches *label*, and
    `pred` matches *pred*. The *nodeid*, *iv*, *label*, and *pred* filters
are ignored if they are `None`.
Args:
xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to
query
nodeid (optional): nodeid to match
iv (str, optional): intrinsic variable to match
label (str, optional): label to match
pred (str, :class:`~delphin.mrs.components.Pred`, optional):
predicate to match
Returns:
list: matching elementary predications
"""
epmatch = lambda n: (nodeid is None or n.nodeid == nodeid) and (iv is None or n.iv == iv) and (label is None or n.label == label) and (pred is None or n.pred == pred)
return list(filter(epmatch, xmrs.eps())) |
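A hedged query sketch: `x` stands for an Xmrs instance parsed elsewhere (e.g. with delphin.mrs.simplemrs.loads), and the variable, label, and predicate values are illustrative.

eps_in_h1 = select_eps(x, label='h1')          # every EP sharing label h1
the_q = select_eps(x, iv='x4', pred='_the_q')  # a specific quantifier
all_eps = select_eps(x)                        # no filters: all EPs
# Filters combine with logical AND, as in the lambda above.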
def hil_controls_encode(self, time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode):
'''
Sent from autopilot to simulation. Hardware in the loop control
outputs
time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
roll_ailerons : Control output -1 .. 1 (float)
pitch_elevator : Control output -1 .. 1 (float)
yaw_rudder : Control output -1 .. 1 (float)
throttle : Throttle 0 .. 1 (float)
aux1 : Aux 1, -1 .. 1 (float)
aux2 : Aux 2, -1 .. 1 (float)
aux3 : Aux 3, -1 .. 1 (float)
aux4 : Aux 4, -1 .. 1 (float)
mode : System mode (MAV_MODE) (uint8_t)
nav_mode : Navigation mode (MAV_NAV_MODE) (uint8_t)
'''
return MAVLink_hil_controls_message(time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode) | def function[hil_controls_encode, parameter[self, time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode]]:
constant[
Sent from autopilot to simulation. Hardware in the loop control
outputs
time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
roll_ailerons : Control output -1 .. 1 (float)
pitch_elevator : Control output -1 .. 1 (float)
yaw_rudder : Control output -1 .. 1 (float)
throttle : Throttle 0 .. 1 (float)
aux1 : Aux 1, -1 .. 1 (float)
aux2 : Aux 2, -1 .. 1 (float)
aux3 : Aux 3, -1 .. 1 (float)
aux4 : Aux 4, -1 .. 1 (float)
mode : System mode (MAV_MODE) (uint8_t)
nav_mode : Navigation mode (MAV_NAV_MODE) (uint8_t)
]
return[call[name[MAVLink_hil_controls_message], parameter[name[time_usec], name[roll_ailerons], name[pitch_elevator], name[yaw_rudder], name[throttle], name[aux1], name[aux2], name[aux3], name[aux4], name[mode], name[nav_mode]]]] | keyword[def] identifier[hil_controls_encode] ( identifier[self] , identifier[time_usec] , identifier[roll_ailerons] , identifier[pitch_elevator] , identifier[yaw_rudder] , identifier[throttle] , identifier[aux1] , identifier[aux2] , identifier[aux3] , identifier[aux4] , identifier[mode] , identifier[nav_mode] ):
literal[string]
keyword[return] identifier[MAVLink_hil_controls_message] ( identifier[time_usec] , identifier[roll_ailerons] , identifier[pitch_elevator] , identifier[yaw_rudder] , identifier[throttle] , identifier[aux1] , identifier[aux2] , identifier[aux3] , identifier[aux4] , identifier[mode] , identifier[nav_mode] ) | def hil_controls_encode(self, time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode):
"""
Sent from autopilot to simulation. Hardware in the loop control
outputs
time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
roll_ailerons : Control output -1 .. 1 (float)
pitch_elevator : Control output -1 .. 1 (float)
yaw_rudder : Control output -1 .. 1 (float)
throttle : Throttle 0 .. 1 (float)
aux1 : Aux 1, -1 .. 1 (float)
aux2 : Aux 2, -1 .. 1 (float)
aux3 : Aux 3, -1 .. 1 (float)
aux4 : Aux 4, -1 .. 1 (float)
mode : System mode (MAV_MODE) (uint8_t)
nav_mode : Navigation mode (MAV_NAV_MODE) (uint8_t)
"""
return MAVLink_hil_controls_message(time_usec, roll_ailerons, pitch_elevator, yaw_rudder, throttle, aux1, aux2, aux3, aux4, mode, nav_mode) |
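An illustrative sketch of building and sending the message; `mav` is assumed to be a connected pymavlink MAVLink object (e.g. mavutil.mavlink_connection(...).mav) and every value is a dummy.

import time

msg = mav.hil_controls_encode(
    int(time.time() * 1e6),   # time_usec
    0.0, -0.1, 0.05,          # roll_ailerons, pitch_elevator, yaw_rudder
    0.6,                      # throttle
    0.0, 0.0, 0.0, 0.0,       # aux1..aux4
    0,                        # mode (a MAV_MODE value; 0 is illustrative)
    0)                        # nav_mode (a MAV_NAV_MODE value)
mav.send(msg)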
def time_label(intvl, return_val=True):
"""Create time interval label for aospy data I/O."""
# Monthly labels are 2 digit integers: '01' for jan, '02' for feb, etc.
if type(intvl) in [list, tuple, np.ndarray] and len(intvl) == 1:
label = '{:02}'.format(intvl[0])
value = np.array(intvl)
elif type(intvl) == int and intvl in range(1, 13):
label = '{:02}'.format(intvl)
value = np.array([intvl])
# Seasonal and annual time labels are short strings.
else:
labels = {'jfm': (1, 2, 3),
'fma': (2, 3, 4),
'mam': (3, 4, 5),
'amj': (4, 5, 6),
'mjj': (5, 6, 7),
'jja': (6, 7, 8),
'jas': (7, 8, 9),
'aso': (8, 9, 10),
'son': (9, 10, 11),
'ond': (10, 11, 12),
'ndj': (11, 12, 1),
'djf': (1, 2, 12),
'jjas': (6, 7, 8, 9),
'djfm': (12, 1, 2, 3),
'ann': range(1, 13)}
for lbl, vals in labels.items():
if intvl == lbl or set(intvl) == set(vals):
label = lbl
value = np.array(vals)
break
if return_val:
return label, value
else:
return label | def function[time_label, parameter[intvl, return_val]]:
constant[Create time interval label for aospy data I/O.]
if <ast.BoolOp object at 0x7da207f9a5c0> begin[:]
variable[label] assign[=] call[constant[{:02}].format, parameter[call[name[intvl]][constant[0]]]]
variable[value] assign[=] call[name[np].array, parameter[name[intvl]]]
if name[return_val] begin[:]
return[tuple[[<ast.Name object at 0x7da1b0494460>, <ast.Name object at 0x7da1b04941c0>]]] | keyword[def] identifier[time_label] ( identifier[intvl] , identifier[return_val] = keyword[True] ):
literal[string]
keyword[if] identifier[type] ( identifier[intvl] ) keyword[in] [ identifier[list] , identifier[tuple] , identifier[np] . identifier[ndarray] ] keyword[and] identifier[len] ( identifier[intvl] )== literal[int] :
identifier[label] = literal[string] . identifier[format] ( identifier[intvl] [ literal[int] ])
identifier[value] = identifier[np] . identifier[array] ( identifier[intvl] )
keyword[elif] identifier[type] ( identifier[intvl] )== identifier[int] keyword[and] identifier[intvl] keyword[in] identifier[range] ( literal[int] , literal[int] ):
identifier[label] = literal[string] . identifier[format] ( identifier[intvl] )
identifier[value] = identifier[np] . identifier[array] ([ identifier[intvl] ])
keyword[else] :
identifier[labels] ={ literal[string] :( literal[int] , literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] , literal[int] , literal[int] ),
literal[string] :( literal[int] , literal[int] , literal[int] , literal[int] ),
literal[string] : identifier[range] ( literal[int] , literal[int] )}
keyword[for] identifier[lbl] , identifier[vals] keyword[in] identifier[labels] . identifier[items] ():
keyword[if] identifier[intvl] == identifier[lbl] keyword[or] identifier[set] ( identifier[intvl] )== identifier[set] ( identifier[vals] ):
identifier[label] = identifier[lbl]
identifier[value] = identifier[np] . identifier[array] ( identifier[vals] )
keyword[break]
keyword[if] identifier[return_val] :
keyword[return] identifier[label] , identifier[value]
keyword[else] :
keyword[return] identifier[label] | def time_label(intvl, return_val=True):
"""Create time interval label for aospy data I/O."""
# Monthly labels are 2 digit integers: '01' for jan, '02' for feb, etc.
if type(intvl) in [list, tuple, np.ndarray] and len(intvl) == 1:
label = '{:02}'.format(intvl[0])
value = np.array(intvl) # depends on [control=['if'], data=[]]
elif type(intvl) == int and intvl in range(1, 13):
label = '{:02}'.format(intvl)
value = np.array([intvl]) # depends on [control=['if'], data=[]]
else:
# Seasonal and annual time labels are short strings.
labels = {'jfm': (1, 2, 3), 'fma': (2, 3, 4), 'mam': (3, 4, 5), 'amj': (4, 5, 6), 'mjj': (5, 6, 7), 'jja': (6, 7, 8), 'jas': (7, 8, 9), 'aso': (8, 9, 10), 'son': (9, 10, 11), 'ond': (10, 11, 12), 'ndj': (11, 12, 1), 'djf': (1, 2, 12), 'jjas': (6, 7, 8, 9), 'djfm': (12, 1, 2, 3), 'ann': range(1, 13)}
for (lbl, vals) in labels.items():
if intvl == lbl or set(intvl) == set(vals):
label = lbl
value = np.array(vals)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if return_val:
return (label, value) # depends on [control=['if'], data=[]]
else:
return label |
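A few worked calls tracing the branches above (np is numpy, as in this module):

label, value = time_label(1)          # ('01', array([1])) via the int branch
label, value = time_label((6, 7, 8))  # ('jja', array([6, 7, 8])) via set match
label = time_label(range(1, 13), return_val=False)  # 'ann'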
def quat2Pitch(qw, qx, qy, qz):
'''
Translates from Quaternion to Pitch.
@param qw,qx,qy,qz: Quaternion values
@type qw,qx,qy,qz: float
@return Pitch value translated from Quaternion
'''
rotateYa0=-2.0*(qx*qz - qw*qy)
rotateY=0.0
if(rotateYa0 >= 1.0):
rotateY = pi/2.0
elif(rotateYa0 <= -1.0):
rotateY = -pi/2.0
else:
rotateY = asin(rotateYa0)
return rotateY | def function[quat2Pitch, parameter[qw, qx, qy, qz]]:
constant[
Translates from Quaternion to Pitch.
@param qw,qx,qy,qz: Quaternion values
@type qw,qx,qy,qz: float
@return Pitch value translated from Quaternion
]
variable[rotateYa0] assign[=] binary_operation[<ast.UnaryOp object at 0x7da18ede5e10> * binary_operation[binary_operation[name[qx] * name[qz]] - binary_operation[name[qw] * name[qy]]]]
variable[rotateY] assign[=] constant[0.0]
if compare[name[rotateYa0] greater_or_equal[>=] constant[1.0]] begin[:]
variable[rotateY] assign[=] binary_operation[name[pi] / constant[2.0]]
return[name[rotateY]] | keyword[def] identifier[quat2Pitch] ( identifier[qw] , identifier[qx] , identifier[qy] , identifier[qz] ):
literal[string]
identifier[rotateYa0] =- literal[int] *( identifier[qx] * identifier[qz] - identifier[qw] * identifier[qy] )
identifier[rotateY] = literal[int]
keyword[if] ( identifier[rotateYa0] >= literal[int] ):
identifier[rotateY] = identifier[pi] / literal[int]
keyword[elif] ( identifier[rotateYa0] <=- literal[int] ):
identifier[rotateY] =- identifier[pi] / literal[int]
keyword[else] :
identifier[rotateY] = identifier[asin] ( identifier[rotateYa0] )
keyword[return] identifier[rotateY] | def quat2Pitch(qw, qx, qy, qz):
"""
Translates from Quaternion to Pitch.
@param qw,qx,qy,qz: Quaternion values
@type qw,qx,qy,qz: float
@return Pitch value translated from Quaternion
"""
rotateYa0 = -2.0 * (qx * qz - qw * qy)
rotateY = 0.0
if rotateYa0 >= 1.0:
rotateY = pi / 2.0 # depends on [control=['if'], data=[]]
elif rotateYa0 <= -1.0:
rotateY = -pi / 2.0 # depends on [control=['if'], data=[]]
else:
rotateY = asin(rotateYa0)
return rotateY |
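Two worked checks of the clamping logic (pi and asin come from this module's math imports; the quaternion values are illustrative):

assert abs(quat2Pitch(1.0, 0.0, 0.0, 0.0)) < 1e-12  # identity -> zero pitch
s = 0.7071067811865476  # sin(45 deg); qw = qy = s encodes a 90 deg pitch
assert abs(quat2Pitch(s, 0.0, s, 0.0) - pi / 2.0) < 1e-6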
def remove_blacklisted_filepaths(self, filepaths):
"""
        Removes `filepaths` from the blacklisted filepaths.
        Passing absolute filepaths is recommended, but the method will attempt
        to convert relative paths to absolute ones based on the current working directory.
"""
filepaths = util.to_absolute_paths(filepaths)
black_paths = self.blacklisted_filepaths
black_paths = util.remove_from_set(black_paths, filepaths) | def function[remove_blacklisted_filepaths, parameter[self, filepaths]]:
constant[
        Removes `filepaths` from the blacklisted filepaths.
        Passing absolute filepaths is recommended, but the method will attempt
        to convert relative paths to absolute ones based on the current working directory.
]
variable[filepaths] assign[=] call[name[util].to_absolute_paths, parameter[name[filepaths]]]
variable[black_paths] assign[=] name[self].blacklisted_filepaths
variable[black_paths] assign[=] call[name[util].remove_from_set, parameter[name[black_paths], name[filepaths]]] | keyword[def] identifier[remove_blacklisted_filepaths] ( identifier[self] , identifier[filepaths] ):
literal[string]
identifier[filepaths] = identifier[util] . identifier[to_absolute_paths] ( identifier[filepaths] )
identifier[black_paths] = identifier[self] . identifier[blacklisted_filepaths]
identifier[black_paths] = identifier[util] . identifier[remove_from_set] ( identifier[black_paths] , identifier[filepaths] ) | def remove_blacklisted_filepaths(self, filepaths):
"""
        Removes `filepaths` from the blacklisted filepaths.
        Passing absolute filepaths is recommended, but the method will attempt
        to convert relative paths to absolute ones based on the current working directory.
"""
filepaths = util.to_absolute_paths(filepaths)
black_paths = self.blacklisted_filepaths
black_paths = util.remove_from_set(black_paths, filepaths) |
def children(self, as_resources=False):
'''
method to return hierarchical children of this resource
Args:
            as_resources (bool): if True, opens each child as the appropriate resource type instead of returning URIs only
Returns:
(list): list of resources
'''
children = [o for s,p,o in self.rdf.graph.triples((None, self.rdf.prefixes.ldp.contains, None))]
# if as_resources, issue GET requests for children and return
if as_resources:
logger.debug('retrieving children as resources')
children = [ self.repo.get_resource(child) for child in children ]
return children | def function[children, parameter[self, as_resources]]:
constant[
method to return hierarchical children of this resource
Args:
            as_resources (bool): if True, opens each child as the appropriate resource type instead of returning URIs only
Returns:
(list): list of resources
]
variable[children] assign[=] <ast.ListComp object at 0x7da1b2255ea0>
if name[as_resources] begin[:]
call[name[logger].debug, parameter[constant[retrieving children as resources]]]
variable[children] assign[=] <ast.ListComp object at 0x7da1b2254970>
return[name[children]] | keyword[def] identifier[children] ( identifier[self] , identifier[as_resources] = keyword[False] ):
literal[string]
identifier[children] =[ identifier[o] keyword[for] identifier[s] , identifier[p] , identifier[o] keyword[in] identifier[self] . identifier[rdf] . identifier[graph] . identifier[triples] (( keyword[None] , identifier[self] . identifier[rdf] . identifier[prefixes] . identifier[ldp] . identifier[contains] , keyword[None] ))]
keyword[if] identifier[as_resources] :
identifier[logger] . identifier[debug] ( literal[string] )
identifier[children] =[ identifier[self] . identifier[repo] . identifier[get_resource] ( identifier[child] ) keyword[for] identifier[child] keyword[in] identifier[children] ]
keyword[return] identifier[children] | def children(self, as_resources=False):
"""
method to return hierarchical children of this resource
Args:
            as_resources (bool): if True, opens each child as the appropriate resource type instead of returning URIs only
Returns:
(list): list of resources
"""
children = [o for (s, p, o) in self.rdf.graph.triples((None, self.rdf.prefixes.ldp.contains, None))] # if as_resources, issue GET requests for children and return
if as_resources:
logger.debug('retrieving children as resources')
children = [self.repo.get_resource(child) for child in children] # depends on [control=['if'], data=[]]
return children |
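A hedged walk of an LDP container; the repository client, URI, and child attributes are assumptions, not part of the source.

parent = repo.get_resource('http://localhost:8080/rest/collection')
for uri in parent.children():          # URIs only (rdflib terms)
    print(uri)
for child in parent.children(as_resources=True):  # one GET per child
    print(child.uri, type(child).__name__)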
def is_mdgel(self):
"""File has MD Gel format."""
# TODO: this likely reads the second page from file
try:
ismdgel = self.pages[0].is_mdgel or self.pages[1].is_mdgel
if ismdgel:
self.is_uniform = False
return ismdgel
except IndexError:
return False | def function[is_mdgel, parameter[self]]:
constant[File has MD Gel format.]
<ast.Try object at 0x7da1b18aa9b0> | keyword[def] identifier[is_mdgel] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[ismdgel] = identifier[self] . identifier[pages] [ literal[int] ]. identifier[is_mdgel] keyword[or] identifier[self] . identifier[pages] [ literal[int] ]. identifier[is_mdgel]
keyword[if] identifier[ismdgel] :
identifier[self] . identifier[is_uniform] = keyword[False]
keyword[return] identifier[ismdgel]
keyword[except] identifier[IndexError] :
keyword[return] keyword[False] | def is_mdgel(self):
"""File has MD Gel format."""
# TODO: this likely reads the second page from file
try:
ismdgel = self.pages[0].is_mdgel or self.pages[1].is_mdgel
if ismdgel:
self.is_uniform = False # depends on [control=['if'], data=[]]
return ismdgel # depends on [control=['try'], data=[]]
except IndexError:
return False # depends on [control=['except'], data=[]] |
def set_r_value(self, r_var: str, *, notify_changed=True) -> None:
"""Used to signal changes to the ref var, which are kept in document controller. ugh."""
self.r_var = r_var
self._description_changed()
if notify_changed: # set to False to set the r-value at startup; avoid marking it as a change
self.__notify_description_changed() | def function[set_r_value, parameter[self, r_var]]:
constant[Used to signal changes to the ref var, which are kept in document controller. ugh.]
name[self].r_var assign[=] name[r_var]
call[name[self]._description_changed, parameter[]]
if name[notify_changed] begin[:]
call[name[self].__notify_description_changed, parameter[]] | keyword[def] identifier[set_r_value] ( identifier[self] , identifier[r_var] : identifier[str] ,*, identifier[notify_changed] = keyword[True] )-> keyword[None] :
literal[string]
identifier[self] . identifier[r_var] = identifier[r_var]
identifier[self] . identifier[_description_changed] ()
keyword[if] identifier[notify_changed] :
identifier[self] . identifier[__notify_description_changed] () | def set_r_value(self, r_var: str, *, notify_changed=True) -> None:
"""Used to signal changes to the ref var, which are kept in document controller. ugh."""
self.r_var = r_var
self._description_changed()
if notify_changed: # set to False to set the r-value at startup; avoid marking it as a change
self.__notify_description_changed() # depends on [control=['if'], data=[]] |
async def build_verify_payment_req(wallet_handle: int,
submitter_did: str,
receipt: str) -> (str, str):
"""
Builds Indy request for information to verify the payment receipt
:param wallet_handle: wallet handle (created by open_wallet).
    :param submitter_did: (Optional) DID of the request sender
:param receipt: payment receipt to verify
:return: verify_txn_json: Indy request for verification receipt for transactions in the ledger
payment_method: used payment method
"""
logger = logging.getLogger(__name__)
logger.debug("build_verify_payment_req: >>> wallet_handle: %r, submitter_did: %r, receipt: %r",
wallet_handle,
submitter_did,
receipt)
if not hasattr(build_verify_payment_req, "cb"):
logger.debug("build_verify_payment_req: Creating callback")
build_verify_payment_req.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_submitter_did = c_char_p(submitter_did.encode('utf-8')) if submitter_did is not None else None
c_receipt = c_char_p(receipt.encode('utf-8'))
(verify_txn_json, payment_method) = await do_call('indy_build_verify_payment_req',
c_wallet_handle,
c_submitter_did,
c_receipt,
build_verify_payment_req.cb)
res = (verify_txn_json.decode(), payment_method.decode())
logger.debug("build_verify_payment_req: <<< res: %r", res)
return res | <ast.AsyncFunctionDef object at 0x7da18eb55090> | keyword[async] keyword[def] identifier[build_verify_payment_req] ( identifier[wallet_handle] : identifier[int] ,
identifier[submitter_did] : identifier[str] ,
identifier[receipt] : identifier[str] )->( identifier[str] , identifier[str] ):
literal[string]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[logger] . identifier[debug] ( literal[string] ,
identifier[wallet_handle] ,
identifier[submitter_did] ,
identifier[receipt] )
keyword[if] keyword[not] identifier[hasattr] ( identifier[build_verify_payment_req] , literal[string] ):
identifier[logger] . identifier[debug] ( literal[string] )
identifier[build_verify_payment_req] . identifier[cb] = identifier[create_cb] ( identifier[CFUNCTYPE] ( keyword[None] , identifier[c_int32] , identifier[c_int32] , identifier[c_char_p] , identifier[c_char_p] ))
identifier[c_wallet_handle] = identifier[c_int32] ( identifier[wallet_handle] )
identifier[c_submitter_did] = identifier[c_char_p] ( identifier[submitter_did] . identifier[encode] ( literal[string] )) keyword[if] identifier[submitter_did] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None]
identifier[c_receipt] = identifier[c_char_p] ( identifier[receipt] . identifier[encode] ( literal[string] ))
( identifier[verify_txn_json] , identifier[payment_method] )= keyword[await] identifier[do_call] ( literal[string] ,
identifier[c_wallet_handle] ,
identifier[c_submitter_did] ,
identifier[c_receipt] ,
identifier[build_verify_payment_req] . identifier[cb] )
identifier[res] =( identifier[verify_txn_json] . identifier[decode] (), identifier[payment_method] . identifier[decode] ())
identifier[logger] . identifier[debug] ( literal[string] , identifier[res] )
keyword[return] identifier[res] | async def build_verify_payment_req(wallet_handle: int, submitter_did: str, receipt: str) -> (str, str):
"""
Builds Indy request for information to verify the payment receipt
:param wallet_handle: wallet handle (created by open_wallet).
:param submitter_did : (Option) DID of request sender
:param receipt: payment receipt to verify
:return: verify_txn_json: Indy request for verification receipt for transactions in the ledger
payment_method: used payment method
"""
logger = logging.getLogger(__name__)
logger.debug('build_verify_payment_req: >>> wallet_handle: %r, submitter_did: %r, receipt: %r', wallet_handle, submitter_did, receipt)
if not hasattr(build_verify_payment_req, 'cb'):
logger.debug('build_verify_payment_req: Creating callback')
build_verify_payment_req.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p)) # depends on [control=['if'], data=[]]
c_wallet_handle = c_int32(wallet_handle)
c_submitter_did = c_char_p(submitter_did.encode('utf-8')) if submitter_did is not None else None
c_receipt = c_char_p(receipt.encode('utf-8'))
(verify_txn_json, payment_method) = await do_call('indy_build_verify_payment_req', c_wallet_handle, c_submitter_did, c_receipt, build_verify_payment_req.cb)
res = (verify_txn_json.decode(), payment_method.decode())
logger.debug('build_verify_payment_req: <<< res: %r', res)
return res |
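A hedged sketch of awaiting the builder from an async context; the DID and receipt strings are placeholders, and submitting the resulting request to the ledger is out of scope here.

async def demo(wallet_handle):
    req_json, method = await build_verify_payment_req(
        wallet_handle,
        'Th7MpTaRZVRYnPiabds81Y',   # submitter DID (placeholder)
        'pay:sov:someReceipt123')   # receipt (placeholder)
    # req_json is then signed and submitted; `method` names the payment
    # plugin that must parse the ledger's response.
    return req_json, method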
def get_network_settings():
'''
Return the contents of the global network script.
CLI Example:
.. code-block:: bash
salt '*' ip.get_network_settings
'''
skip_etc_default_networking = (
__grains__['osfullname'] == 'Ubuntu' and
int(__grains__['osrelease'].split('.')[0]) >= 12)
if skip_etc_default_networking:
settings = {}
if __salt__['service.available']('networking'):
if __salt__['service.status']('networking'):
settings['networking'] = "yes"
else:
settings['networking'] = "no"
else:
settings['networking'] = "no"
hostname = _parse_hostname()
domainname = _parse_domainname()
searchdomain = _parse_searchdomain()
settings['hostname'] = hostname
settings['domainname'] = domainname
settings['searchdomain'] = searchdomain
else:
settings = _parse_current_network_settings()
try:
template = JINJA.get_template('display-network.jinja')
except jinja2.exceptions.TemplateNotFound:
log.error('Could not load template display-network.jinja')
return ''
network = template.render(settings)
return _read_temp(network) | def function[get_network_settings, parameter[]]:
constant[
Return the contents of the global network script.
CLI Example:
.. code-block:: bash
salt '*' ip.get_network_settings
]
variable[skip_etc_default_networking] assign[=] <ast.BoolOp object at 0x7da1b2036500>
if name[skip_etc_default_networking] begin[:]
variable[settings] assign[=] dictionary[[], []]
if call[call[name[__salt__]][constant[service.available]], parameter[constant[networking]]] begin[:]
if call[call[name[__salt__]][constant[service.status]], parameter[constant[networking]]] begin[:]
call[name[settings]][constant[networking]] assign[=] constant[yes]
variable[hostname] assign[=] call[name[_parse_hostname], parameter[]]
variable[domainname] assign[=] call[name[_parse_domainname], parameter[]]
variable[searchdomain] assign[=] call[name[_parse_searchdomain], parameter[]]
call[name[settings]][constant[hostname]] assign[=] name[hostname]
call[name[settings]][constant[domainname]] assign[=] name[domainname]
call[name[settings]][constant[searchdomain]] assign[=] name[searchdomain]
<ast.Try object at 0x7da1b1fa0190>
variable[network] assign[=] call[name[template].render, parameter[name[settings]]]
return[call[name[_read_temp], parameter[name[network]]]] | keyword[def] identifier[get_network_settings] ():
literal[string]
identifier[skip_etc_default_networking] =(
identifier[__grains__] [ literal[string] ]== literal[string] keyword[and]
identifier[int] ( identifier[__grains__] [ literal[string] ]. identifier[split] ( literal[string] )[ literal[int] ])>= literal[int] )
keyword[if] identifier[skip_etc_default_networking] :
identifier[settings] ={}
keyword[if] identifier[__salt__] [ literal[string] ]( literal[string] ):
keyword[if] identifier[__salt__] [ literal[string] ]( literal[string] ):
identifier[settings] [ literal[string] ]= literal[string]
keyword[else] :
identifier[settings] [ literal[string] ]= literal[string]
keyword[else] :
identifier[settings] [ literal[string] ]= literal[string]
identifier[hostname] = identifier[_parse_hostname] ()
identifier[domainname] = identifier[_parse_domainname] ()
identifier[searchdomain] = identifier[_parse_searchdomain] ()
identifier[settings] [ literal[string] ]= identifier[hostname]
identifier[settings] [ literal[string] ]= identifier[domainname]
identifier[settings] [ literal[string] ]= identifier[searchdomain]
keyword[else] :
identifier[settings] = identifier[_parse_current_network_settings] ()
keyword[try] :
identifier[template] = identifier[JINJA] . identifier[get_template] ( literal[string] )
keyword[except] identifier[jinja2] . identifier[exceptions] . identifier[TemplateNotFound] :
identifier[log] . identifier[error] ( literal[string] )
keyword[return] literal[string]
identifier[network] = identifier[template] . identifier[render] ( identifier[settings] )
keyword[return] identifier[_read_temp] ( identifier[network] ) | def get_network_settings():
"""
Return the contents of the global network script.
CLI Example:
.. code-block:: bash
salt '*' ip.get_network_settings
"""
skip_etc_default_networking = __grains__['osfullname'] == 'Ubuntu' and int(__grains__['osrelease'].split('.')[0]) >= 12
if skip_etc_default_networking:
settings = {}
if __salt__['service.available']('networking'):
if __salt__['service.status']('networking'):
settings['networking'] = 'yes' # depends on [control=['if'], data=[]]
else:
settings['networking'] = 'no' # depends on [control=['if'], data=[]]
else:
settings['networking'] = 'no'
hostname = _parse_hostname()
domainname = _parse_domainname()
searchdomain = _parse_searchdomain()
settings['hostname'] = hostname
settings['domainname'] = domainname
settings['searchdomain'] = searchdomain # depends on [control=['if'], data=[]]
else:
settings = _parse_current_network_settings()
try:
template = JINJA.get_template('display-network.jinja') # depends on [control=['try'], data=[]]
except jinja2.exceptions.TemplateNotFound:
log.error('Could not load template display-network.jinja')
return '' # depends on [control=['except'], data=[]]
network = template.render(settings)
return _read_temp(network) |
def _ts_value(position, counts, bkg, model, C_0_map):
"""
Compute TS value at a given pixel position using the approach described
in Stewart (2009).
Parameters
----------
position : tuple
Pixel position.
counts : `~numpy.ndarray`
Count map.
bkg : `~numpy.ndarray`
Background map.
model : `~numpy.ndarray`
        Source model map.
    C_0_map : `~numpy.ndarray`
        Cash statistic map for the null hypothesis (zero source amplitude).
    Returns
    -------
    TS : float
        TS value at the given pixel position.
    amplitude : float
        Best-fit amplitude of the source model.
    niter : int
        Number of function evaluations used by the root finder.
"""
extract_fn = _collect_wrapper(extract_large_array)
truncate_fn = _collect_wrapper(extract_small_array)
# Get data slices
counts_slice = extract_fn(counts, model, position)
bkg_slice = extract_fn(bkg, model, position)
C_0_slice = extract_fn(C_0_map, model, position)
model_slice = truncate_fn(model, counts, position)
# Flattened Arrays
counts_ = np.concatenate([t.flat for t in counts_slice])
bkg_ = np.concatenate([t.flat for t in bkg_slice])
model_ = np.concatenate([t.flat for t in model_slice])
C_0_ = np.concatenate([t.flat for t in C_0_slice])
C_0 = np.sum(C_0_)
root_fn = _sum_wrapper(_f_cash_root)
amplitude, niter = _root_amplitude_brentq(counts_, bkg_, model_,
root_fn=_f_cash_root)
if niter > MAX_NITER:
print('Exceeded maximum number of function evaluations!')
return np.nan, amplitude, niter
with np.errstate(invalid='ignore', divide='ignore'):
C_1 = f_cash_sum(amplitude, counts_, bkg_, model_)
# Compute and return TS value
return (C_0 - C_1) * np.sign(amplitude), amplitude, niter | def function[_ts_value, parameter[position, counts, bkg, model, C_0_map]]:
constant[
Compute TS value at a given pixel position using the approach described
in Stewart (2009).
Parameters
----------
position : tuple
Pixel position.
counts : `~numpy.ndarray`
Count map.
bkg : `~numpy.ndarray`
Background map.
model : `~numpy.ndarray`
        Source model map.
    C_0_map : `~numpy.ndarray`
        Cash statistic map for the null hypothesis (zero source amplitude).
    Returns
    -------
    TS : float
        TS value at the given pixel position.
    amplitude : float
        Best-fit amplitude of the source model.
    niter : int
        Number of function evaluations used by the root finder.
]
variable[extract_fn] assign[=] call[name[_collect_wrapper], parameter[name[extract_large_array]]]
variable[truncate_fn] assign[=] call[name[_collect_wrapper], parameter[name[extract_small_array]]]
variable[counts_slice] assign[=] call[name[extract_fn], parameter[name[counts], name[model], name[position]]]
variable[bkg_slice] assign[=] call[name[extract_fn], parameter[name[bkg], name[model], name[position]]]
variable[C_0_slice] assign[=] call[name[extract_fn], parameter[name[C_0_map], name[model], name[position]]]
variable[model_slice] assign[=] call[name[truncate_fn], parameter[name[model], name[counts], name[position]]]
variable[counts_] assign[=] call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da18eb55f60>]]
variable[bkg_] assign[=] call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da18eb54040>]]
variable[model_] assign[=] call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da18eb56740>]]
variable[C_0_] assign[=] call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da18eb552d0>]]
variable[C_0] assign[=] call[name[np].sum, parameter[name[C_0_]]]
variable[root_fn] assign[=] call[name[_sum_wrapper], parameter[name[_f_cash_root]]]
<ast.Tuple object at 0x7da18eb56b00> assign[=] call[name[_root_amplitude_brentq], parameter[name[counts_], name[bkg_], name[model_]]]
if compare[name[niter] greater[>] name[MAX_NITER]] begin[:]
call[name[print], parameter[constant[Exceeded maximum number of function evaluations!]]]
return[tuple[[<ast.Attribute object at 0x7da20c6c4d60>, <ast.Name object at 0x7da20c6c40a0>, <ast.Name object at 0x7da20c6c5360>]]]
with call[name[np].errstate, parameter[]] begin[:]
variable[C_1] assign[=] call[name[f_cash_sum], parameter[name[amplitude], name[counts_], name[bkg_], name[model_]]]
return[tuple[[<ast.BinOp object at 0x7da20c6c60b0>, <ast.Name object at 0x7da20c6c4a60>, <ast.Name object at 0x7da20c6c41c0>]]] | keyword[def] identifier[_ts_value] ( identifier[position] , identifier[counts] , identifier[bkg] , identifier[model] , identifier[C_0_map] ):
literal[string]
identifier[extract_fn] = identifier[_collect_wrapper] ( identifier[extract_large_array] )
identifier[truncate_fn] = identifier[_collect_wrapper] ( identifier[extract_small_array] )
identifier[counts_slice] = identifier[extract_fn] ( identifier[counts] , identifier[model] , identifier[position] )
identifier[bkg_slice] = identifier[extract_fn] ( identifier[bkg] , identifier[model] , identifier[position] )
identifier[C_0_slice] = identifier[extract_fn] ( identifier[C_0_map] , identifier[model] , identifier[position] )
identifier[model_slice] = identifier[truncate_fn] ( identifier[model] , identifier[counts] , identifier[position] )
identifier[counts_] = identifier[np] . identifier[concatenate] ([ identifier[t] . identifier[flat] keyword[for] identifier[t] keyword[in] identifier[counts_slice] ])
identifier[bkg_] = identifier[np] . identifier[concatenate] ([ identifier[t] . identifier[flat] keyword[for] identifier[t] keyword[in] identifier[bkg_slice] ])
identifier[model_] = identifier[np] . identifier[concatenate] ([ identifier[t] . identifier[flat] keyword[for] identifier[t] keyword[in] identifier[model_slice] ])
identifier[C_0_] = identifier[np] . identifier[concatenate] ([ identifier[t] . identifier[flat] keyword[for] identifier[t] keyword[in] identifier[C_0_slice] ])
identifier[C_0] = identifier[np] . identifier[sum] ( identifier[C_0_] )
identifier[root_fn] = identifier[_sum_wrapper] ( identifier[_f_cash_root] )
identifier[amplitude] , identifier[niter] = identifier[_root_amplitude_brentq] ( identifier[counts_] , identifier[bkg_] , identifier[model_] ,
identifier[root_fn] = identifier[_f_cash_root] )
keyword[if] identifier[niter] > identifier[MAX_NITER] :
identifier[print] ( literal[string] )
keyword[return] identifier[np] . identifier[nan] , identifier[amplitude] , identifier[niter]
keyword[with] identifier[np] . identifier[errstate] ( identifier[invalid] = literal[string] , identifier[divide] = literal[string] ):
identifier[C_1] = identifier[f_cash_sum] ( identifier[amplitude] , identifier[counts_] , identifier[bkg_] , identifier[model_] )
keyword[return] ( identifier[C_0] - identifier[C_1] )* identifier[np] . identifier[sign] ( identifier[amplitude] ), identifier[amplitude] , identifier[niter] | def _ts_value(position, counts, bkg, model, C_0_map):
"""
Compute TS value at a given pixel position using the approach described
in Stewart (2009).
Parameters
----------
position : tuple
Pixel position.
counts : `~numpy.ndarray`
Count map.
bkg : `~numpy.ndarray`
Background map.
model : `~numpy.ndarray`
Source model map.
Returns
-------
TS : float
TS value at the given pixel position.
"""
extract_fn = _collect_wrapper(extract_large_array)
truncate_fn = _collect_wrapper(extract_small_array)
# Get data slices
counts_slice = extract_fn(counts, model, position)
bkg_slice = extract_fn(bkg, model, position)
C_0_slice = extract_fn(C_0_map, model, position)
model_slice = truncate_fn(model, counts, position)
# Flattened Arrays
counts_ = np.concatenate([t.flat for t in counts_slice])
bkg_ = np.concatenate([t.flat for t in bkg_slice])
model_ = np.concatenate([t.flat for t in model_slice])
C_0_ = np.concatenate([t.flat for t in C_0_slice])
C_0 = np.sum(C_0_)
root_fn = _sum_wrapper(_f_cash_root)
(amplitude, niter) = _root_amplitude_brentq(counts_, bkg_, model_, root_fn=_f_cash_root)
if niter > MAX_NITER:
print('Exceeded maximum number of function evaluations!')
return (np.nan, amplitude, niter) # depends on [control=['if'], data=['niter']]
with np.errstate(invalid='ignore', divide='ignore'):
C_1 = f_cash_sum(amplitude, counts_, bkg_, model_) # depends on [control=['with'], data=[]]
# Compute and return TS value
return ((C_0 - C_1) * np.sign(amplitude), amplitude, niter) |
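For orientation (an interpretation, not stated in the source): C_0 and C_1 are Cash log-likelihood statistics for the null (zero amplitude) and best-fit hypotheses, so the returned value behaves like a signed likelihood-ratio test statistic,

    TS = (C_0 - C_1) * sign(A_best),

positive where the fitted amplitude is positive and the source model improves the fit.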
def show(self):
"""Shows the new colors on the pixels themselves if they haven't already
been autowritten.
The colors may or may not be showing after this function returns because
it may be done asynchronously."""
if self.brightness > 0.99:
neopixel_write(self.pin, self.buf)
else:
neopixel_write(self.pin, bytearray([int(i * self.brightness) for i in self.buf])) | def function[show, parameter[self]]:
constant[Shows the new colors on the pixels themselves if they haven't already
been autowritten.
The colors may or may not be showing after this function returns because
it may be done asynchronously.]
if compare[name[self].brightness greater[>] constant[0.99]] begin[:]
call[name[neopixel_write], parameter[name[self].pin, name[self].buf]] | keyword[def] identifier[show] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[brightness] > literal[int] :
identifier[neopixel_write] ( identifier[self] . identifier[pin] , identifier[self] . identifier[buf] )
keyword[else] :
identifier[neopixel_write] ( identifier[self] . identifier[pin] , identifier[bytearray] ([ identifier[int] ( identifier[i] * identifier[self] . identifier[brightness] ) keyword[for] identifier[i] keyword[in] identifier[self] . identifier[buf] ])) | def show(self):
"""Shows the new colors on the pixels themselves if they haven't already
been autowritten.
The colors may or may not be showing after this function returns because
it may be done asynchronously."""
if self.brightness > 0.99:
neopixel_write(self.pin, self.buf) # depends on [control=['if'], data=[]]
else:
neopixel_write(self.pin, bytearray([int(i * self.brightness) for i in self.buf])) |
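A worked trace of the scaling branch; `pixels` is a hypothetical instance and the direct assignment to buf is for illustration only.

pixels.brightness = 0.5           # <= 0.99, so the scaled branch runs
pixels.buf = bytearray([200, 10, 255])
pixels.show()                     # writes bytearray([100, 5, 127]) to the pin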
def runCLI():
"""
The starting point for the execution of the Scrapple command line tool.
runCLI uses the docstring as the usage description for the scrapple command. \
The class for the required command is selected by a dynamic dispatch, and the \
command is executed through the execute_command() method of the command class.
"""
args = docopt(__doc__, version='0.3.0')
try:
check_arguments(args)
command_list = ['genconfig', 'run', 'generate']
select = itemgetter('genconfig', 'run', 'generate')
selectedCommand = command_list[select(args).index(True)]
cmdClass = get_command_class(selectedCommand)
obj = cmdClass(args)
obj.execute_command()
except POSSIBLE_EXCEPTIONS as e:
print('\n', e, '\n') | def function[runCLI, parameter[]]:
constant[
The starting point for the execution of the Scrapple command line tool.
runCLI uses the docstring as the usage description for the scrapple command. The class for the required command is selected by a dynamic dispatch, and the command is executed through the execute_command() method of the command class.
]
variable[args] assign[=] call[name[docopt], parameter[name[__doc__]]]
<ast.Try object at 0x7da1b1506aa0> | keyword[def] identifier[runCLI] ():
literal[string]
identifier[args] = identifier[docopt] ( identifier[__doc__] , identifier[version] = literal[string] )
keyword[try] :
identifier[check_arguments] ( identifier[args] )
identifier[command_list] =[ literal[string] , literal[string] , literal[string] ]
identifier[select] = identifier[itemgetter] ( literal[string] , literal[string] , literal[string] )
identifier[selectedCommand] = identifier[command_list] [ identifier[select] ( identifier[args] ). identifier[index] ( keyword[True] )]
identifier[cmdClass] = identifier[get_command_class] ( identifier[selectedCommand] )
identifier[obj] = identifier[cmdClass] ( identifier[args] )
identifier[obj] . identifier[execute_command] ()
keyword[except] identifier[POSSIBLE_EXCEPTIONS] keyword[as] identifier[e] :
identifier[print] ( literal[string] , identifier[e] , literal[string] ) | def runCLI():
"""
The starting point for the execution of the Scrapple command line tool.
runCLI uses the docstring as the usage description for the scrapple command. The class for the required command is selected by a dynamic dispatch, and the command is executed through the execute_command() method of the command class.
"""
args = docopt(__doc__, version='0.3.0')
try:
check_arguments(args)
command_list = ['genconfig', 'run', 'generate']
select = itemgetter('genconfig', 'run', 'generate')
selectedCommand = command_list[select(args).index(True)]
cmdClass = get_command_class(selectedCommand)
obj = cmdClass(args)
obj.execute_command() # depends on [control=['try'], data=[]]
except POSSIBLE_EXCEPTIONS as e:
print('\n', e, '\n') # depends on [control=['except'], data=['e']] |
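# Isolated sketch of the dynamic-dispatch idiom used by runCLI above:
# itemgetter pulls the three boolean command flags out of the docopt args
# dict, and .index(True) selects the chosen command. The args dict is an
# assumed example of docopt output, not taken from the real tool.
from operator import itemgetter

args = {'genconfig': False, 'run': True, 'generate': False}
command_list = ['genconfig', 'run', 'generate']
select = itemgetter('genconfig', 'run', 'generate')
selectedCommand = command_list[select(args).index(True)]
assert selectedCommand == 'run'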
def delete(self):
"""
Destructor.
"""
if self.maplesat:
pysolvers.maplecm_del(self.maplesat)
self.maplesat = None
if self.prfile:
self.prfile.close() | def function[delete, parameter[self]]:
constant[
Destructor.
]
if name[self].maplesat begin[:]
call[name[pysolvers].maplecm_del, parameter[name[self].maplesat]]
name[self].maplesat assign[=] constant[None]
if name[self].prfile begin[:]
call[name[self].prfile.close, parameter[]] | keyword[def] identifier[delete] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[maplesat] :
identifier[pysolvers] . identifier[maplecm_del] ( identifier[self] . identifier[maplesat] )
identifier[self] . identifier[maplesat] = keyword[None]
keyword[if] identifier[self] . identifier[prfile] :
identifier[self] . identifier[prfile] . identifier[close] () | def delete(self):
"""
Destructor.
"""
if self.maplesat:
pysolvers.maplecm_del(self.maplesat)
self.maplesat = None
if self.prfile:
self.prfile.close() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def _error(self, exc_info):
""" Retrieves the error info """
if self.exc_info:
if self.traceback:
return exc_info
return exc_info[:2]
return exc_info[1] | def function[_error, parameter[self, exc_info]]:
constant[ Retrieves the error info ]
if name[self].exc_info begin[:]
if name[self].traceback begin[:]
return[name[exc_info]]
return[call[name[exc_info]][<ast.Slice object at 0x7da2041db4f0>]]
return[call[name[exc_info]][constant[1]]] | keyword[def] identifier[_error] ( identifier[self] , identifier[exc_info] ):
literal[string]
keyword[if] identifier[self] . identifier[exc_info] :
keyword[if] identifier[self] . identifier[traceback] :
keyword[return] identifier[exc_info]
keyword[return] identifier[exc_info] [: literal[int] ]
keyword[return] identifier[exc_info] [ literal[int] ] | def _error(self, exc_info):
""" Retrieves the error info """
if self.exc_info:
if self.traceback:
return exc_info # depends on [control=['if'], data=[]]
return exc_info[:2] # depends on [control=['if'], data=[]]
return exc_info[1] |
def add_predicate(self, name: str, function: Callable, side_arguments: List[str] = None):
"""
Adds a predicate to this domain language. Typically you do this with the ``@predicate``
decorator on the methods in your class. But, if you need to for whatever reason, you can
also call this function yourself with a (type-annotated) function to add it to your
language.
Parameters
----------
name : ``str``
The name that we will use in the induced language for this function.
function : ``Callable``
The function that gets called when executing a predicate with the given name.
side_arguments : ``List[str]``, optional
If given, we will ignore these arguments for the purposes of grammar induction. This
is to allow passing extra arguments from the decoder state that are not explicitly part
of the language the decoder produces, such as the decoder's attention over the question
when a terminal was predicted. If you use this functionality, you also `must` use
``language.execute_action_sequence()`` instead of ``language.execute()``, and you must
pass the additional side arguments needed to that function. See
:func:`execute_action_sequence` for more information.
"""
side_arguments = side_arguments or []
signature = inspect.signature(function)
argument_types = [param.annotation for name, param in signature.parameters.items()
if name not in side_arguments]
return_type = signature.return_annotation
argument_nltk_types: List[PredicateType] = [PredicateType.get_type(arg_type)
for arg_type in argument_types]
return_nltk_type = PredicateType.get_type(return_type)
function_nltk_type = PredicateType.get_function_type(argument_nltk_types, return_nltk_type)
self._functions[name] = function
self._function_types[name].append(function_nltk_type) | def function[add_predicate, parameter[self, name, function, side_arguments]]:
constant[
Adds a predicate to this domain language. Typically you do this with the ``@predicate``
decorator on the methods in your class. But, if you need to for whatever reason, you can
also call this function yourself with a (type-annotated) function to add it to your
language.
Parameters
----------
name : ``str``
The name that we will use in the induced language for this function.
function : ``Callable``
The function that gets called when executing a predicate with the given name.
side_arguments : ``List[str]``, optional
If given, we will ignore these arguments for the purposes of grammar induction. This
is to allow passing extra arguments from the decoder state that are not explicitly part
of the language the decoder produces, such as the decoder's attention over the question
when a terminal was predicted. If you use this functionality, you also `must` use
``language.execute_action_sequence()`` instead of ``language.execute()``, and you must
pass the additional side arguments needed to that function. See
:func:`execute_action_sequence` for more information.
]
variable[side_arguments] assign[=] <ast.BoolOp object at 0x7da20c9912a0>
variable[signature] assign[=] call[name[inspect].signature, parameter[name[function]]]
variable[argument_types] assign[=] <ast.ListComp object at 0x7da20c993e50>
variable[return_type] assign[=] name[signature].return_annotation
<ast.AnnAssign object at 0x7da20c9922c0>
variable[return_nltk_type] assign[=] call[name[PredicateType].get_type, parameter[name[return_type]]]
variable[function_nltk_type] assign[=] call[name[PredicateType].get_function_type, parameter[name[argument_nltk_types], name[return_nltk_type]]]
call[name[self]._functions][name[name]] assign[=] name[function]
call[call[name[self]._function_types][name[name]].append, parameter[name[function_nltk_type]]] | keyword[def] identifier[add_predicate] ( identifier[self] , identifier[name] : identifier[str] , identifier[function] : identifier[Callable] , identifier[side_arguments] : identifier[List] [ identifier[str] ]= keyword[None] ):
literal[string]
identifier[side_arguments] = identifier[side_arguments] keyword[or] []
identifier[signature] = identifier[inspect] . identifier[signature] ( identifier[function] )
identifier[argument_types] =[ identifier[param] . identifier[annotation] keyword[for] identifier[name] , identifier[param] keyword[in] identifier[signature] . identifier[parameters] . identifier[items] ()
keyword[if] identifier[name] keyword[not] keyword[in] identifier[side_arguments] ]
identifier[return_type] = identifier[signature] . identifier[return_annotation]
identifier[argument_nltk_types] : identifier[List] [ identifier[PredicateType] ]=[ identifier[PredicateType] . identifier[get_type] ( identifier[arg_type] )
keyword[for] identifier[arg_type] keyword[in] identifier[argument_types] ]
identifier[return_nltk_type] = identifier[PredicateType] . identifier[get_type] ( identifier[return_type] )
identifier[function_nltk_type] = identifier[PredicateType] . identifier[get_function_type] ( identifier[argument_nltk_types] , identifier[return_nltk_type] )
identifier[self] . identifier[_functions] [ identifier[name] ]= identifier[function]
identifier[self] . identifier[_function_types] [ identifier[name] ]. identifier[append] ( identifier[function_nltk_type] ) | def add_predicate(self, name: str, function: Callable, side_arguments: List[str]=None):
"""
Adds a predicate to this domain language. Typically you do this with the ``@predicate``
decorator on the methods in your class. But, if you need to for whatever reason, you can
also call this function yourself with a (type-annotated) function to add it to your
language.
Parameters
----------
name : ``str``
The name that we will use in the induced language for this function.
function : ``Callable``
The function that gets called when executing a predicate with the given name.
side_arguments : ``List[str]``, optional
If given, we will ignore these arguments for the purposes of grammar induction. This
is to allow passing extra arguments from the decoder state that are not explicitly part
of the language the decoder produces, such as the decoder's attention over the question
when a terminal was predicted. If you use this functionality, you also `must` use
``language.execute_action_sequence()`` instead of ``language.execute()``, and you must
pass the additional side arguments needed to that function. See
:func:`execute_action_sequence` for more information.
"""
side_arguments = side_arguments or []
signature = inspect.signature(function)
argument_types = [param.annotation for (name, param) in signature.parameters.items() if name not in side_arguments]
return_type = signature.return_annotation
argument_nltk_types: List[PredicateType] = [PredicateType.get_type(arg_type) for arg_type in argument_types]
return_nltk_type = PredicateType.get_type(return_type)
function_nltk_type = PredicateType.get_function_type(argument_nltk_types, return_nltk_type)
self._functions[name] = function
self._function_types[name].append(function_nltk_type) |
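# Minimal stdlib sketch of the signature introspection add_predicate relies
# on (no allennlp types involved; the add() function is an invented example).
import inspect

def add(x: int, y: int) -> int:
    return x + y

sig = inspect.signature(add)
argument_types = [param.annotation for param in sig.parameters.values()]
assert argument_types == [int, int]
assert sig.return_annotation is int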
def encodeThetas(self, theta1, theta2):
"""Return the SDR for theta1 and theta2"""
# print >> sys.stderr, "encoded theta1 value = ", theta1
# print >> sys.stderr, "encoded theta2 value = ", theta2
t1e = self.theta1Encoder.encode(theta1)
t2e = self.theta2Encoder.encode(theta2)
# print >> sys.stderr, "encoded theta1 = ", t1e.nonzero()[0]
# print >> sys.stderr, "encoded theta2 = ", t2e.nonzero()[0]
ex = numpy.outer(t2e,t1e)
return ex.flatten().nonzero()[0] | def function[encodeThetas, parameter[self, theta1, theta2]]:
constant[Return the SDR for theta1 and theta2]
variable[t1e] assign[=] call[name[self].theta1Encoder.encode, parameter[name[theta1]]]
variable[t2e] assign[=] call[name[self].theta2Encoder.encode, parameter[name[theta2]]]
variable[ex] assign[=] call[name[numpy].outer, parameter[name[t2e], name[t1e]]]
return[call[call[call[name[ex].flatten, parameter[]].nonzero, parameter[]]][constant[0]]] | keyword[def] identifier[encodeThetas] ( identifier[self] , identifier[theta1] , identifier[theta2] ):
literal[string]
identifier[t1e] = identifier[self] . identifier[theta1Encoder] . identifier[encode] ( identifier[theta1] )
identifier[t2e] = identifier[self] . identifier[theta2Encoder] . identifier[encode] ( identifier[theta2] )
identifier[ex] = identifier[numpy] . identifier[outer] ( identifier[t2e] , identifier[t1e] )
keyword[return] identifier[ex] . identifier[flatten] (). identifier[nonzero] ()[ literal[int] ] | def encodeThetas(self, theta1, theta2):
"""Return the SDR for theta1 and theta2"""
# print >> sys.stderr, "encoded theta1 value = ", theta1
# print >> sys.stderr, "encoded theta2 value = ", theta2
t1e = self.theta1Encoder.encode(theta1)
t2e = self.theta2Encoder.encode(theta2)
# print >> sys.stderr, "encoded theta1 = ", t1e.nonzero()[0]
# print >> sys.stderr, "encoded theta2 = ", t2e.nonzero()[0]
ex = numpy.outer(t2e, t1e)
return ex.flatten().nonzero()[0] |
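# The joint-encoding trick above in isolation: the outer product of two
# binary vectors is flattened and the indices of its non-zero entries form
# the combined SDR. Toy vectors stand in for the real encoder output.
import numpy

t1e = numpy.array([1, 0, 1])
t2e = numpy.array([0, 1])
ex = numpy.outer(t2e, t1e)           # shape (2, 3)
active = ex.flatten().nonzero()[0]   # indices of active bits
assert list(active) == [3, 5]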
def parse_seeds_xml(root):
"""
Parse <seeds> element in the UBCPI XBlock's content XML.
Args:
root (lxml.etree.Element): The root of the <seeds> node in the tree.
Returns:
        a list of deserialized representations of seeds. E.g.
[{
'answer': 1, # option index starting from one
'rationale': 'This is a seeded answer',
},
{....
}]
Raises:
ValidationError: The XML definition is invalid.
"""
seeds = []
for seed_el in root.findall('seed'):
seed_dict = dict()
seed_dict['rationale'] = _safe_get_text(seed_el)
if 'option' in seed_el.attrib:
seed_dict['answer'] = int(seed_el.attrib['option']) - 1
else:
raise ValidationError(_('Seed element must have an option attribute.'))
seeds.append(seed_dict)
return seeds | def function[parse_seeds_xml, parameter[root]]:
constant[
Parse <seeds> element in the UBCPI XBlock's content XML.
Args:
root (lxml.etree.Element): The root of the <seeds> node in the tree.
Returns:
        a list of deserialized representations of seeds. E.g.
[{
'answer': 1, # option index starting from one
'rationale': 'This is a seeded answer',
},
{....
}]
Raises:
ValidationError: The XML definition is invalid.
]
variable[seeds] assign[=] list[[]]
for taget[name[seed_el]] in starred[call[name[root].findall, parameter[constant[seed]]]] begin[:]
variable[seed_dict] assign[=] call[name[dict], parameter[]]
call[name[seed_dict]][constant[rationale]] assign[=] call[name[_safe_get_text], parameter[name[seed_el]]]
if compare[constant[option] in name[seed_el].attrib] begin[:]
call[name[seed_dict]][constant[answer]] assign[=] binary_operation[call[name[int], parameter[call[name[seed_el].attrib][constant[option]]]] - constant[1]]
call[name[seeds].append, parameter[name[seed_dict]]]
return[name[seeds]] | keyword[def] identifier[parse_seeds_xml] ( identifier[root] ):
literal[string]
identifier[seeds] =[]
keyword[for] identifier[seed_el] keyword[in] identifier[root] . identifier[findall] ( literal[string] ):
identifier[seed_dict] = identifier[dict] ()
identifier[seed_dict] [ literal[string] ]= identifier[_safe_get_text] ( identifier[seed_el] )
keyword[if] literal[string] keyword[in] identifier[seed_el] . identifier[attrib] :
identifier[seed_dict] [ literal[string] ]= identifier[int] ( identifier[seed_el] . identifier[attrib] [ literal[string] ])- literal[int]
keyword[else] :
keyword[raise] identifier[ValidationError] ( identifier[_] ( literal[string] ))
identifier[seeds] . identifier[append] ( identifier[seed_dict] )
keyword[return] identifier[seeds] | def parse_seeds_xml(root):
"""
Parse <seeds> element in the UBCPI XBlock's content XML.
Args:
root (lxml.etree.Element): The root of the <seeds> node in the tree.
Returns:
a list of deserialized representation of seeds. E.g.
[{
'answer': 1, # option index starting from one
'rationale': 'This is a seeded answer',
},
{....
}]
Raises:
ValidationError: The XML definition is invalid.
"""
seeds = []
for seed_el in root.findall('seed'):
seed_dict = dict()
seed_dict['rationale'] = _safe_get_text(seed_el)
if 'option' in seed_el.attrib:
seed_dict['answer'] = int(seed_el.attrib['option']) - 1 # depends on [control=['if'], data=[]]
else:
raise ValidationError(_('Seed element must have an option attribute.'))
seeds.append(seed_dict) # depends on [control=['for'], data=['seed_el']]
return seeds |
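# Usage sketch, assuming parse_seeds_xml from above is in scope together
# with stub versions of its module-level helpers (_safe_get_text, the _()
# gettext alias and ValidationError are invented stand-ins here).
import xml.etree.ElementTree as ET

def _safe_get_text(element):
    return element.text or ''

def _(message):
    return message

class ValidationError(Exception):
    pass

root = ET.fromstring('<seeds><seed option="2">Because of X</seed></seeds>')
assert parse_seeds_xml(root) == [{'rationale': 'Because of X', 'answer': 1}]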
def walk_and_clean(data):
"""
Recursively walks list of dicts (which may themselves embed lists and dicts),
transforming namedtuples to OrderedDicts and
using ``clean_key_name(k)`` to make keys into SQL-safe column names
>>> data = [{'a': 1}, [{'B': 2}, {'B': 3}], {'F': {'G': 4}}]
>>> pprint(walk_and_clean(data))
[OrderedDict([('a', 1)]),
[OrderedDict([('b', 2)]), OrderedDict([('b', 3)])],
OrderedDict([('f', OrderedDict([('g', 4)]))])]
"""
# transform namedtuples to OrderedDicts
if hasattr(data, '_fields'):
data = OrderedDict((k,v) for (k,v) in zip(data._fields, data))
# Recursively clean up child dicts and lists
if hasattr(data, 'items') and hasattr(data, '__setitem__'):
for (key, val) in data.items():
data[key] = walk_and_clean(val)
elif isinstance(data, list) or isinstance(data, tuple) \
or hasattr(data, '__next__') or hasattr(data, 'next'):
data = [walk_and_clean(d) for d in data]
# Clean up any keys in this dict itself
if hasattr(data, 'items'):
original_keys = data.keys()
tup = ((clean_key_name(k), v) for (k, v) in data.items())
data = OrderedDict(tup)
if len(data) < len(original_keys):
raise KeyError('Cleaning up %s created duplicates' %
original_keys)
return data | def function[walk_and_clean, parameter[data]]:
constant[
Recursively walks list of dicts (which may themselves embed lists and dicts),
transforming namedtuples to OrderedDicts and
using ``clean_key_name(k)`` to make keys into SQL-safe column names
>>> data = [{'a': 1}, [{'B': 2}, {'B': 3}], {'F': {'G': 4}}]
>>> pprint(walk_and_clean(data))
[OrderedDict([('a', 1)]),
[OrderedDict([('b', 2)]), OrderedDict([('b', 3)])],
OrderedDict([('f', OrderedDict([('g', 4)]))])]
]
if call[name[hasattr], parameter[name[data], constant[_fields]]] begin[:]
variable[data] assign[=] call[name[OrderedDict], parameter[<ast.GeneratorExp object at 0x7da1b196b640>]]
if <ast.BoolOp object at 0x7da1b196be50> begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b196ca00>, <ast.Name object at 0x7da1b196f160>]]] in starred[call[name[data].items, parameter[]]] begin[:]
call[name[data]][name[key]] assign[=] call[name[walk_and_clean], parameter[name[val]]]
if call[name[hasattr], parameter[name[data], constant[items]]] begin[:]
variable[original_keys] assign[=] call[name[data].keys, parameter[]]
variable[tup] assign[=] <ast.GeneratorExp object at 0x7da1b1876170>
variable[data] assign[=] call[name[OrderedDict], parameter[name[tup]]]
if compare[call[name[len], parameter[name[data]]] less[<] call[name[len], parameter[name[original_keys]]]] begin[:]
<ast.Raise object at 0x7da1b1876020>
return[name[data]] | keyword[def] identifier[walk_and_clean] ( identifier[data] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[data] , literal[string] ):
identifier[data] = identifier[OrderedDict] (( identifier[k] , identifier[v] ) keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[zip] ( identifier[data] . identifier[_fields] , identifier[data] ))
keyword[if] identifier[hasattr] ( identifier[data] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[data] , literal[string] ):
keyword[for] ( identifier[key] , identifier[val] ) keyword[in] identifier[data] . identifier[items] ():
identifier[data] [ identifier[key] ]= identifier[walk_and_clean] ( identifier[val] )
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[list] ) keyword[or] identifier[isinstance] ( identifier[data] , identifier[tuple] ) keyword[or] identifier[hasattr] ( identifier[data] , literal[string] ) keyword[or] identifier[hasattr] ( identifier[data] , literal[string] ):
identifier[data] =[ identifier[walk_and_clean] ( identifier[d] ) keyword[for] identifier[d] keyword[in] identifier[data] ]
keyword[if] identifier[hasattr] ( identifier[data] , literal[string] ):
identifier[original_keys] = identifier[data] . identifier[keys] ()
identifier[tup] =(( identifier[clean_key_name] ( identifier[k] ), identifier[v] ) keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[data] . identifier[items] ())
identifier[data] = identifier[OrderedDict] ( identifier[tup] )
keyword[if] identifier[len] ( identifier[data] )< identifier[len] ( identifier[original_keys] ):
keyword[raise] identifier[KeyError] ( literal[string] %
identifier[original_keys] )
keyword[return] identifier[data] | def walk_and_clean(data):
"""
Recursively walks list of dicts (which may themselves embed lists and dicts),
transforming namedtuples to OrderedDicts and
using ``clean_key_name(k)`` to make keys into SQL-safe column names
>>> data = [{'a': 1}, [{'B': 2}, {'B': 3}], {'F': {'G': 4}}]
>>> pprint(walk_and_clean(data))
[OrderedDict([('a', 1)]),
[OrderedDict([('b', 2)]), OrderedDict([('b', 3)])],
OrderedDict([('f', OrderedDict([('g', 4)]))])]
"""
# transform namedtuples to OrderedDicts
if hasattr(data, '_fields'):
data = OrderedDict(((k, v) for (k, v) in zip(data._fields, data))) # depends on [control=['if'], data=[]]
# Recursively clean up child dicts and lists
if hasattr(data, 'items') and hasattr(data, '__setitem__'):
for (key, val) in data.items():
data[key] = walk_and_clean(val) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(data, list) or isinstance(data, tuple) or hasattr(data, '__next__') or hasattr(data, 'next'):
data = [walk_and_clean(d) for d in data] # depends on [control=['if'], data=[]]
# Clean up any keys in this dict itself
if hasattr(data, 'items'):
original_keys = data.keys()
tup = ((clean_key_name(k), v) for (k, v) in data.items())
data = OrderedDict(tup)
if len(data) < len(original_keys):
raise KeyError('Cleaning up %s created duplicates' % original_keys) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return data |
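# A namedtuple example for the branch of walk_and_clean not covered by its
# doctest: _fields is detected and the tuple becomes an OrderedDict with
# cleaned keys. clean_key_name is stubbed as simple lower-casing here and
# is assumed to live in the same module as walk_and_clean.
from collections import OrderedDict, namedtuple

def clean_key_name(key):
    return key.lower()

Point = namedtuple('Point', ['X', 'Y'])
assert walk_and_clean(Point(1, 2)) == OrderedDict([('x', 1), ('y', 2)])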
def _format_line(headers, fields):
"""Format a line of a table.
Arguments:
headers: A list of strings that are used as the table headers.
fields: A list of the same length as `headers` where `fields[i]` is
the entry for `headers[i]` in this row. Elements can be of
arbitrary types. Pass `headers` to print the header row.
Returns:
A pretty string.
"""
assert len(fields) == len(headers), (fields, headers)
fields = ["%2.4f" % field if isinstance(field, float) else str(field)
for field in fields]
return ' '.join(' ' * max(0, len(header) - len(field)) + field
for (header, field) in zip(headers, fields)) | def function[_format_line, parameter[headers, fields]]:
constant[Format a line of a table.
Arguments:
headers: A list of strings that are used as the table headers.
fields: A list of the same length as `headers` where `fields[i]` is
the entry for `headers[i]` in this row. Elements can be of
arbitrary types. Pass `headers` to print the header row.
Returns:
A pretty string.
]
assert[compare[call[name[len], parameter[name[fields]]] equal[==] call[name[len], parameter[name[headers]]]]]
variable[fields] assign[=] <ast.ListComp object at 0x7da1b1f5b910>
return[call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da1b1f471f0>]]] | keyword[def] identifier[_format_line] ( identifier[headers] , identifier[fields] ):
literal[string]
keyword[assert] identifier[len] ( identifier[fields] )== identifier[len] ( identifier[headers] ),( identifier[fields] , identifier[headers] )
identifier[fields] =[ literal[string] % identifier[field] keyword[if] identifier[isinstance] ( identifier[field] , identifier[float] ) keyword[else] identifier[str] ( identifier[field] )
keyword[for] identifier[field] keyword[in] identifier[fields] ]
keyword[return] literal[string] . identifier[join] ( literal[string] * identifier[max] ( literal[int] , identifier[len] ( identifier[header] )- identifier[len] ( identifier[field] ))+ identifier[field]
keyword[for] ( identifier[header] , identifier[field] ) keyword[in] identifier[zip] ( identifier[headers] , identifier[fields] )) | def _format_line(headers, fields):
"""Format a line of a table.
Arguments:
headers: A list of strings that are used as the table headers.
fields: A list of the same length as `headers` where `fields[i]` is
the entry for `headers[i]` in this row. Elements can be of
arbitrary types. Pass `headers` to print the header row.
Returns:
A pretty string.
"""
assert len(fields) == len(headers), (fields, headers)
fields = ['%2.4f' % field if isinstance(field, float) else str(field) for field in fields]
return ' '.join((' ' * max(0, len(header) - len(field)) + field for (header, field) in zip(headers, fields))) |
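# Usage sketch for the right-aligned formatting above; passing `headers`
# twice prints the header row itself. Expected output shown in comments.
headers = ['step', 'loss']
print(_format_line(headers, headers))     # step loss
print(_format_line(headers, [10, 0.25]))  #   10 0.2500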
def update(self,
stats,
duration=3,
cs_status=None,
return_to_browser=False):
"""Update the screen.
INPUT
stats: Stats database to display
duration: duration of the loop
cs_status:
"None": standalone or server mode
"Connected": Client is connected to the server
"Disconnected": Client is disconnected from the server
return_to_browser:
            True: Do not exit, return to the browser list
False: Exit and return to the shell
OUTPUT
True: Exit key has been pressed
            False: Other cases...
"""
# Flush display
self.flush(stats, cs_status=cs_status)
# If the duration is < 0 (update + export time > refresh_time)
# Then display the interface and log a message
if duration <= 0:
logger.warning('Update and export time higher than refresh_time.')
duration = 0.1
# Wait duration (in s) time
exitkey = False
countdown = Timer(duration)
# Set the default timeout (in ms) for the getch method
self.term_window.timeout(int(duration * 1000))
while not countdown.finished() and not exitkey:
# Getkey
pressedkey = self.__catch_key(return_to_browser=return_to_browser)
# Is it an exit key ?
exitkey = (pressedkey == ord('\x1b') or pressedkey == ord('q'))
if not exitkey and pressedkey > -1:
# Redraw display
self.flush(stats, cs_status=cs_status)
# Overwrite the timeout with the countdown
self.term_window.timeout(int(countdown.get() * 1000))
return exitkey | def function[update, parameter[self, stats, duration, cs_status, return_to_browser]]:
constant[Update the screen.
INPUT
stats: Stats database to display
duration: duration of the loop
cs_status:
"None": standalone or server mode
"Connected": Client is connected to the server
"Disconnected": Client is disconnected from the server
return_to_browser:
            True: Do not exit, return to the browser list
False: Exit and return to the shell
OUTPUT
True: Exit key has been pressed
            False: Other cases...
]
call[name[self].flush, parameter[name[stats]]]
if compare[name[duration] less_or_equal[<=] constant[0]] begin[:]
call[name[logger].warning, parameter[constant[Update and export time higher than refresh_time.]]]
variable[duration] assign[=] constant[0.1]
variable[exitkey] assign[=] constant[False]
variable[countdown] assign[=] call[name[Timer], parameter[name[duration]]]
call[name[self].term_window.timeout, parameter[call[name[int], parameter[binary_operation[name[duration] * constant[1000]]]]]]
while <ast.BoolOp object at 0x7da18f09ed10> begin[:]
variable[pressedkey] assign[=] call[name[self].__catch_key, parameter[]]
variable[exitkey] assign[=] <ast.BoolOp object at 0x7da18f09ee60>
if <ast.BoolOp object at 0x7da18f09cb20> begin[:]
call[name[self].flush, parameter[name[stats]]]
call[name[self].term_window.timeout, parameter[call[name[int], parameter[binary_operation[call[name[countdown].get, parameter[]] * constant[1000]]]]]]
return[name[exitkey]] | keyword[def] identifier[update] ( identifier[self] ,
identifier[stats] ,
identifier[duration] = literal[int] ,
identifier[cs_status] = keyword[None] ,
identifier[return_to_browser] = keyword[False] ):
literal[string]
identifier[self] . identifier[flush] ( identifier[stats] , identifier[cs_status] = identifier[cs_status] )
keyword[if] identifier[duration] <= literal[int] :
identifier[logger] . identifier[warning] ( literal[string] )
identifier[duration] = literal[int]
identifier[exitkey] = keyword[False]
identifier[countdown] = identifier[Timer] ( identifier[duration] )
identifier[self] . identifier[term_window] . identifier[timeout] ( identifier[int] ( identifier[duration] * literal[int] ))
keyword[while] keyword[not] identifier[countdown] . identifier[finished] () keyword[and] keyword[not] identifier[exitkey] :
identifier[pressedkey] = identifier[self] . identifier[__catch_key] ( identifier[return_to_browser] = identifier[return_to_browser] )
identifier[exitkey] =( identifier[pressedkey] == identifier[ord] ( literal[string] ) keyword[or] identifier[pressedkey] == identifier[ord] ( literal[string] ))
keyword[if] keyword[not] identifier[exitkey] keyword[and] identifier[pressedkey] >- literal[int] :
identifier[self] . identifier[flush] ( identifier[stats] , identifier[cs_status] = identifier[cs_status] )
identifier[self] . identifier[term_window] . identifier[timeout] ( identifier[int] ( identifier[countdown] . identifier[get] ()* literal[int] ))
keyword[return] identifier[exitkey] | def update(self, stats, duration=3, cs_status=None, return_to_browser=False):
"""Update the screen.
INPUT
stats: Stats database to display
duration: duration of the loop
cs_status:
"None": standalone or server mode
"Connected": Client is connected to the server
"Disconnected": Client is disconnected from the server
return_to_browser:
True: Do not exist, return to the browser list
False: Exit and return to the shell
OUTPUT
True: Exit key has been pressed
            False: Other cases...
"""
# Flush display
self.flush(stats, cs_status=cs_status)
# If the duration is < 0 (update + export time > refresh_time)
# Then display the interface and log a message
if duration <= 0:
logger.warning('Update and export time higher than refresh_time.')
duration = 0.1 # depends on [control=['if'], data=['duration']]
# Wait duration (in s) time
exitkey = False
countdown = Timer(duration)
# Set the default timeout (in ms) for the getch method
self.term_window.timeout(int(duration * 1000))
while not countdown.finished() and (not exitkey):
# Getkey
pressedkey = self.__catch_key(return_to_browser=return_to_browser)
# Is it an exit key ?
exitkey = pressedkey == ord('\x1b') or pressedkey == ord('q')
if not exitkey and pressedkey > -1:
# Redraw display
self.flush(stats, cs_status=cs_status)
# Overwrite the timeout with the countdown
self.term_window.timeout(int(countdown.get() * 1000)) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return exitkey |
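# A minimal sketch of the countdown Timer the loop above depends on; this
# interface (finished()/get()) is assumed from how update() calls it, not
# taken from the real Glances source.
import time

class Timer:
    def __init__(self, duration):
        self.target = time.time() + duration

    def finished(self):
        return time.time() > self.target

    def get(self):
        # Seconds remaining until the deadline, floored at zero.
        return max(0.0, self.target - time.time())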
def unnest_collection(collection, df_list):
"""Unnest collection structure extracting all its datasets and converting \
them to Pandas Dataframes.
Args:
collection (OrderedDict): data in JSON-stat format, previously \
deserialized to a python object by \
json.load() or json.loads(),
df_list (list): list variable which will contain the converted \
datasets.
Returns:
Nothing.
"""
for item in collection['link']['item']:
if item['class'] == 'dataset':
df_list.append(Dataset.read(item['href']).write('dataframe'))
elif item['class'] == 'collection':
nested_collection = request(item['href'])
unnest_collection(nested_collection, df_list) | def function[unnest_collection, parameter[collection, df_list]]:
constant[Unnest collection structure extracting all its datasets and converting them to Pandas Dataframes.
Args:
collection (OrderedDict): data in JSON-stat format, previously deserialized to a python object by json.load() or json.loads(),
df_list (list): list variable which will contain the converted datasets.
Returns:
Nothing.
]
for taget[name[item]] in starred[call[call[name[collection]][constant[link]]][constant[item]]] begin[:]
if compare[call[name[item]][constant[class]] equal[==] constant[dataset]] begin[:]
call[name[df_list].append, parameter[call[call[name[Dataset].read, parameter[call[name[item]][constant[href]]]].write, parameter[constant[dataframe]]]]] | keyword[def] identifier[unnest_collection] ( identifier[collection] , identifier[df_list] ):
literal[string]
keyword[for] identifier[item] keyword[in] identifier[collection] [ literal[string] ][ literal[string] ]:
keyword[if] identifier[item] [ literal[string] ]== literal[string] :
identifier[df_list] . identifier[append] ( identifier[Dataset] . identifier[read] ( identifier[item] [ literal[string] ]). identifier[write] ( literal[string] ))
keyword[elif] identifier[item] [ literal[string] ]== literal[string] :
identifier[nested_collection] = identifier[request] ( identifier[item] [ literal[string] ])
identifier[unnest_collection] ( identifier[nested_collection] , identifier[df_list] ) | def unnest_collection(collection, df_list):
"""Unnest collection structure extracting all its datasets and converting them to Pandas Dataframes.
Args:
collection (OrderedDict): data in JSON-stat format, previously deserialized to a python object by json.load() or json.loads(),
df_list (list): list variable which will contain the converted datasets.
Returns:
Nothing.
"""
for item in collection['link']['item']:
if item['class'] == 'dataset':
df_list.append(Dataset.read(item['href']).write('dataframe')) # depends on [control=['if'], data=[]]
elif item['class'] == 'collection':
nested_collection = request(item['href'])
unnest_collection(nested_collection, df_list) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] |
def errors():
"""Error view in case of invalid oauth requests."""
from oauthlib.oauth2.rfc6749.errors import raise_from_error
try:
error = None
raise_from_error(request.values.get('error'), params=dict())
except OAuth2Error as raised:
error = raised
return render_template('invenio_oauth2server/errors.html', error=error) | def function[errors, parameter[]]:
constant[Error view in case of invalid oauth requests.]
from relative_module[oauthlib.oauth2.rfc6749.errors] import module[raise_from_error]
<ast.Try object at 0x7da1b2500dc0>
return[call[name[render_template], parameter[constant[invenio_oauth2server/errors.html]]]] | keyword[def] identifier[errors] ():
literal[string]
keyword[from] identifier[oauthlib] . identifier[oauth2] . identifier[rfc6749] . identifier[errors] keyword[import] identifier[raise_from_error]
keyword[try] :
identifier[error] = keyword[None]
identifier[raise_from_error] ( identifier[request] . identifier[values] . identifier[get] ( literal[string] ), identifier[params] = identifier[dict] ())
keyword[except] identifier[OAuth2Error] keyword[as] identifier[raised] :
identifier[error] = identifier[raised]
keyword[return] identifier[render_template] ( literal[string] , identifier[error] = identifier[error] ) | def errors():
"""Error view in case of invalid oauth requests."""
from oauthlib.oauth2.rfc6749.errors import raise_from_error
try:
error = None
raise_from_error(request.values.get('error'), params=dict()) # depends on [control=['try'], data=[]]
except OAuth2Error as raised:
error = raised # depends on [control=['except'], data=['raised']]
return render_template('invenio_oauth2server/errors.html', error=error) |
def snmp_server_host_community(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp")
host = ET.SubElement(snmp_server, "host")
ip_key = ET.SubElement(host, "ip")
ip_key.text = kwargs.pop('ip')
community = ET.SubElement(host, "community")
community.text = kwargs.pop('community')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[snmp_server_host_community, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[snmp_server] assign[=] call[name[ET].SubElement, parameter[name[config], constant[snmp-server]]]
variable[host] assign[=] call[name[ET].SubElement, parameter[name[snmp_server], constant[host]]]
variable[ip_key] assign[=] call[name[ET].SubElement, parameter[name[host], constant[ip]]]
name[ip_key].text assign[=] call[name[kwargs].pop, parameter[constant[ip]]]
variable[community] assign[=] call[name[ET].SubElement, parameter[name[host], constant[community]]]
name[community].text assign[=] call[name[kwargs].pop, parameter[constant[community]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[snmp_server_host_community] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[snmp_server] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[host] = identifier[ET] . identifier[SubElement] ( identifier[snmp_server] , literal[string] )
identifier[ip_key] = identifier[ET] . identifier[SubElement] ( identifier[host] , literal[string] )
identifier[ip_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[community] = identifier[ET] . identifier[SubElement] ( identifier[host] , literal[string] )
identifier[community] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def snmp_server_host_community(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
snmp_server = ET.SubElement(config, 'snmp-server', xmlns='urn:brocade.com:mgmt:brocade-snmp')
host = ET.SubElement(snmp_server, 'host')
ip_key = ET.SubElement(host, 'ip')
ip_key.text = kwargs.pop('ip')
community = ET.SubElement(host, 'community')
community.text = kwargs.pop('community')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
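# Stand-alone ElementTree sketch of the payload built above, with invented
# host values; the print shows the XML the callback would receive.
import xml.etree.ElementTree as ET

config = ET.Element('config')
snmp_server = ET.SubElement(config, 'snmp-server',
                            xmlns='urn:brocade.com:mgmt:brocade-snmp')
host = ET.SubElement(snmp_server, 'host')
ET.SubElement(host, 'ip').text = '10.0.0.1'
ET.SubElement(host, 'community').text = 'public'
print(ET.tostring(config, encoding='unicode'))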
def mapToAbsPosition(self, line, column):
"""Convert line and column number to absolute position
"""
block = self.document().findBlockByNumber(line)
if not block.isValid():
raise IndexError("Invalid line index %d" % line)
if column >= block.length():
raise IndexError("Invalid column index %d" % column)
return block.position() + column | def function[mapToAbsPosition, parameter[self, line, column]]:
constant[Convert line and column number to absolute position
]
variable[block] assign[=] call[call[name[self].document, parameter[]].findBlockByNumber, parameter[name[line]]]
if <ast.UnaryOp object at 0x7da18f58c700> begin[:]
<ast.Raise object at 0x7da18f58ea40>
if compare[name[column] greater_or_equal[>=] call[name[block].length, parameter[]]] begin[:]
<ast.Raise object at 0x7da18f58e230>
return[binary_operation[call[name[block].position, parameter[]] + name[column]]] | keyword[def] identifier[mapToAbsPosition] ( identifier[self] , identifier[line] , identifier[column] ):
literal[string]
identifier[block] = identifier[self] . identifier[document] (). identifier[findBlockByNumber] ( identifier[line] )
keyword[if] keyword[not] identifier[block] . identifier[isValid] ():
keyword[raise] identifier[IndexError] ( literal[string] % identifier[line] )
keyword[if] identifier[column] >= identifier[block] . identifier[length] ():
keyword[raise] identifier[IndexError] ( literal[string] % identifier[column] )
keyword[return] identifier[block] . identifier[position] ()+ identifier[column] | def mapToAbsPosition(self, line, column):
"""Convert line and column number to absolute position
"""
block = self.document().findBlockByNumber(line)
if not block.isValid():
raise IndexError('Invalid line index %d' % line) # depends on [control=['if'], data=[]]
if column >= block.length():
raise IndexError('Invalid column index %d' % column) # depends on [control=['if'], data=['column']]
return block.position() + column |
def add_child(self, child=None, name=None, dist=None, support=None):
"""
        Adds a new child to this node. If child node is not supplied
as an argument, a new node instance will be created.
Parameters
----------
child:
the node instance to be added as a child.
name:
the name that will be given to the child.
dist:
the distance from the node to the child.
support':
the support value of child partition.
Returns:
--------
The child node instance
"""
if child is None:
child = self.__class__()
if name is not None:
child.name = name
if dist is not None:
child.dist = dist
if support is not None:
child.support = support
self.children.append(child)
child.up = self
return child | def function[add_child, parameter[self, child, name, dist, support]]:
constant[
        Adds a new child to this node. If child node is not supplied
as an argument, a new node instance will be created.
Parameters
----------
child:
the node instance to be added as a child.
name:
the name that will be given to the child.
dist:
the distance from the node to the child.
        support:
the support value of child partition.
Returns:
--------
The child node instance
]
if compare[name[child] is constant[None]] begin[:]
variable[child] assign[=] call[name[self].__class__, parameter[]]
if compare[name[name] is_not constant[None]] begin[:]
name[child].name assign[=] name[name]
if compare[name[dist] is_not constant[None]] begin[:]
name[child].dist assign[=] name[dist]
if compare[name[support] is_not constant[None]] begin[:]
name[child].support assign[=] name[support]
call[name[self].children.append, parameter[name[child]]]
name[child].up assign[=] name[self]
return[name[child]] | keyword[def] identifier[add_child] ( identifier[self] , identifier[child] = keyword[None] , identifier[name] = keyword[None] , identifier[dist] = keyword[None] , identifier[support] = keyword[None] ):
literal[string]
keyword[if] identifier[child] keyword[is] keyword[None] :
identifier[child] = identifier[self] . identifier[__class__] ()
keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] :
identifier[child] . identifier[name] = identifier[name]
keyword[if] identifier[dist] keyword[is] keyword[not] keyword[None] :
identifier[child] . identifier[dist] = identifier[dist]
keyword[if] identifier[support] keyword[is] keyword[not] keyword[None] :
identifier[child] . identifier[support] = identifier[support]
identifier[self] . identifier[children] . identifier[append] ( identifier[child] )
identifier[child] . identifier[up] = identifier[self]
keyword[return] identifier[child] | def add_child(self, child=None, name=None, dist=None, support=None):
"""
        Adds a new child to this node. If child node is not supplied
as an argument, a new node instance will be created.
Parameters
----------
child:
the node instance to be added as a child.
name:
the name that will be given to the child.
dist:
the distance from the node to the child.
        support:
the support value of child partition.
Returns:
--------
The child node instance
"""
if child is None:
child = self.__class__() # depends on [control=['if'], data=['child']]
if name is not None:
child.name = name # depends on [control=['if'], data=['name']]
if dist is not None:
child.dist = dist # depends on [control=['if'], data=['dist']]
if support is not None:
child.support = support # depends on [control=['if'], data=['support']]
self.children.append(child)
child.up = self
return child |
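# Minimal sketch of the parent/child bookkeeping performed by add_child;
# TreeNode is an invented stand-in for the much richer real node class.
class TreeNode:
    def __init__(self, name=None, dist=None, support=None):
        self.name, self.dist, self.support = name, dist, support
        self.children = []
        self.up = None

    def add_child(self, child=None, **kwargs):
        child = child or TreeNode(**kwargs)
        self.children.append(child)
        child.up = self       # back-link to the parent
        return child

root = TreeNode(name='root')
leaf = root.add_child(name='A', dist=0.1)
assert leaf.up is root and root.children == [leaf]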
def _cache_file(self, src_path):
"""
Load the data from `self._xml_roots`
for `src_path`, if it hasn't been already.
"""
# If we have not yet loaded this source file
if src_path not in self._info_cache:
# We only want to keep violations that show up in each xml source.
# Thus, each time, we take the intersection. However, to do this
# we must treat the first time as a special case and just add all
# the violations from the first xml report.
violations = None
# A line is measured if it is measured in any of the reports, so
# we take set union each time and can just start with the empty set
measured = set()
# Loop through the files that contain the xml roots
for xml_document in self._xml_roots:
if xml_document.findall('.[@clover]'):
# see etc/schema/clover.xsd at https://bitbucket.org/atlassian/clover/src
line_nodes = self._get_src_path_line_nodes_clover(xml_document, src_path)
_number = 'num'
_hits = 'count'
elif xml_document.findall('.[@name]'):
# https://github.com/jacoco/jacoco/blob/master/org.jacoco.report/src/org/jacoco/report/xml/report.dtd
line_nodes = self._get_src_path_line_nodes_jacoco(xml_document, src_path)
_number = 'nr'
_hits = 'ci'
else:
# https://github.com/cobertura/web/blob/master/htdocs/xml/coverage-04.dtd
line_nodes = self._get_src_path_line_nodes_cobertura(xml_document, src_path)
_number = 'number'
_hits = 'hits'
if line_nodes is None:
continue
# First case, need to define violations initially
if violations is None:
violations = set(
Violation(int(line.get(_number)), None)
for line in line_nodes
if int(line.get(_hits, 0)) == 0)
# If we already have a violations set,
# take the intersection of the new
# violations set and its old self
else:
violations = violations & set(
Violation(int(line.get(_number)), None)
for line in line_nodes
if int(line.get(_hits, 0)) == 0
)
# Measured is the union of itself and the new measured
measured = measured | set(
int(line.get(_number)) for line in line_nodes
)
# If we don't have any information about the source file,
# don't report any violations
if violations is None:
violations = set()
self._info_cache[src_path] = (violations, measured) | def function[_cache_file, parameter[self, src_path]]:
constant[
Load the data from `self._xml_roots`
for `src_path`, if it hasn't been already.
]
if compare[name[src_path] <ast.NotIn object at 0x7da2590d7190> name[self]._info_cache] begin[:]
variable[violations] assign[=] constant[None]
variable[measured] assign[=] call[name[set], parameter[]]
for taget[name[xml_document]] in starred[name[self]._xml_roots] begin[:]
if call[name[xml_document].findall, parameter[constant[.[@clover]]]] begin[:]
variable[line_nodes] assign[=] call[name[self]._get_src_path_line_nodes_clover, parameter[name[xml_document], name[src_path]]]
variable[_number] assign[=] constant[num]
variable[_hits] assign[=] constant[count]
if compare[name[line_nodes] is constant[None]] begin[:]
continue
if compare[name[violations] is constant[None]] begin[:]
variable[violations] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da18f09e650>]]
variable[measured] assign[=] binary_operation[name[measured] <ast.BitOr object at 0x7da2590d6aa0> call[name[set], parameter[<ast.GeneratorExp object at 0x7da18f09d9c0>]]]
if compare[name[violations] is constant[None]] begin[:]
variable[violations] assign[=] call[name[set], parameter[]]
call[name[self]._info_cache][name[src_path]] assign[=] tuple[[<ast.Name object at 0x7da18f09cb80>, <ast.Name object at 0x7da18f09f520>]] | keyword[def] identifier[_cache_file] ( identifier[self] , identifier[src_path] ):
literal[string]
keyword[if] identifier[src_path] keyword[not] keyword[in] identifier[self] . identifier[_info_cache] :
identifier[violations] = keyword[None]
identifier[measured] = identifier[set] ()
keyword[for] identifier[xml_document] keyword[in] identifier[self] . identifier[_xml_roots] :
keyword[if] identifier[xml_document] . identifier[findall] ( literal[string] ):
identifier[line_nodes] = identifier[self] . identifier[_get_src_path_line_nodes_clover] ( identifier[xml_document] , identifier[src_path] )
identifier[_number] = literal[string]
identifier[_hits] = literal[string]
keyword[elif] identifier[xml_document] . identifier[findall] ( literal[string] ):
identifier[line_nodes] = identifier[self] . identifier[_get_src_path_line_nodes_jacoco] ( identifier[xml_document] , identifier[src_path] )
identifier[_number] = literal[string]
identifier[_hits] = literal[string]
keyword[else] :
identifier[line_nodes] = identifier[self] . identifier[_get_src_path_line_nodes_cobertura] ( identifier[xml_document] , identifier[src_path] )
identifier[_number] = literal[string]
identifier[_hits] = literal[string]
keyword[if] identifier[line_nodes] keyword[is] keyword[None] :
keyword[continue]
keyword[if] identifier[violations] keyword[is] keyword[None] :
identifier[violations] = identifier[set] (
identifier[Violation] ( identifier[int] ( identifier[line] . identifier[get] ( identifier[_number] )), keyword[None] )
keyword[for] identifier[line] keyword[in] identifier[line_nodes]
keyword[if] identifier[int] ( identifier[line] . identifier[get] ( identifier[_hits] , literal[int] ))== literal[int] )
keyword[else] :
identifier[violations] = identifier[violations] & identifier[set] (
identifier[Violation] ( identifier[int] ( identifier[line] . identifier[get] ( identifier[_number] )), keyword[None] )
keyword[for] identifier[line] keyword[in] identifier[line_nodes]
keyword[if] identifier[int] ( identifier[line] . identifier[get] ( identifier[_hits] , literal[int] ))== literal[int]
)
identifier[measured] = identifier[measured] | identifier[set] (
identifier[int] ( identifier[line] . identifier[get] ( identifier[_number] )) keyword[for] identifier[line] keyword[in] identifier[line_nodes]
)
keyword[if] identifier[violations] keyword[is] keyword[None] :
identifier[violations] = identifier[set] ()
identifier[self] . identifier[_info_cache] [ identifier[src_path] ]=( identifier[violations] , identifier[measured] ) | def _cache_file(self, src_path):
"""
Load the data from `self._xml_roots`
for `src_path`, if it hasn't been already.
"""
# If we have not yet loaded this source file
if src_path not in self._info_cache:
# We only want to keep violations that show up in each xml source.
# Thus, each time, we take the intersection. However, to do this
# we must treat the first time as a special case and just add all
# the violations from the first xml report.
violations = None
# A line is measured if it is measured in any of the reports, so
# we take set union each time and can just start with the empty set
measured = set()
# Loop through the files that contain the xml roots
for xml_document in self._xml_roots:
if xml_document.findall('.[@clover]'):
# see etc/schema/clover.xsd at https://bitbucket.org/atlassian/clover/src
line_nodes = self._get_src_path_line_nodes_clover(xml_document, src_path)
_number = 'num'
_hits = 'count' # depends on [control=['if'], data=[]]
elif xml_document.findall('.[@name]'):
# https://github.com/jacoco/jacoco/blob/master/org.jacoco.report/src/org/jacoco/report/xml/report.dtd
line_nodes = self._get_src_path_line_nodes_jacoco(xml_document, src_path)
_number = 'nr'
_hits = 'ci' # depends on [control=['if'], data=[]]
else:
# https://github.com/cobertura/web/blob/master/htdocs/xml/coverage-04.dtd
line_nodes = self._get_src_path_line_nodes_cobertura(xml_document, src_path)
_number = 'number'
_hits = 'hits'
if line_nodes is None:
continue # depends on [control=['if'], data=[]]
# First case, need to define violations initially
if violations is None:
violations = set((Violation(int(line.get(_number)), None) for line in line_nodes if int(line.get(_hits, 0)) == 0)) # depends on [control=['if'], data=['violations']]
else:
# If we already have a violations set,
# take the intersection of the new
# violations set and its old self
violations = violations & set((Violation(int(line.get(_number)), None) for line in line_nodes if int(line.get(_hits, 0)) == 0))
# Measured is the union of itself and the new measured
measured = measured | set((int(line.get(_number)) for line in line_nodes)) # depends on [control=['for'], data=['xml_document']]
# If we don't have any information about the source file,
# don't report any violations
if violations is None:
violations = set() # depends on [control=['if'], data=['violations']]
self._info_cache[src_path] = (violations, measured) # depends on [control=['if'], data=['src_path']] |
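# The merge rule above in isolation: a line counts as a violation only if
# every report leaves it unhit (intersection), but it counts as measured if
# any report saw it (union). The per-report sets are invented toy data.
unhit_a, unhit_b = {2, 5, 7}, {5, 7, 9}
measured_a, measured_b = {1, 2, 3, 5, 7}, {5, 7, 9}

violations = unhit_a & unhit_b
measured = measured_a | measured_b
assert violations == {5, 7}
assert measured == {1, 2, 3, 5, 7, 9}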
def wait_for_logs_matching(container, matcher, timeout=10, encoding='utf-8',
**logs_kwargs):
"""
Wait for matching log line(s) from the given container by streaming the
container's stdout and/or stderr outputs.
Each log line is decoded and any trailing whitespace is stripped before the
line is matched.
:param ~docker.models.containers.Container container:
        Container whose log lines to wait for.
:param matcher:
Callable that returns True once it has matched a decoded log line(s).
:param timeout:
Timeout value in seconds.
:param encoding:
Encoding to use when decoding container output to strings.
:param logs_kwargs:
Additional keyword arguments to pass to ``container.logs()``. For
example, the ``stdout`` and ``stderr`` boolean arguments can be used to
determine whether to stream stdout or stderr or both (the default).
:returns:
The final matching log line.
:raises TimeoutError:
When the timeout value is reached before matching log lines have been
found.
:raises RuntimeError:
When all log lines have been consumed but matching log lines have not
been found (the container must have stopped for its stream to have
ended without error).
"""
try:
for line in stream_logs(container, timeout=timeout, **logs_kwargs):
# Drop the trailing newline
line = line.decode(encoding).rstrip()
if matcher(line):
return line
except TimeoutError:
raise TimeoutError('\n'.join([
('Timeout ({}s) waiting for logs matching {}.'.format(
timeout, matcher)),
'Last few log lines:',
_last_few_log_lines(container),
]))
raise RuntimeError('\n'.join([
'Logs matching {} not found.'.format(matcher),
'Last few log lines:',
_last_few_log_lines(container),
])) | def function[wait_for_logs_matching, parameter[container, matcher, timeout, encoding]]:
constant[
Wait for matching log line(s) from the given container by streaming the
container's stdout and/or stderr outputs.
Each log line is decoded and any trailing whitespace is stripped before the
line is matched.
:param ~docker.models.containers.Container container:
Container who's log lines to wait for.
:param matcher:
Callable that returns True once it has matched a decoded log line(s).
:param timeout:
Timeout value in seconds.
:param encoding:
Encoding to use when decoding container output to strings.
:param logs_kwargs:
Additional keyword arguments to pass to ``container.logs()``. For
example, the ``stdout`` and ``stderr`` boolean arguments can be used to
determine whether to stream stdout or stderr or both (the default).
:returns:
The final matching log line.
:raises TimeoutError:
When the timeout value is reached before matching log lines have been
found.
:raises RuntimeError:
When all log lines have been consumed but matching log lines have not
been found (the container must have stopped for its stream to have
ended without error).
]
<ast.Try object at 0x7da1b25359c0>
<ast.Raise object at 0x7da1b2534640> | keyword[def] identifier[wait_for_logs_matching] ( identifier[container] , identifier[matcher] , identifier[timeout] = literal[int] , identifier[encoding] = literal[string] ,
** identifier[logs_kwargs] ):
literal[string]
keyword[try] :
keyword[for] identifier[line] keyword[in] identifier[stream_logs] ( identifier[container] , identifier[timeout] = identifier[timeout] ,** identifier[logs_kwargs] ):
identifier[line] = identifier[line] . identifier[decode] ( identifier[encoding] ). identifier[rstrip] ()
keyword[if] identifier[matcher] ( identifier[line] ):
keyword[return] identifier[line]
keyword[except] identifier[TimeoutError] :
keyword[raise] identifier[TimeoutError] ( literal[string] . identifier[join] ([
( literal[string] . identifier[format] (
identifier[timeout] , identifier[matcher] )),
literal[string] ,
identifier[_last_few_log_lines] ( identifier[container] ),
]))
keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[join] ([
literal[string] . identifier[format] ( identifier[matcher] ),
literal[string] ,
identifier[_last_few_log_lines] ( identifier[container] ),
])) | def wait_for_logs_matching(container, matcher, timeout=10, encoding='utf-8', **logs_kwargs):
"""
Wait for matching log line(s) from the given container by streaming the
container's stdout and/or stderr outputs.
Each log line is decoded and any trailing whitespace is stripped before the
line is matched.
:param ~docker.models.containers.Container container:
        Container whose log lines to wait for.
:param matcher:
Callable that returns True once it has matched a decoded log line(s).
:param timeout:
Timeout value in seconds.
:param encoding:
Encoding to use when decoding container output to strings.
:param logs_kwargs:
Additional keyword arguments to pass to ``container.logs()``. For
example, the ``stdout`` and ``stderr`` boolean arguments can be used to
determine whether to stream stdout or stderr or both (the default).
:returns:
The final matching log line.
:raises TimeoutError:
When the timeout value is reached before matching log lines have been
found.
:raises RuntimeError:
When all log lines have been consumed but matching log lines have not
been found (the container must have stopped for its stream to have
ended without error).
"""
try:
for line in stream_logs(container, timeout=timeout, **logs_kwargs):
# Drop the trailing newline
line = line.decode(encoding).rstrip()
if matcher(line):
return line # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['try'], data=[]]
except TimeoutError:
raise TimeoutError('\n'.join(['Timeout ({}s) waiting for logs matching {}.'.format(timeout, matcher), 'Last few log lines:', _last_few_log_lines(container)])) # depends on [control=['except'], data=[]]
raise RuntimeError('\n'.join(['Logs matching {} not found.'.format(matcher), 'Last few log lines:', _last_few_log_lines(container)])) |
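# Example matcher callables for the helper above: any predicate over a
# decoded line works, e.g. a bound regex search (the pattern is invented).
import re

matcher = re.compile(r'listening on port \d+').search
assert matcher('server listening on port 8080')
assert not matcher('still starting up')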
def send_duplicate_notification(self):
"""
Send a notification about a duplicate signup.
"""
email_utils.send_email(
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[self.email],
subject=_("Registration Attempt"),
template_name="rest_email_auth/emails/duplicate-email",
)
logger.info("Sent duplicate email notification to: %s", self.email) | def function[send_duplicate_notification, parameter[self]]:
constant[
Send a notification about a duplicate signup.
]
call[name[email_utils].send_email, parameter[]]
call[name[logger].info, parameter[constant[Sent duplicate email notification to: %s], name[self].email]] | keyword[def] identifier[send_duplicate_notification] ( identifier[self] ):
literal[string]
identifier[email_utils] . identifier[send_email] (
identifier[from_email] = identifier[settings] . identifier[DEFAULT_FROM_EMAIL] ,
identifier[recipient_list] =[ identifier[self] . identifier[email] ],
identifier[subject] = identifier[_] ( literal[string] ),
identifier[template_name] = literal[string] ,
)
identifier[logger] . identifier[info] ( literal[string] , identifier[self] . identifier[email] ) | def send_duplicate_notification(self):
"""
Send a notification about a duplicate signup.
"""
email_utils.send_email(from_email=settings.DEFAULT_FROM_EMAIL, recipient_list=[self.email], subject=_('Registration Attempt'), template_name='rest_email_auth/emails/duplicate-email')
logger.info('Sent duplicate email notification to: %s', self.email) |
def TransferFrom(self, wallet, from_addr, to_addr, amount):
"""
Transfer a specified amount of a token from the wallet specified in the `from_addr` to the `to_addr`
if the originator `wallet` has been approved to do so.
Args:
wallet (neo.Wallets.Wallet): a wallet instance.
from_addr (str): public address of the account to transfer the given amount from.
to_addr (str): public address of the account to transfer the given amount to.
amount (int): quantity to send.
Returns:
tuple:
InvocationTransaction: the transaction.
int: the transaction fee.
list: the neo VM evaluation stack results.
"""
invoke_args = [self.ScriptHash.ToString(), 'transferFrom',
[PromptUtils.parse_param(from_addr, wallet), PromptUtils.parse_param(to_addr, wallet), PromptUtils.parse_param(amount)]]
tx, fee, results, num_ops, engine_success = TestInvokeContract(wallet, invoke_args, None, True)
return tx, fee, results | def function[TransferFrom, parameter[self, wallet, from_addr, to_addr, amount]]:
constant[
Transfer a specified amount of a token from the wallet specified in the `from_addr` to the `to_addr`
if the originator `wallet` has been approved to do so.
Args:
wallet (neo.Wallets.Wallet): a wallet instance.
from_addr (str): public address of the account to transfer the given amount from.
to_addr (str): public address of the account to transfer the given amount to.
amount (int): quantity to send.
Returns:
tuple:
InvocationTransaction: the transaction.
int: the transaction fee.
list: the neo VM evaluation stack results.
]
variable[invoke_args] assign[=] list[[<ast.Call object at 0x7da20c7cae00>, <ast.Constant object at 0x7da20c7c9f90>, <ast.List object at 0x7da20c7c8040>]]
<ast.Tuple object at 0x7da1b1dd3760> assign[=] call[name[TestInvokeContract], parameter[name[wallet], name[invoke_args], constant[None], constant[True]]]
return[tuple[[<ast.Name object at 0x7da1b1dd1750>, <ast.Name object at 0x7da1b1dd1e70>, <ast.Name object at 0x7da1b1dd3be0>]]] | keyword[def] identifier[TransferFrom] ( identifier[self] , identifier[wallet] , identifier[from_addr] , identifier[to_addr] , identifier[amount] ):
literal[string]
identifier[invoke_args] =[ identifier[self] . identifier[ScriptHash] . identifier[ToString] (), literal[string] ,
[ identifier[PromptUtils] . identifier[parse_param] ( identifier[from_addr] , identifier[wallet] ), identifier[PromptUtils] . identifier[parse_param] ( identifier[to_addr] , identifier[wallet] ), identifier[PromptUtils] . identifier[parse_param] ( identifier[amount] )]]
identifier[tx] , identifier[fee] , identifier[results] , identifier[num_ops] , identifier[engine_success] = identifier[TestInvokeContract] ( identifier[wallet] , identifier[invoke_args] , keyword[None] , keyword[True] )
keyword[return] identifier[tx] , identifier[fee] , identifier[results] | def TransferFrom(self, wallet, from_addr, to_addr, amount):
"""
Transfer a specified amount of a token from the wallet specified in the `from_addr` to the `to_addr`
if the originator `wallet` has been approved to do so.
Args:
wallet (neo.Wallets.Wallet): a wallet instance.
from_addr (str): public address of the account to transfer the given amount from.
to_addr (str): public address of the account to transfer the given amount to.
amount (int): quantity to send.
Returns:
tuple:
InvocationTransaction: the transaction.
int: the transaction fee.
list: the neo VM evaluation stack results.
"""
invoke_args = [self.ScriptHash.ToString(), 'transferFrom', [PromptUtils.parse_param(from_addr, wallet), PromptUtils.parse_param(to_addr, wallet), PromptUtils.parse_param(amount)]]
(tx, fee, results, num_ops, engine_success) = TestInvokeContract(wallet, invoke_args, None, True)
return (tx, fee, results) |
def parse_iso_utc(s):
"""
Parses an ISO time with a hard-coded Z for zulu-time (UTC) at the end. Other timezones are
not supported.
:param str s: the ISO-formatted time
:rtype: datetime.datetime
    :return: a timezone-naive datetime object
>>> parse_iso_utc('2016-04-27T00:28:04.000Z')
datetime.datetime(2016, 4, 27, 0, 28, 4)
>>> parse_iso_utc('2016-04-27T00:28:04Z')
datetime.datetime(2016, 4, 27, 0, 28, 4)
>>> parse_iso_utc('2016-04-27T00:28:04X')
Traceback (most recent call last):
...
ValueError: Not a valid ISO datetime in UTC: 2016-04-27T00:28:04X
"""
m = rfc3339_datetime_re().match(s)
if not m:
        raise ValueError('Not a valid ISO datetime in UTC: ' + s)
else:
fmt = '%Y-%m-%dT%H:%M:%S' + ('.%f' if m.group(7) else '') + 'Z'
return datetime.datetime.strptime(s, fmt) | def function[parse_iso_utc, parameter[s]]:
constant[
Parses an ISO time with a hard-coded Z for zulu-time (UTC) at the end. Other timezones are
not supported.
:param str s: the ISO-formatted time
:rtype: datetime.datetime
    :return: a timezone-naive datetime object
>>> parse_iso_utc('2016-04-27T00:28:04.000Z')
datetime.datetime(2016, 4, 27, 0, 28, 4)
>>> parse_iso_utc('2016-04-27T00:28:04Z')
datetime.datetime(2016, 4, 27, 0, 28, 4)
>>> parse_iso_utc('2016-04-27T00:28:04X')
Traceback (most recent call last):
...
ValueError: Not a valid ISO datetime in UTC: 2016-04-27T00:28:04X
]
variable[m] assign[=] call[call[name[rfc3339_datetime_re], parameter[]].match, parameter[name[s]]]
if <ast.UnaryOp object at 0x7da20c991b10> begin[:]
<ast.Raise object at 0x7da20c990cd0> | keyword[def] identifier[parse_iso_utc] ( identifier[s] ):
literal[string]
identifier[m] = identifier[rfc3339_datetime_re] (). identifier[match] ( identifier[s] )
keyword[if] keyword[not] identifier[m] :
keyword[raise] identifier[ValueError] ( literal[string] + identifier[s] )
keyword[else] :
identifier[fmt] = literal[string] +( literal[string] keyword[if] identifier[m] . identifier[group] ( literal[int] ) keyword[else] literal[string] )+ literal[string]
keyword[return] identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[s] , identifier[fmt] ) | def parse_iso_utc(s):
"""
Parses an ISO time with a hard-coded Z for zulu-time (UTC) at the end. Other timezones are
not supported.
:param str s: the ISO-formatted time
:rtype: datetime.datetime
    :return: a timezone-naive datetime object
>>> parse_iso_utc('2016-04-27T00:28:04.000Z')
datetime.datetime(2016, 4, 27, 0, 28, 4)
>>> parse_iso_utc('2016-04-27T00:28:04Z')
datetime.datetime(2016, 4, 27, 0, 28, 4)
>>> parse_iso_utc('2016-04-27T00:28:04X')
Traceback (most recent call last):
...
ValueError: Not a valid ISO datetime in UTC: 2016-04-27T00:28:04X
"""
m = rfc3339_datetime_re().match(s)
if not m:
raise ValueError('Not a valid ISO datetime in UTC: ' + s) # depends on [control=['if'], data=[]]
else:
fmt = '%Y-%m-%dT%H:%M:%S' + ('.%f' if m.group(7) else '') + 'Z'
return datetime.datetime.strptime(s, fmt) |
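parse_iso_utc leans on an rfc3339_datetime_re() helper that is not shown in this row; below is a plausible sketch, sized so that group(7) is the optional fractional-seconds part the parser checks. The helper in the source module may differ.

import re

def rfc3339_datetime_re():
    # Groups 1-6: date/time components; group 7: optional fractional seconds.
    return re.compile(r'^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(\.\d+)?Z$')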
def list_buckets(self):
'''List all buckets'''
result = []
for bucket in self.s3.list_buckets().get('Buckets') or []:
result.append({
'name': S3URL.combine('s3', bucket['Name'], ''),
'is_dir': True,
'size': 0,
'last_modified': bucket['CreationDate']
})
return result | def function[list_buckets, parameter[self]]:
constant[List all buckets]
variable[result] assign[=] list[[]]
for taget[name[bucket]] in starred[<ast.BoolOp object at 0x7da1b0284c70>] begin[:]
call[name[result].append, parameter[dictionary[[<ast.Constant object at 0x7da1b0317b20>, <ast.Constant object at 0x7da1b03162f0>, <ast.Constant object at 0x7da1b0317610>, <ast.Constant object at 0x7da1b03171c0>], [<ast.Call object at 0x7da1b03173d0>, <ast.Constant object at 0x7da1b033a9e0>, <ast.Constant object at 0x7da1b033bdc0>, <ast.Subscript object at 0x7da1b0339900>]]]]
return[name[result]] | keyword[def] identifier[list_buckets] ( identifier[self] ):
literal[string]
identifier[result] =[]
keyword[for] identifier[bucket] keyword[in] identifier[self] . identifier[s3] . identifier[list_buckets] (). identifier[get] ( literal[string] ) keyword[or] []:
identifier[result] . identifier[append] ({
literal[string] : identifier[S3URL] . identifier[combine] ( literal[string] , identifier[bucket] [ literal[string] ], literal[string] ),
literal[string] : keyword[True] ,
literal[string] : literal[int] ,
literal[string] : identifier[bucket] [ literal[string] ]
})
keyword[return] identifier[result] | def list_buckets(self):
"""List all buckets"""
result = []
for bucket in self.s3.list_buckets().get('Buckets') or []:
result.append({'name': S3URL.combine('s3', bucket['Name'], ''), 'is_dir': True, 'size': 0, 'last_modified': bucket['CreationDate']}) # depends on [control=['for'], data=['bucket']]
return result |
def readstream(self, stream):
""" Reads the specified stream and parses the token elements generated
from tokenizing the input data.
`stream`
``File``-like object.
Returns boolean.
"""
self._reset()
try:
# tokenize input stream
self._lexer = SettingLexer()
self._lexer.readstream(stream)
# parse tokens into AST
self._parse()
return True
except IOError:
self._reset()
return False | def function[readstream, parameter[self, stream]]:
constant[ Reads the specified stream and parses the token elements generated
from tokenizing the input data.
`stream`
``File``-like object.
Returns boolean.
]
call[name[self]._reset, parameter[]]
<ast.Try object at 0x7da18f720d00> | keyword[def] identifier[readstream] ( identifier[self] , identifier[stream] ):
literal[string]
identifier[self] . identifier[_reset] ()
keyword[try] :
identifier[self] . identifier[_lexer] = identifier[SettingLexer] ()
identifier[self] . identifier[_lexer] . identifier[readstream] ( identifier[stream] )
identifier[self] . identifier[_parse] ()
keyword[return] keyword[True]
keyword[except] identifier[IOError] :
identifier[self] . identifier[_reset] ()
keyword[return] keyword[False] | def readstream(self, stream):
""" Reads the specified stream and parses the token elements generated
from tokenizing the input data.
`stream`
``File``-like object.
Returns boolean.
"""
self._reset()
try:
# tokenize input stream
self._lexer = SettingLexer()
self._lexer.readstream(stream)
# parse tokens into AST
self._parse()
return True # depends on [control=['try'], data=[]]
except IOError:
self._reset()
return False # depends on [control=['except'], data=[]] |
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc | def function[_get_normal_name, parameter[orig_enc]]:
constant[Imitates get_normal_name in tokenizer.c.]
variable[enc] assign[=] call[call[call[name[orig_enc]][<ast.Slice object at 0x7da204621840>].lower, parameter[]].replace, parameter[constant[_], constant[-]]]
if <ast.BoolOp object at 0x7da204620640> begin[:]
return[constant[utf-8]]
if <ast.BoolOp object at 0x7da204623e50> begin[:]
return[constant[iso-8859-1]]
return[name[orig_enc]] | keyword[def] identifier[_get_normal_name] ( identifier[orig_enc] ):
literal[string]
identifier[enc] = identifier[orig_enc] [: literal[int] ]. identifier[lower] (). identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[enc] == literal[string] keyword[or] identifier[enc] . identifier[startswith] ( literal[string] ):
keyword[return] literal[string]
keyword[if] identifier[enc] keyword[in] ( literal[string] , literal[string] , literal[string] ) keyword[or] identifier[enc] . identifier[startswith] (( literal[string] , literal[string] , literal[string] )):
keyword[return] literal[string]
keyword[return] identifier[orig_enc] | def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace('_', '-')
if enc == 'utf-8' or enc.startswith('utf-8-'):
return 'utf-8' # depends on [control=['if'], data=[]]
if enc in ('latin-1', 'iso-8859-1', 'iso-latin-1') or enc.startswith(('latin-1-', 'iso-8859-1-', 'iso-latin-1-')):
return 'iso-8859-1' # depends on [control=['if'], data=[]]
return orig_enc |
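A few illustrative calls (ours, not from the source) showing the normalization behavior:

assert _get_normal_name('UTF-8') == 'utf-8'
assert _get_normal_name('utf_8') == 'utf-8'              # underscores become hyphens
assert _get_normal_name('Latin-1') == 'iso-8859-1'
assert _get_normal_name('iso-latin-1-euro') == 'iso-8859-1'
assert _get_normal_name('koi8-r') == 'koi8-r'            # unknown names pass through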
def styblinski_tang(theta):
"""Styblinski-Tang function"""
x, y = theta
obj = 0.5 * (x ** 4 - 16 * x ** 2 + 5 * x + y ** 4 - 16 * y ** 2 + 5 * y)
grad = np.array([
2 * x ** 3 - 16 * x + 2.5,
2 * y ** 3 - 16 * y + 2.5,
])
return obj, grad | def function[styblinski_tang, parameter[theta]]:
constant[Styblinski-Tang function]
<ast.Tuple object at 0x7da1b0089b70> assign[=] name[theta]
variable[obj] assign[=] binary_operation[constant[0.5] * binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[x] ** constant[4]] - binary_operation[constant[16] * binary_operation[name[x] ** constant[2]]]] + binary_operation[constant[5] * name[x]]] + binary_operation[name[y] ** constant[4]]] - binary_operation[constant[16] * binary_operation[name[y] ** constant[2]]]] + binary_operation[constant[5] * name[y]]]]
variable[grad] assign[=] call[name[np].array, parameter[list[[<ast.BinOp object at 0x7da1afe07af0>, <ast.BinOp object at 0x7da1afe07e50>]]]]
return[tuple[[<ast.Name object at 0x7da1b0088310>, <ast.Name object at 0x7da1b0088280>]]] | keyword[def] identifier[styblinski_tang] ( identifier[theta] ):
literal[string]
identifier[x] , identifier[y] = identifier[theta]
identifier[obj] = literal[int] *( identifier[x] ** literal[int] - literal[int] * identifier[x] ** literal[int] + literal[int] * identifier[x] + identifier[y] ** literal[int] - literal[int] * identifier[y] ** literal[int] + literal[int] * identifier[y] )
identifier[grad] = identifier[np] . identifier[array] ([
literal[int] * identifier[x] ** literal[int] - literal[int] * identifier[x] + literal[int] ,
literal[int] * identifier[y] ** literal[int] - literal[int] * identifier[y] + literal[int] ,
])
keyword[return] identifier[obj] , identifier[grad] | def styblinski_tang(theta):
"""Styblinski-Tang function"""
(x, y) = theta
obj = 0.5 * (x ** 4 - 16 * x ** 2 + 5 * x + y ** 4 - 16 * y ** 2 + 5 * y)
grad = np.array([2 * x ** 3 - 16 * x + 2.5, 2 * y ** 3 - 16 * y + 2.5])
return (obj, grad) |
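A finite-difference check of the analytic gradient; the test point and tolerance are ours.

import numpy as np

theta = np.array([1.3, -0.7])
obj, grad = styblinski_tang(theta)
eps = 1e-6
numeric = np.array([
    (styblinski_tang(theta + eps * e)[0] - styblinski_tang(theta - eps * e)[0]) / (2 * eps)
    for e in np.eye(2)
])
assert np.allclose(grad, numeric, atol=1e-4)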
def match_field(self, field, value, required=True, new_group=False):
"""Add a ``field:value`` term to the query.
Matches will have the ``value`` in the ``field``.
Arguments:
field (str): The field to check for the value.
The field must be namespaced according to Elasticsearch rules
using the dot syntax.
For example, ``"mdf.source_name"`` is the ``source_name`` field
of the ``mdf`` dictionary.
value (str): The value to match.
required (bool): If ``True``, will add term with ``AND``.
If ``False``, will use ``OR``. **Default:** ``True``.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self
"""
# If not the start of the query string, add an AND or OR
if self.initialized:
if required:
self._and_join(new_group)
else:
self._or_join(new_group)
self._field(field, value)
return self | def function[match_field, parameter[self, field, value, required, new_group]]:
constant[Add a ``field:value`` term to the query.
Matches will have the ``value`` in the ``field``.
Arguments:
field (str): The field to check for the value.
The field must be namespaced according to Elasticsearch rules
using the dot syntax.
For example, ``"mdf.source_name"`` is the ``source_name`` field
of the ``mdf`` dictionary.
value (str): The value to match.
required (bool): If ``True``, will add term with ``AND``.
If ``False``, will use ``OR``. **Default:** ``True``.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self
]
if name[self].initialized begin[:]
if name[required] begin[:]
call[name[self]._and_join, parameter[name[new_group]]]
call[name[self]._field, parameter[name[field], name[value]]]
return[name[self]] | keyword[def] identifier[match_field] ( identifier[self] , identifier[field] , identifier[value] , identifier[required] = keyword[True] , identifier[new_group] = keyword[False] ):
literal[string]
keyword[if] identifier[self] . identifier[initialized] :
keyword[if] identifier[required] :
identifier[self] . identifier[_and_join] ( identifier[new_group] )
keyword[else] :
identifier[self] . identifier[_or_join] ( identifier[new_group] )
identifier[self] . identifier[_field] ( identifier[field] , identifier[value] )
keyword[return] identifier[self] | def match_field(self, field, value, required=True, new_group=False):
"""Add a ``field:value`` term to the query.
Matches will have the ``value`` in the ``field``.
Arguments:
field (str): The field to check for the value.
The field must be namespaced according to Elasticsearch rules
using the dot syntax.
For example, ``"mdf.source_name"`` is the ``source_name`` field
of the ``mdf`` dictionary.
value (str): The value to match.
required (bool): If ``True``, will add term with ``AND``.
If ``False``, will use ``OR``. **Default:** ``True``.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self
"""
# If not the start of the query string, add an AND or OR
if self.initialized:
if required:
self._and_join(new_group) # depends on [control=['if'], data=[]]
else:
self._or_join(new_group) # depends on [control=['if'], data=[]]
self._field(field, value)
return self |
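Because the method returns self, calls chain fluently; a hypothetical session (the constructor and field names are assumptions):

sh = (SearchHelper()
      .match_field('mdf.source_name', 'oqmd')
      .match_field('mdf.resource_type', 'record', required=False, new_group=True))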
def report_version(self, data):
"""
This method processes the report version message, sent asynchronously by Firmata when it starts up
or after refresh_report_version() is called
Use the api method api_get_version to retrieve this information
:param data: Message data from Firmata
:return: No return value.
"""
self.firmata_version.append(data[0]) # add major
        self.firmata_version.append(data[1])  # add minor
constant[
This method processes the report version message, sent asynchronously by Firmata when it starts up
or after refresh_report_version() is called
Use the api method api_get_version to retrieve this information
:param data: Message data from Firmata
:return: No return value.
]
call[name[self].firmata_version.append, parameter[call[name[data]][constant[0]]]]
call[name[self].firmata_version.append, parameter[call[name[data]][constant[1]]]] | keyword[def] identifier[report_version] ( identifier[self] , identifier[data] ):
literal[string]
identifier[self] . identifier[firmata_version] . identifier[append] ( identifier[data] [ literal[int] ])
identifier[self] . identifier[firmata_version] . identifier[append] ( identifier[data] [ literal[int] ]) | def report_version(self, data):
"""
This method processes the report version message, sent asynchronously by Firmata when it starts up
or after refresh_report_version() is called
Use the api method api_get_version to retrieve this information
:param data: Message data from Firmata
:return: No return value.
"""
self.firmata_version.append(data[0]) # add major
        self.firmata_version.append(data[1])  # add minor
def foldr(f, seq, default=_no_default):
"""Fold a function over a sequence with right associativity.
Parameters
----------
f : callable[any, any]
The function to reduce the sequence with.
The first argument will be the element of the sequence; the second
argument will be the accumulator.
seq : iterable[any]
The sequence to reduce.
default : any, optional
The starting value to reduce with. If not provided, the sequence
cannot be empty, and the last value of the sequence will be used.
Returns
-------
folded : any
The folded value.
Notes
-----
    This function works by reducing the list in a right associative way.
For example, imagine we are folding with ``operator.add`` or ``+``:
.. code-block:: python
foldr(add, seq) -> seq[0] + (seq[1] + (seq[2] + (...seq[-1], default)))
In the more general case with an arbitrary function, ``foldr`` will expand
like so:
.. code-block:: python
foldr(f, seq) -> f(seq[0], f(seq[1], f(seq[2], ...f(seq[-1], default))))
For a more in depth discussion of left and right folds, see:
`https://en.wikipedia.org/wiki/Fold_(higher-order_function)`_
The images in that page are very good for showing the differences between
``foldr`` and ``foldl`` (``reduce``).
.. note::
        For performance reasons it is best to pass a strict (non-lazy) sequence,
for example, a list.
See Also
--------
:func:`functools.reduce`
:func:`sum`
"""
return reduce(
flip(f),
reversed(seq),
*(default,) if default is not _no_default else ()
) | def function[foldr, parameter[f, seq, default]]:
constant[Fold a function over a sequence with right associativity.
Parameters
----------
f : callable[any, any]
The function to reduce the sequence with.
The first argument will be the element of the sequence; the second
argument will be the accumulator.
seq : iterable[any]
The sequence to reduce.
default : any, optional
The starting value to reduce with. If not provided, the sequence
cannot be empty, and the last value of the sequence will be used.
Returns
-------
folded : any
The folded value.
Notes
-----
    This function works by reducing the list in a right associative way.
For example, imagine we are folding with ``operator.add`` or ``+``:
.. code-block:: python
foldr(add, seq) -> seq[0] + (seq[1] + (seq[2] + (...seq[-1], default)))
In the more general case with an arbitrary function, ``foldr`` will expand
like so:
.. code-block:: python
foldr(f, seq) -> f(seq[0], f(seq[1], f(seq[2], ...f(seq[-1], default))))
For a more in depth discussion of left and right folds, see:
`https://en.wikipedia.org/wiki/Fold_(higher-order_function)`_
The images in that page are very good for showing the differences between
``foldr`` and ``foldl`` (``reduce``).
.. note::
        For performance reasons it is best to pass a strict (non-lazy) sequence,
for example, a list.
See Also
--------
:func:`functools.reduce`
:func:`sum`
]
return[call[name[reduce], parameter[call[name[flip], parameter[name[f]]], call[name[reversed], parameter[name[seq]]], <ast.Starred object at 0x7da1b1e8d540>]]] | keyword[def] identifier[foldr] ( identifier[f] , identifier[seq] , identifier[default] = identifier[_no_default] ):
literal[string]
keyword[return] identifier[reduce] (
identifier[flip] ( identifier[f] ),
identifier[reversed] ( identifier[seq] ),
*( identifier[default] ,) keyword[if] identifier[default] keyword[is] keyword[not] identifier[_no_default] keyword[else] ()
) | def foldr(f, seq, default=_no_default):
"""Fold a function over a sequence with right associativity.
Parameters
----------
f : callable[any, any]
The function to reduce the sequence with.
The first argument will be the element of the sequence; the second
argument will be the accumulator.
seq : iterable[any]
The sequence to reduce.
default : any, optional
The starting value to reduce with. If not provided, the sequence
cannot be empty, and the last value of the sequence will be used.
Returns
-------
folded : any
The folded value.
Notes
-----
    This function works by reducing the list in a right associative way.
For example, imagine we are folding with ``operator.add`` or ``+``:
.. code-block:: python
foldr(add, seq) -> seq[0] + (seq[1] + (seq[2] + (...seq[-1], default)))
In the more general case with an arbitrary function, ``foldr`` will expand
like so:
.. code-block:: python
foldr(f, seq) -> f(seq[0], f(seq[1], f(seq[2], ...f(seq[-1], default))))
For a more in depth discussion of left and right folds, see:
`https://en.wikipedia.org/wiki/Fold_(higher-order_function)`_
The images in that page are very good for showing the differences between
``foldr`` and ``foldl`` (``reduce``).
.. note::
        For performance reasons it is best to pass a strict (non-lazy) sequence,
for example, a list.
See Also
--------
:func:`functools.reduce`
:func:`sum`
"""
return reduce(flip(f), reversed(seq), *((default,) if default is not _no_default else ())) |
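A small demonstration of the associativity difference, assuming flip swaps the two arguments as the docstring's expansion implies:

from functools import reduce
from operator import sub

assert foldr(sub, [1, 2, 3], 0) == 2    # 1 - (2 - (3 - 0))
assert reduce(sub, [1, 2, 3], 0) == -6  # ((0 - 1) - 2) - 3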
def findTargets(self):
'''
Finds the target Views (i.e. for touches).
'''
if DEBUG:
print >> sys.stderr, "findTargets()"
LISTVIEW_CLASS = 'android.widget.ListView'
''' The ListView class name '''
self.targets = []
''' The list of target coordinates (x1, y1, x2, y2) '''
self.targetViews = []
''' The list of target Views '''
if CHECK_KEYBOARD_SHOWN:
if self.device.isKeyboardShown():
print >> sys.stderr, "#### keyboard is show but handling it is not implemented yet ####"
# FIXME: still no windows in uiautomator
window = -1
else:
window = -1
else:
window = -1
if self.vc:
dump = self.vc.dump(window=window, sleep=0.1)
self.printOperation(None, Operation.DUMP, window, dump)
else:
dump = []
self.dump = dump
# the root element cannot be deleted from Treeview once added.
# We have no option but to recreate it
self.viewTree = ViewTree(self.sideFrame)
for v in dump:
if DEBUG:
print >> sys.stderr, " findTargets: analyzing", v.getClass(), v.getId()
if v.getClass() == LISTVIEW_CLASS:
# We may want to touch ListView elements, not just the ListView
continue
parent = v.getParent()
if (parent and parent.getClass() == LISTVIEW_CLASS and self.isClickableCheckableOrFocusable(parent)) \
or self.isClickableCheckableOrFocusable(v):
# If this is a touchable ListView, let's add its children instead
# or add it if it's touchable, focusable, whatever
((x1, y1), (x2, y2)) = v.getCoords()
if DEBUG:
print >> sys.stderr, "appending target", ((x1, y1, x2, y2))
v.setTarget(True)
self.targets.append((x1, y1, x2, y2))
self.targetViews.append(v)
target = True
else:
target = False
if self.vc:
self.vc.traverse(transform=self.populateViewTree) | def function[findTargets, parameter[self]]:
constant[
Finds the target Views (i.e. for touches).
]
if name[DEBUG] begin[:]
tuple[[<ast.BinOp object at 0x7da1b1da2440>, <ast.Constant object at 0x7da1b1da3400>]]
variable[LISTVIEW_CLASS] assign[=] constant[android.widget.ListView]
constant[ The ListView class name ]
name[self].targets assign[=] list[[]]
constant[ The list of target coordinates (x1, y1, x2, y2) ]
name[self].targetViews assign[=] list[[]]
constant[ The list of target Views ]
if name[CHECK_KEYBOARD_SHOWN] begin[:]
if call[name[self].device.isKeyboardShown, parameter[]] begin[:]
tuple[[<ast.BinOp object at 0x7da1b1da3550>, <ast.Constant object at 0x7da1b1da0a00>]]
variable[window] assign[=] <ast.UnaryOp object at 0x7da1b1da0250>
if name[self].vc begin[:]
variable[dump] assign[=] call[name[self].vc.dump, parameter[]]
call[name[self].printOperation, parameter[constant[None], name[Operation].DUMP, name[window], name[dump]]]
name[self].dump assign[=] name[dump]
name[self].viewTree assign[=] call[name[ViewTree], parameter[name[self].sideFrame]]
for taget[name[v]] in starred[name[dump]] begin[:]
if name[DEBUG] begin[:]
tuple[[<ast.BinOp object at 0x7da1b1d0d480>, <ast.Constant object at 0x7da1b1d0cd90>, <ast.Call object at 0x7da1b1d0d4b0>, <ast.Call object at 0x7da1b1d0e6e0>]]
if compare[call[name[v].getClass, parameter[]] equal[==] name[LISTVIEW_CLASS]] begin[:]
continue
variable[parent] assign[=] call[name[v].getParent, parameter[]]
if <ast.BoolOp object at 0x7da1b1d0f9d0> begin[:]
<ast.Tuple object at 0x7da1b1d82800> assign[=] call[name[v].getCoords, parameter[]]
if name[DEBUG] begin[:]
tuple[[<ast.BinOp object at 0x7da1b1d83d60>, <ast.Constant object at 0x7da1b1d80790>, <ast.Tuple object at 0x7da1b1d82dd0>]]
call[name[v].setTarget, parameter[constant[True]]]
call[name[self].targets.append, parameter[tuple[[<ast.Name object at 0x7da1b1d81090>, <ast.Name object at 0x7da1b1d81f90>, <ast.Name object at 0x7da1b1d80d00>, <ast.Name object at 0x7da1b1d809d0>]]]]
call[name[self].targetViews.append, parameter[name[v]]]
variable[target] assign[=] constant[True]
if name[self].vc begin[:]
call[name[self].vc.traverse, parameter[]] | keyword[def] identifier[findTargets] ( identifier[self] ):
literal[string]
keyword[if] identifier[DEBUG] :
identifier[print] >> identifier[sys] . identifier[stderr] , literal[string]
identifier[LISTVIEW_CLASS] = literal[string]
literal[string]
identifier[self] . identifier[targets] =[]
literal[string]
identifier[self] . identifier[targetViews] =[]
literal[string]
keyword[if] identifier[CHECK_KEYBOARD_SHOWN] :
keyword[if] identifier[self] . identifier[device] . identifier[isKeyboardShown] ():
identifier[print] >> identifier[sys] . identifier[stderr] , literal[string]
identifier[window] =- literal[int]
keyword[else] :
identifier[window] =- literal[int]
keyword[else] :
identifier[window] =- literal[int]
keyword[if] identifier[self] . identifier[vc] :
identifier[dump] = identifier[self] . identifier[vc] . identifier[dump] ( identifier[window] = identifier[window] , identifier[sleep] = literal[int] )
identifier[self] . identifier[printOperation] ( keyword[None] , identifier[Operation] . identifier[DUMP] , identifier[window] , identifier[dump] )
keyword[else] :
identifier[dump] =[]
identifier[self] . identifier[dump] = identifier[dump]
identifier[self] . identifier[viewTree] = identifier[ViewTree] ( identifier[self] . identifier[sideFrame] )
keyword[for] identifier[v] keyword[in] identifier[dump] :
keyword[if] identifier[DEBUG] :
identifier[print] >> identifier[sys] . identifier[stderr] , literal[string] , identifier[v] . identifier[getClass] (), identifier[v] . identifier[getId] ()
keyword[if] identifier[v] . identifier[getClass] ()== identifier[LISTVIEW_CLASS] :
keyword[continue]
identifier[parent] = identifier[v] . identifier[getParent] ()
keyword[if] ( identifier[parent] keyword[and] identifier[parent] . identifier[getClass] ()== identifier[LISTVIEW_CLASS] keyword[and] identifier[self] . identifier[isClickableCheckableOrFocusable] ( identifier[parent] )) keyword[or] identifier[self] . identifier[isClickableCheckableOrFocusable] ( identifier[v] ):
(( identifier[x1] , identifier[y1] ),( identifier[x2] , identifier[y2] ))= identifier[v] . identifier[getCoords] ()
keyword[if] identifier[DEBUG] :
identifier[print] >> identifier[sys] . identifier[stderr] , literal[string] ,(( identifier[x1] , identifier[y1] , identifier[x2] , identifier[y2] ))
identifier[v] . identifier[setTarget] ( keyword[True] )
identifier[self] . identifier[targets] . identifier[append] (( identifier[x1] , identifier[y1] , identifier[x2] , identifier[y2] ))
identifier[self] . identifier[targetViews] . identifier[append] ( identifier[v] )
identifier[target] = keyword[True]
keyword[else] :
identifier[target] = keyword[False]
keyword[if] identifier[self] . identifier[vc] :
identifier[self] . identifier[vc] . identifier[traverse] ( identifier[transform] = identifier[self] . identifier[populateViewTree] ) | def findTargets(self):
"""
Finds the target Views (i.e. for touches).
"""
if DEBUG:
(print >> sys.stderr, 'findTargets()') # depends on [control=['if'], data=[]]
LISTVIEW_CLASS = 'android.widget.ListView'
' The ListView class name '
self.targets = []
' The list of target coordinates (x1, y1, x2, y2) '
self.targetViews = []
' The list of target Views '
if CHECK_KEYBOARD_SHOWN:
if self.device.isKeyboardShown():
            (print >> sys.stderr, '#### keyboard is shown, but handling it is not implemented yet ####')
# FIXME: still no windows in uiautomator
window = -1 # depends on [control=['if'], data=[]]
else:
window = -1 # depends on [control=['if'], data=[]]
else:
window = -1
if self.vc:
dump = self.vc.dump(window=window, sleep=0.1)
self.printOperation(None, Operation.DUMP, window, dump) # depends on [control=['if'], data=[]]
else:
dump = []
self.dump = dump
# the root element cannot be deleted from Treeview once added.
# We have no option but to recreate it
self.viewTree = ViewTree(self.sideFrame)
for v in dump:
if DEBUG:
(print >> sys.stderr, ' findTargets: analyzing', v.getClass(), v.getId()) # depends on [control=['if'], data=[]]
if v.getClass() == LISTVIEW_CLASS:
# We may want to touch ListView elements, not just the ListView
continue # depends on [control=['if'], data=[]]
parent = v.getParent()
if parent and parent.getClass() == LISTVIEW_CLASS and self.isClickableCheckableOrFocusable(parent) or self.isClickableCheckableOrFocusable(v):
# If this is a touchable ListView, let's add its children instead
# or add it if it's touchable, focusable, whatever
((x1, y1), (x2, y2)) = v.getCoords()
if DEBUG:
(print >> sys.stderr, 'appending target', (x1, y1, x2, y2)) # depends on [control=['if'], data=[]]
v.setTarget(True)
self.targets.append((x1, y1, x2, y2))
self.targetViews.append(v)
target = True # depends on [control=['if'], data=[]]
else:
target = False # depends on [control=['for'], data=['v']]
if self.vc:
self.vc.traverse(transform=self.populateViewTree) # depends on [control=['if'], data=[]] |
def step(self, data):
"""
Run convolution over a single position. The data must be exactly as wide as the convolution filters.
:param data: Shape: (batch_size, kernel_width, num_hidden).
:return: Single result of a convolution. Shape: (batch_size, 1, num_hidden).
"""
# As we only run convolution over a single window that is exactly the size of the convolutional filter
# we can use FullyConnected instead of Convolution for efficiency reasons. Additionally we do not need to
# perform any masking.
num_hidden = self._pre_activation_num_hidden()
# (batch_size, num_hidden, kernel_width)
data = mx.sym.swapaxes(data, dim1=1, dim2=2)
# (batch_size, num_hidden * kernel_width)
data = mx.sym.reshape(data, shape=(0, -3))
# (preact_num_hidden, num_hidden * kernel_width)
weight = mx.sym.reshape(self.conv_weight, shape=(0, -3))
data_conv = mx.sym.FullyConnected(data=data,
weight=weight,
bias=self.conv_bias,
num_hidden=num_hidden)
# (batch_size, num_hidden, 1)
data_conv = mx.sym.expand_dims(data_conv, axis=2)
return self._post_convolution(data_conv) | def function[step, parameter[self, data]]:
constant[
Run convolution over a single position. The data must be exactly as wide as the convolution filters.
:param data: Shape: (batch_size, kernel_width, num_hidden).
:return: Single result of a convolution. Shape: (batch_size, 1, num_hidden).
]
variable[num_hidden] assign[=] call[name[self]._pre_activation_num_hidden, parameter[]]
variable[data] assign[=] call[name[mx].sym.swapaxes, parameter[name[data]]]
variable[data] assign[=] call[name[mx].sym.reshape, parameter[name[data]]]
variable[weight] assign[=] call[name[mx].sym.reshape, parameter[name[self].conv_weight]]
variable[data_conv] assign[=] call[name[mx].sym.FullyConnected, parameter[]]
variable[data_conv] assign[=] call[name[mx].sym.expand_dims, parameter[name[data_conv]]]
return[call[name[self]._post_convolution, parameter[name[data_conv]]]] | keyword[def] identifier[step] ( identifier[self] , identifier[data] ):
literal[string]
identifier[num_hidden] = identifier[self] . identifier[_pre_activation_num_hidden] ()
identifier[data] = identifier[mx] . identifier[sym] . identifier[swapaxes] ( identifier[data] , identifier[dim1] = literal[int] , identifier[dim2] = literal[int] )
identifier[data] = identifier[mx] . identifier[sym] . identifier[reshape] ( identifier[data] , identifier[shape] =( literal[int] ,- literal[int] ))
identifier[weight] = identifier[mx] . identifier[sym] . identifier[reshape] ( identifier[self] . identifier[conv_weight] , identifier[shape] =( literal[int] ,- literal[int] ))
identifier[data_conv] = identifier[mx] . identifier[sym] . identifier[FullyConnected] ( identifier[data] = identifier[data] ,
identifier[weight] = identifier[weight] ,
identifier[bias] = identifier[self] . identifier[conv_bias] ,
identifier[num_hidden] = identifier[num_hidden] )
identifier[data_conv] = identifier[mx] . identifier[sym] . identifier[expand_dims] ( identifier[data_conv] , identifier[axis] = literal[int] )
keyword[return] identifier[self] . identifier[_post_convolution] ( identifier[data_conv] ) | def step(self, data):
"""
Run convolution over a single position. The data must be exactly as wide as the convolution filters.
:param data: Shape: (batch_size, kernel_width, num_hidden).
:return: Single result of a convolution. Shape: (batch_size, 1, num_hidden).
"""
# As we only run convolution over a single window that is exactly the size of the convolutional filter
# we can use FullyConnected instead of Convolution for efficiency reasons. Additionally we do not need to
# perform any masking.
num_hidden = self._pre_activation_num_hidden()
# (batch_size, num_hidden, kernel_width)
data = mx.sym.swapaxes(data, dim1=1, dim2=2)
# (batch_size, num_hidden * kernel_width)
data = mx.sym.reshape(data, shape=(0, -3))
# (preact_num_hidden, num_hidden * kernel_width)
weight = mx.sym.reshape(self.conv_weight, shape=(0, -3))
data_conv = mx.sym.FullyConnected(data=data, weight=weight, bias=self.conv_bias, num_hidden=num_hidden)
# (batch_size, num_hidden, 1)
data_conv = mx.sym.expand_dims(data_conv, axis=2)
return self._post_convolution(data_conv) |
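The reshape trick works because a convolution over a single window that exactly spans the kernel is one matrix product; a NumPy check of that equivalence with made-up shapes:

import numpy as np

batch, kernel_width, num_hidden, out_hidden = 2, 3, 4, 5
data = np.random.randn(batch, kernel_width, num_hidden)
w = np.random.randn(out_hidden, num_hidden, kernel_width)  # conv weight layout
x = data.transpose(0, 2, 1).reshape(batch, -1)             # mirrors swapaxes + reshape(0, -3)
fc = x @ w.reshape(out_hidden, -1).T                       # FullyConnected view
conv = np.einsum('bkh,ohk->bo', data, w)                   # direct single-position convolution
assert np.allclose(fc, conv)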
def change_contact_host_notification_timeperiod(self, contact, notification_timeperiod):
"""Change contact host notification timeperiod value
Format of the line that triggers function call::
CHANGE_CONTACT_HOST_NOTIFICATION_TIMEPERIOD;<contact_name>;<notification_timeperiod>
:param contact: contact to edit
:type contact: alignak.objects.contact.Contact
:param notification_timeperiod: timeperiod to set
:type notification_timeperiod: alignak.objects.timeperiod.Timeperiod
:return: None
"""
# todo: deprecate this
contact.modified_host_attributes |= DICT_MODATTR["MODATTR_NOTIFICATION_TIMEPERIOD"].value
contact.host_notification_period = notification_timeperiod
self.send_an_element(contact.get_update_status_brok()) | def function[change_contact_host_notification_timeperiod, parameter[self, contact, notification_timeperiod]]:
constant[Change contact host notification timeperiod value
Format of the line that triggers function call::
CHANGE_CONTACT_HOST_NOTIFICATION_TIMEPERIOD;<contact_name>;<notification_timeperiod>
:param contact: contact to edit
:type contact: alignak.objects.contact.Contact
:param notification_timeperiod: timeperiod to set
:type notification_timeperiod: alignak.objects.timeperiod.Timeperiod
:return: None
]
<ast.AugAssign object at 0x7da18bc722f0>
name[contact].host_notification_period assign[=] name[notification_timeperiod]
call[name[self].send_an_element, parameter[call[name[contact].get_update_status_brok, parameter[]]]] | keyword[def] identifier[change_contact_host_notification_timeperiod] ( identifier[self] , identifier[contact] , identifier[notification_timeperiod] ):
literal[string]
identifier[contact] . identifier[modified_host_attributes] |= identifier[DICT_MODATTR] [ literal[string] ]. identifier[value]
identifier[contact] . identifier[host_notification_period] = identifier[notification_timeperiod]
identifier[self] . identifier[send_an_element] ( identifier[contact] . identifier[get_update_status_brok] ()) | def change_contact_host_notification_timeperiod(self, contact, notification_timeperiod):
"""Change contact host notification timeperiod value
Format of the line that triggers function call::
CHANGE_CONTACT_HOST_NOTIFICATION_TIMEPERIOD;<contact_name>;<notification_timeperiod>
:param contact: contact to edit
:type contact: alignak.objects.contact.Contact
:param notification_timeperiod: timeperiod to set
:type notification_timeperiod: alignak.objects.timeperiod.Timeperiod
:return: None
"""
# todo: deprecate this
contact.modified_host_attributes |= DICT_MODATTR['MODATTR_NOTIFICATION_TIMEPERIOD'].value
contact.host_notification_period = notification_timeperiod
self.send_an_element(contact.get_update_status_brok()) |
def in_dir(
config_dir=os.path.expanduser('~/.tmuxp'), extensions=['.yml', '.yaml', '.json']
):
"""
Return a list of configs in ``config_dir``.
Parameters
----------
config_dir : str
directory to search
extensions : list
filetypes to check (e.g. ``['.yaml', '.json']``).
Returns
-------
list
"""
configs = []
for filename in os.listdir(config_dir):
if is_config_file(filename, extensions) and not filename.startswith('.'):
configs.append(filename)
return configs | def function[in_dir, parameter[config_dir, extensions]]:
constant[
Return a list of configs in ``config_dir``.
Parameters
----------
config_dir : str
directory to search
extensions : list
filetypes to check (e.g. ``['.yaml', '.json']``).
Returns
-------
list
]
variable[configs] assign[=] list[[]]
for taget[name[filename]] in starred[call[name[os].listdir, parameter[name[config_dir]]]] begin[:]
if <ast.BoolOp object at 0x7da1b1d885b0> begin[:]
call[name[configs].append, parameter[name[filename]]]
return[name[configs]] | keyword[def] identifier[in_dir] (
identifier[config_dir] = identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] ), identifier[extensions] =[ literal[string] , literal[string] , literal[string] ]
):
literal[string]
identifier[configs] =[]
keyword[for] identifier[filename] keyword[in] identifier[os] . identifier[listdir] ( identifier[config_dir] ):
keyword[if] identifier[is_config_file] ( identifier[filename] , identifier[extensions] ) keyword[and] keyword[not] identifier[filename] . identifier[startswith] ( literal[string] ):
identifier[configs] . identifier[append] ( identifier[filename] )
keyword[return] identifier[configs] | def in_dir(config_dir=os.path.expanduser('~/.tmuxp'), extensions=['.yml', '.yaml', '.json']):
"""
Return a list of configs in ``config_dir``.
Parameters
----------
config_dir : str
directory to search
extensions : list
filetypes to check (e.g. ``['.yaml', '.json']``).
Returns
-------
list
"""
configs = []
for filename in os.listdir(config_dir):
if is_config_file(filename, extensions) and (not filename.startswith('.')):
configs.append(filename) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filename']]
return configs |
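Example calls (paths are illustrative, and the directory must exist for os.listdir to succeed):

for name in in_dir():                     # scan ~/.tmuxp with the default extensions
    print(name)
yaml_only = in_dir(config_dir='./configs', extensions=['.yml', '.yaml'])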
def default_matrix(self):
"""The default calibration matrix for this device.
On most devices, this is the identity matrix. If the udev property
``LIBINPUT_CALIBRATION_MATRIX`` is set on the respective udev device,
that property's value becomes the default matrix, see
`Static device configuration via udev`_.
Returns:
(bool, (float, float, float, float, float, float)): :obj:`False` if
no calibration is set and
the returned matrix is the identity matrix, :obj:`True`
otherwise. :obj:`tuple` representing the first two rows of
a 3x3 matrix as described
in :meth:`config_calibration_set_matrix`.
"""
matrix = (c_float * 6)()
rc = self._libinput \
.libinput_device_config_calibration_get_default_matrix(
self._handle, matrix)
return rc, tuple(matrix) | def function[default_matrix, parameter[self]]:
constant[The default calibration matrix for this device.
On most devices, this is the identity matrix. If the udev property
``LIBINPUT_CALIBRATION_MATRIX`` is set on the respective udev device,
that property's value becomes the default matrix, see
`Static device configuration via udev`_.
Returns:
(bool, (float, float, float, float, float, float)): :obj:`False` if
no calibration is set and
the returned matrix is the identity matrix, :obj:`True`
otherwise. :obj:`tuple` representing the first two rows of
a 3x3 matrix as described
in :meth:`config_calibration_set_matrix`.
]
variable[matrix] assign[=] call[binary_operation[name[c_float] * constant[6]], parameter[]]
variable[rc] assign[=] call[name[self]._libinput.libinput_device_config_calibration_get_default_matrix, parameter[name[self]._handle, name[matrix]]]
return[tuple[[<ast.Name object at 0x7da18f09f010>, <ast.Call object at 0x7da20c7ca7a0>]]] | keyword[def] identifier[default_matrix] ( identifier[self] ):
literal[string]
identifier[matrix] =( identifier[c_float] * literal[int] )()
identifier[rc] = identifier[self] . identifier[_libinput] . identifier[libinput_device_config_calibration_get_default_matrix] (
identifier[self] . identifier[_handle] , identifier[matrix] )
keyword[return] identifier[rc] , identifier[tuple] ( identifier[matrix] ) | def default_matrix(self):
"""The default calibration matrix for this device.
On most devices, this is the identity matrix. If the udev property
``LIBINPUT_CALIBRATION_MATRIX`` is set on the respective udev device,
that property's value becomes the default matrix, see
`Static device configuration via udev`_.
Returns:
(bool, (float, float, float, float, float, float)): :obj:`False` if
no calibration is set and
the returned matrix is the identity matrix, :obj:`True`
otherwise. :obj:`tuple` representing the first two rows of
a 3x3 matrix as described
in :meth:`config_calibration_set_matrix`.
"""
matrix = (c_float * 6)()
rc = self._libinput.libinput_device_config_calibration_get_default_matrix(self._handle, matrix)
return (rc, tuple(matrix)) |
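The (c_float * 6)() idiom allocates a mutable C array for the library to fill in place; here is the same out-parameter pattern in isolation, independent of libinput:

from ctypes import c_float

matrix = (c_float * 6)()         # six zero-initialized C floats
matrix[0] = matrix[4] = 1.0      # first two rows of a 3x3 identity matrix
print(tuple(matrix))             # (1.0, 0.0, 0.0, 0.0, 1.0, 0.0)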
async def fetch_signatures(endpoint, protocol, idgen):
"""Request available methods for the service."""
async with aiohttp.ClientSession() as session:
req = {
"method": "getMethodTypes",
"params": [''],
"version": "1.0",
"id": next(idgen),
}
if protocol == ProtocolType.WebSocket:
async with session.ws_connect(endpoint, timeout=2) as s:
await s.send_json(req)
res = await s.receive_json()
return res
else:
res = await session.post(endpoint, json=req)
json = await res.json()
return json | <ast.AsyncFunctionDef object at 0x7da18f00e710> | keyword[async] keyword[def] identifier[fetch_signatures] ( identifier[endpoint] , identifier[protocol] , identifier[idgen] ):
literal[string]
keyword[async] keyword[with] identifier[aiohttp] . identifier[ClientSession] () keyword[as] identifier[session] :
identifier[req] ={
literal[string] : literal[string] ,
literal[string] :[ literal[string] ],
literal[string] : literal[string] ,
literal[string] : identifier[next] ( identifier[idgen] ),
}
keyword[if] identifier[protocol] == identifier[ProtocolType] . identifier[WebSocket] :
keyword[async] keyword[with] identifier[session] . identifier[ws_connect] ( identifier[endpoint] , identifier[timeout] = literal[int] ) keyword[as] identifier[s] :
keyword[await] identifier[s] . identifier[send_json] ( identifier[req] )
identifier[res] = keyword[await] identifier[s] . identifier[receive_json] ()
keyword[return] identifier[res]
keyword[else] :
identifier[res] = keyword[await] identifier[session] . identifier[post] ( identifier[endpoint] , identifier[json] = identifier[req] )
identifier[json] = keyword[await] identifier[res] . identifier[json] ()
keyword[return] identifier[json] | async def fetch_signatures(endpoint, protocol, idgen):
"""Request available methods for the service."""
async with aiohttp.ClientSession() as session:
req = {'method': 'getMethodTypes', 'params': [''], 'version': '1.0', 'id': next(idgen)}
if protocol == ProtocolType.WebSocket:
async with session.ws_connect(endpoint, timeout=2) as s:
await s.send_json(req)
res = await s.receive_json()
return res # depends on [control=['if'], data=[]]
else:
res = await session.post(endpoint, json=req)
json = await res.json()
return json |
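A hypothetical invocation (the endpoint URL is made up; only the WebSocket member of ProtocolType is shown in this row):

import asyncio
import itertools

idgen = itertools.count(1)       # any iterator of request ids works
res = asyncio.run(fetch_signatures(
    'ws://192.168.0.10:10000/sony/guide', ProtocolType.WebSocket, idgen))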
def alpha(self, a=None):
"""Set/get actor's transparency."""
if a is not None:
self.GetProperty().SetOpacity(a)
return self
else:
return self.GetProperty().GetOpacity() | def function[alpha, parameter[self, a]]:
constant[Set/get actor's transparency.]
if compare[name[a] is_not constant[None]] begin[:]
call[call[name[self].GetProperty, parameter[]].SetOpacity, parameter[name[a]]]
return[name[self]] | keyword[def] identifier[alpha] ( identifier[self] , identifier[a] = keyword[None] ):
literal[string]
keyword[if] identifier[a] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[GetProperty] (). identifier[SetOpacity] ( identifier[a] )
keyword[return] identifier[self]
keyword[else] :
keyword[return] identifier[self] . identifier[GetProperty] (). identifier[GetOpacity] () | def alpha(self, a=None):
"""Set/get actor's transparency."""
if a is not None:
self.GetProperty().SetOpacity(a)
return self # depends on [control=['if'], data=['a']]
else:
return self.GetProperty().GetOpacity() |
def set_equation_from_string(self, equation_str, fail_silently=False,
check_equation=True):
"""Set equation attribute from a string.
Checks to see that the string is well-formed, and
then uses sympy.sympify to evaluate.
Args:
equation_str (str): A string representation (in valid Python)
of the equation to be added.
fail_silently (bool): Whether to raise sympy.SympifyError if
equation cannot be parsed. Useful if calling from other
application.
check_equation (bool): Whether or not to run regex check
on the equation_str argument.
"""
if check_equation:
regex_check(equation_str)
# Create Queue to allow for timeout
q = multiprocessing.Queue()
def prep(conn):
equation, error = None, False
try:
equation = sympy.sympify(equation_str)
except sympy.SympifyError:
error = True
q.put((equation, error))
p = multiprocessing.Process(target=prep, args=(q,))
p.start()
# See if we can get the equation within 5 seconds
try:
equation, error = q.get(timeout=5)
except queue.Empty:
equation, error = None, None
q.close()
# If the process is still running, kill it
if p.is_alive():
p.terminate()
p.join()
# Check if error was raised in sympify call.
# If we don't want to fail silently, recall sympify to reraise error
if error and not fail_silently:
sympy.sympify(equation_str)
self.equation = equation | def function[set_equation_from_string, parameter[self, equation_str, fail_silently, check_equation]]:
constant[Set equation attribute from a string.
Checks to see that the string is well-formed, and
then uses sympy.sympify to evaluate.
Args:
equation_str (str): A string representation (in valid Python)
of the equation to be added.
fail_silently (bool): Whether to raise sympy.SympifyError if
equation cannot be parsed. Useful if calling from other
application.
check_equation (bool): Whether or not to run regex check
on the equation_str argument.
]
if name[check_equation] begin[:]
call[name[regex_check], parameter[name[equation_str]]]
variable[q] assign[=] call[name[multiprocessing].Queue, parameter[]]
def function[prep, parameter[conn]]:
<ast.Tuple object at 0x7da20c6e7250> assign[=] tuple[[<ast.Constant object at 0x7da20c6e5c60>, <ast.Constant object at 0x7da20c6e4730>]]
<ast.Try object at 0x7da20c6e6ce0>
call[name[q].put, parameter[tuple[[<ast.Name object at 0x7da20c6e6020>, <ast.Name object at 0x7da20c6e46a0>]]]]
variable[p] assign[=] call[name[multiprocessing].Process, parameter[]]
call[name[p].start, parameter[]]
<ast.Try object at 0x7da20c6e64a0>
call[name[q].close, parameter[]]
if call[name[p].is_alive, parameter[]] begin[:]
call[name[p].terminate, parameter[]]
call[name[p].join, parameter[]]
if <ast.BoolOp object at 0x7da20c6e5ed0> begin[:]
call[name[sympy].sympify, parameter[name[equation_str]]]
name[self].equation assign[=] name[equation] | keyword[def] identifier[set_equation_from_string] ( identifier[self] , identifier[equation_str] , identifier[fail_silently] = keyword[False] ,
identifier[check_equation] = keyword[True] ):
literal[string]
keyword[if] identifier[check_equation] :
identifier[regex_check] ( identifier[equation_str] )
identifier[q] = identifier[multiprocessing] . identifier[Queue] ()
keyword[def] identifier[prep] ( identifier[conn] ):
identifier[equation] , identifier[error] = keyword[None] , keyword[False]
keyword[try] :
identifier[equation] = identifier[sympy] . identifier[sympify] ( identifier[equation_str] )
keyword[except] identifier[sympy] . identifier[SympifyError] :
identifier[error] = keyword[True]
identifier[q] . identifier[put] (( identifier[equation] , identifier[error] ))
identifier[p] = identifier[multiprocessing] . identifier[Process] ( identifier[target] = identifier[prep] , identifier[args] =( identifier[q] ,))
identifier[p] . identifier[start] ()
keyword[try] :
identifier[equation] , identifier[error] = identifier[q] . identifier[get] ( identifier[timeout] = literal[int] )
keyword[except] identifier[queue] . identifier[Empty] :
identifier[equation] , identifier[error] = keyword[None] , keyword[None]
identifier[q] . identifier[close] ()
keyword[if] identifier[p] . identifier[is_alive] ():
identifier[p] . identifier[terminate] ()
identifier[p] . identifier[join] ()
keyword[if] identifier[error] keyword[and] keyword[not] identifier[fail_silently] :
identifier[sympy] . identifier[sympify] ( identifier[equation_str] )
identifier[self] . identifier[equation] = identifier[equation] | def set_equation_from_string(self, equation_str, fail_silently=False, check_equation=True):
"""Set equation attribute from a string.
Checks to see that the string is well-formed, and
then uses sympy.sympify to evaluate.
Args:
equation_str (str): A string representation (in valid Python)
of the equation to be added.
fail_silently (bool): Whether to raise sympy.SympifyError if
equation cannot be parsed. Useful if calling from other
application.
check_equation (bool): Whether or not to run regex check
on the equation_str argument.
"""
if check_equation:
regex_check(equation_str) # depends on [control=['if'], data=[]]
# Create Queue to allow for timeout
q = multiprocessing.Queue()
def prep(conn):
(equation, error) = (None, False)
try:
equation = sympy.sympify(equation_str) # depends on [control=['try'], data=[]]
except sympy.SympifyError:
error = True # depends on [control=['except'], data=[]]
q.put((equation, error))
p = multiprocessing.Process(target=prep, args=(q,))
p.start()
# See if we can get the equation within 5 seconds
try:
(equation, error) = q.get(timeout=5) # depends on [control=['try'], data=[]]
except queue.Empty:
(equation, error) = (None, None) # depends on [control=['except'], data=[]]
q.close()
# If the process is still running, kill it
if p.is_alive():
p.terminate()
p.join() # depends on [control=['if'], data=[]]
# Check if error was raised in sympify call.
# If we don't want to fail silently, recall sympify to reraise error
if error and (not fail_silently):
sympy.sympify(equation_str) # depends on [control=['if'], data=[]]
self.equation = equation |
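The interesting part above is guarding sympify with a worker process so pathological input cannot hang the caller; the same pattern in isolation (names are ours; the fork start method is assumed, as in the original, and the result must be picklable):

import multiprocessing
import queue

def run_with_timeout(fn, arg, timeout=5):
    q = multiprocessing.Queue()
    def worker(q):
        q.put(fn(arg))           # runs in the child; result must be picklable
    p = multiprocessing.Process(target=worker, args=(q,))
    p.start()
    try:
        return q.get(timeout=timeout)
    except queue.Empty:
        return None              # timed out
    finally:
        q.close()
        if p.is_alive():
            p.terminate()
        p.join()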
def _get_cpu_virtualization(self):
"""get cpu virtualization status."""
try:
cpu_vt = self._get_bios_setting('ProcVirtualization')
except exception.IloCommandNotSupportedError:
return False
if cpu_vt == 'Enabled':
vt_status = True
else:
vt_status = False
return vt_status | def function[_get_cpu_virtualization, parameter[self]]:
constant[get cpu virtualization status.]
<ast.Try object at 0x7da20c9901f0>
if compare[name[cpu_vt] equal[==] constant[Enabled]] begin[:]
variable[vt_status] assign[=] constant[True]
return[name[vt_status]] | keyword[def] identifier[_get_cpu_virtualization] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[cpu_vt] = identifier[self] . identifier[_get_bios_setting] ( literal[string] )
keyword[except] identifier[exception] . identifier[IloCommandNotSupportedError] :
keyword[return] keyword[False]
keyword[if] identifier[cpu_vt] == literal[string] :
identifier[vt_status] = keyword[True]
keyword[else] :
identifier[vt_status] = keyword[False]
keyword[return] identifier[vt_status] | def _get_cpu_virtualization(self):
"""get cpu virtualization status."""
try:
cpu_vt = self._get_bios_setting('ProcVirtualization') # depends on [control=['try'], data=[]]
except exception.IloCommandNotSupportedError:
return False # depends on [control=['except'], data=[]]
if cpu_vt == 'Enabled':
vt_status = True # depends on [control=['if'], data=[]]
else:
vt_status = False
return vt_status |
def get_asset_query_session(self, proxy=None):
"""Gets the ``OsidSession`` associated with the asset query service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetQuerySession) - an
``AssetQuerySession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_asset_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_query()`` is ``true``.*
"""
return AssetQuerySession(
self._provider_manager.get_asset_query_session(proxy),
self._config_map) | def function[get_asset_query_session, parameter[self, proxy]]:
constant[Gets the ``OsidSession`` associated with the asset query service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetQuerySession) - an
``AssetQuerySession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_asset_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_query()`` is ``true``.*
]
return[call[name[AssetQuerySession], parameter[call[name[self]._provider_manager.get_asset_query_session, parameter[name[proxy]]], name[self]._config_map]]] | keyword[def] identifier[get_asset_query_session] ( identifier[self] , identifier[proxy] = keyword[None] ):
literal[string]
keyword[return] identifier[AssetQuerySession] (
identifier[self] . identifier[_provider_manager] . identifier[get_asset_query_session] ( identifier[proxy] ),
identifier[self] . identifier[_config_map] ) | def get_asset_query_session(self, proxy=None):
"""Gets the ``OsidSession`` associated with the asset query service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetQuerySession) - an
``AssetQuerySession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_asset_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_query()`` is ``true``.*
"""
return AssetQuerySession(self._provider_manager.get_asset_query_session(proxy), self._config_map) |
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
for name, child in self._children.items():
child.render(**kwargs)
return self._template.render(this=self, kwargs=kwargs) | def function[render, parameter[self]]:
constant[Renders the HTML representation of the element.]
for taget[tuple[[<ast.Name object at 0x7da1b121a3b0>, <ast.Name object at 0x7da1b1219210>]]] in starred[call[name[self]._children.items, parameter[]]] begin[:]
call[name[child].render, parameter[]]
return[call[name[self]._template.render, parameter[]]] | keyword[def] identifier[render] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[for] identifier[name] , identifier[child] keyword[in] identifier[self] . identifier[_children] . identifier[items] ():
identifier[child] . identifier[render] (** identifier[kwargs] )
keyword[return] identifier[self] . identifier[_template] . identifier[render] ( identifier[this] = identifier[self] , identifier[kwargs] = identifier[kwargs] ) | def render(self, **kwargs):
"""Renders the HTML representation of the element."""
for (name, child) in self._children.items():
child.render(**kwargs) # depends on [control=['for'], data=[]]
return self._template.render(this=self, kwargs=kwargs) |
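
The pattern above is a classic composite render: each node renders its children first (typically for their side effects), then renders its own template with itself bound as `this`. A minimal self-contained sketch of the same idea, assuming jinja2; the Element class, add_child helper, and template strings are illustrative, not the library's actual API:

# Minimal sketch of the composite render pattern above, assuming jinja2.
from collections import OrderedDict
from jinja2 import Template

class Element:
    def __init__(self, template_str):
        self._template = Template(template_str)
        self._children = OrderedDict()

    def add_child(self, name, child):
        self._children[name] = child

    def render(self, **kwargs):
        # render children first, then this node's own template
        for name, child in self._children.items():
            child.render(**kwargs)
        return self._template.render(this=self, kwargs=kwargs)

root = Element('<div>{{ kwargs.title }}</div>')
root.add_child('leaf', Element('<span></span>'))
print(root.render(title='hello'))  # <div>hello</div>
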
def as_csv(self):
"""Return a CSV representation as a string"""
from io import StringIO
s = StringIO()
w = csv.writer(s)
for row in self.rows:
w.writerow(row)
return s.getvalue() | def function[as_csv, parameter[self]]:
constant[Return a CSV representation as a string]
from relative_module[io] import module[StringIO]
variable[s] assign[=] call[name[StringIO], parameter[]]
variable[w] assign[=] call[name[csv].writer, parameter[name[s]]]
for taget[name[row]] in starred[name[self].rows] begin[:]
call[name[w].writerow, parameter[name[row]]]
return[call[name[s].getvalue, parameter[]]] | keyword[def] identifier[as_csv] ( identifier[self] ):
literal[string]
keyword[from] identifier[io] keyword[import] identifier[StringIO]
identifier[s] = identifier[StringIO] ()
identifier[w] = identifier[csv] . identifier[writer] ( identifier[s] )
keyword[for] identifier[row] keyword[in] identifier[self] . identifier[rows] :
identifier[w] . identifier[writerow] ( identifier[row] )
keyword[return] identifier[s] . identifier[getvalue] () | def as_csv(self):
"""Return a CSV representation as a string"""
from io import StringIO
s = StringIO()
w = csv.writer(s)
for row in self.rows:
w.writerow(row) # depends on [control=['for'], data=['row']]
return s.getvalue() |
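
The same StringIO + csv.writer pattern in isolation, with assumed sample rows:

# Standalone demo of the in-memory CSV pattern above (sample rows assumed).
import csv
from io import StringIO

rows = [('id', 'name'), (1, 'alpha'), (2, 'beta')]
buf = StringIO()
writer = csv.writer(buf)
for row in rows:
    writer.writerow(row)
print(buf.getvalue())
# id,name
# 1,alpha
# 2,beta
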
def addSuppression(self, suppressionList):
"""
    This method can be used to add patterns of warnings that should
not be counted.
It takes a single argument, a list of patterns.
Each pattern is a 4-tuple (FILE-RE, WARN-RE, START, END).
FILE-RE is a regular expression (string or compiled regexp), or None.
If None, the pattern matches all files, else only files matching the
regexp. If directoryEnterPattern is specified in the class constructor,
    matching is against the full path name, e.g. src/main.c.
WARN-RE is similarly a regular expression matched against the
text of the warning, or None to match all warnings.
START and END form an inclusive line number range to match against. If
    START is None, there is no lower bound; similarly, if END is None, there
is no upper bound."""
for fileRe, warnRe, start, end in suppressionList:
if fileRe is not None and isinstance(fileRe, str):
fileRe = re.compile(fileRe)
if warnRe is not None and isinstance(warnRe, str):
warnRe = re.compile(warnRe)
self.suppressions.append((fileRe, warnRe, start, end)) | def function[addSuppression, parameter[self, suppressionList]]:
constant[
    This method can be used to add patterns of warnings that should
not be counted.
It takes a single argument, a list of patterns.
Each pattern is a 4-tuple (FILE-RE, WARN-RE, START, END).
FILE-RE is a regular expression (string or compiled regexp), or None.
If None, the pattern matches all files, else only files matching the
regexp. If directoryEnterPattern is specified in the class constructor,
    matching is against the full path name, e.g. src/main.c.
WARN-RE is similarly a regular expression matched against the
text of the warning, or None to match all warnings.
START and END form an inclusive line number range to match against. If
    START is None, there is no lower bound; similarly, if END is None, there
is no upper bound.]
for taget[tuple[[<ast.Name object at 0x7da18f58f5e0>, <ast.Name object at 0x7da18f58dea0>, <ast.Name object at 0x7da1b21e05e0>, <ast.Name object at 0x7da1b21e1570>]]] in starred[name[suppressionList]] begin[:]
if <ast.BoolOp object at 0x7da1b21e0340> begin[:]
variable[fileRe] assign[=] call[name[re].compile, parameter[name[fileRe]]]
if <ast.BoolOp object at 0x7da1b21e2b30> begin[:]
variable[warnRe] assign[=] call[name[re].compile, parameter[name[warnRe]]]
call[name[self].suppressions.append, parameter[tuple[[<ast.Name object at 0x7da1b1c3d450>, <ast.Name object at 0x7da1b1c3ce50>, <ast.Name object at 0x7da1b1c3f520>, <ast.Name object at 0x7da1b1c3cee0>]]]] | keyword[def] identifier[addSuppression] ( identifier[self] , identifier[suppressionList] ):
literal[string]
keyword[for] identifier[fileRe] , identifier[warnRe] , identifier[start] , identifier[end] keyword[in] identifier[suppressionList] :
keyword[if] identifier[fileRe] keyword[is] keyword[not] keyword[None] keyword[and] identifier[isinstance] ( identifier[fileRe] , identifier[str] ):
identifier[fileRe] = identifier[re] . identifier[compile] ( identifier[fileRe] )
keyword[if] identifier[warnRe] keyword[is] keyword[not] keyword[None] keyword[and] identifier[isinstance] ( identifier[warnRe] , identifier[str] ):
identifier[warnRe] = identifier[re] . identifier[compile] ( identifier[warnRe] )
identifier[self] . identifier[suppressions] . identifier[append] (( identifier[fileRe] , identifier[warnRe] , identifier[start] , identifier[end] )) | def addSuppression(self, suppressionList):
"""
    This method can be used to add patterns of warnings that should
not be counted.
It takes a single argument, a list of patterns.
Each pattern is a 4-tuple (FILE-RE, WARN-RE, START, END).
FILE-RE is a regular expression (string or compiled regexp), or None.
If None, the pattern matches all files, else only files matching the
regexp. If directoryEnterPattern is specified in the class constructor,
    matching is against the full path name, e.g. src/main.c.
WARN-RE is similarly a regular expression matched against the
text of the warning, or None to match all warnings.
START and END form an inclusive line number range to match against. If
    START is None, there is no lower bound; similarly, if END is None, there
is no upper bound."""
for (fileRe, warnRe, start, end) in suppressionList:
if fileRe is not None and isinstance(fileRe, str):
fileRe = re.compile(fileRe) # depends on [control=['if'], data=[]]
if warnRe is not None and isinstance(warnRe, str):
warnRe = re.compile(warnRe) # depends on [control=['if'], data=[]]
self.suppressions.append((fileRe, warnRe, start, end)) # depends on [control=['for'], data=[]] |
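
Given the 4-tuple format the docstring describes, a plausible call might look like this (the step instance and pattern strings are illustrative):

# Illustrative suppression list: each entry is (FILE-RE, WARN-RE, START, END),
# where None means "match any file/warning" or "no line bound".
suppressions = [
    (r'src/legacy/.*\.c', None, None, None),       # silence everything in legacy C files
    (None, r'deprecated', None, None),             # silence any "deprecated" warning
    (r'src/main\.c', r'unused variable', 10, 50),  # only lines 10-50 of main.c
]
step.addSuppression(suppressions)  # `step` is an assumed step instance
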
def get(self, orig_key):
"""Get cache entry for key, or return None."""
resp = requests.Response()
key = self._clean_key(orig_key)
path = os.path.join(self.cache_dir, key)
try:
with open(path, 'rb') as f:
# read lines one at a time
while True:
line = f.readline().decode('utf8').strip('\r\n')
# set headers
if self.check_last_modified and re.search("last-modified", line, flags=re.I):
# line contains last modified header
head_resp = requests.head(orig_key)
try:
new_lm = head_resp.headers['last-modified']
old_lm = line[string.find(line, ':') + 1:].strip()
if old_lm != new_lm:
# last modified timestamps don't match, need to download again
return None
except KeyError:
# no last modified header present, so redownload
return None
header = self._header_re.match(line)
if header:
resp.headers[header.group(1)] = header.group(2)
else:
break
# everything left is the real content
resp._content = f.read()
# status & encoding will be in headers, but are faked
# need to split spaces out of status to get code (e.g. '200 OK')
resp.status_code = int(resp.headers.pop('status').split(' ')[0])
resp.encoding = resp.headers.pop('encoding')
resp.url = resp.headers.get('content-location', orig_key)
# TODO: resp.request = request
return resp
except IOError:
return None | def function[get, parameter[self, orig_key]]:
constant[Get cache entry for key, or return None.]
variable[resp] assign[=] call[name[requests].Response, parameter[]]
variable[key] assign[=] call[name[self]._clean_key, parameter[name[orig_key]]]
variable[path] assign[=] call[name[os].path.join, parameter[name[self].cache_dir, name[key]]]
<ast.Try object at 0x7da1b26d4b20> | keyword[def] identifier[get] ( identifier[self] , identifier[orig_key] ):
literal[string]
identifier[resp] = identifier[requests] . identifier[Response] ()
identifier[key] = identifier[self] . identifier[_clean_key] ( identifier[orig_key] )
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[cache_dir] , identifier[key] )
keyword[try] :
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] :
keyword[while] keyword[True] :
identifier[line] = identifier[f] . identifier[readline] (). identifier[decode] ( literal[string] ). identifier[strip] ( literal[string] )
keyword[if] identifier[self] . identifier[check_last_modified] keyword[and] identifier[re] . identifier[search] ( literal[string] , identifier[line] , identifier[flags] = identifier[re] . identifier[I] ):
identifier[head_resp] = identifier[requests] . identifier[head] ( identifier[orig_key] )
keyword[try] :
identifier[new_lm] = identifier[head_resp] . identifier[headers] [ literal[string] ]
identifier[old_lm] = identifier[line] [ identifier[string] . identifier[find] ( identifier[line] , literal[string] )+ literal[int] :]. identifier[strip] ()
keyword[if] identifier[old_lm] != identifier[new_lm] :
keyword[return] keyword[None]
keyword[except] identifier[KeyError] :
keyword[return] keyword[None]
identifier[header] = identifier[self] . identifier[_header_re] . identifier[match] ( identifier[line] )
keyword[if] identifier[header] :
identifier[resp] . identifier[headers] [ identifier[header] . identifier[group] ( literal[int] )]= identifier[header] . identifier[group] ( literal[int] )
keyword[else] :
keyword[break]
identifier[resp] . identifier[_content] = identifier[f] . identifier[read] ()
identifier[resp] . identifier[status_code] = identifier[int] ( identifier[resp] . identifier[headers] . identifier[pop] ( literal[string] ). identifier[split] ( literal[string] )[ literal[int] ])
identifier[resp] . identifier[encoding] = identifier[resp] . identifier[headers] . identifier[pop] ( literal[string] )
identifier[resp] . identifier[url] = identifier[resp] . identifier[headers] . identifier[get] ( literal[string] , identifier[orig_key] )
keyword[return] identifier[resp]
keyword[except] identifier[IOError] :
keyword[return] keyword[None] | def get(self, orig_key):
"""Get cache entry for key, or return None."""
resp = requests.Response()
key = self._clean_key(orig_key)
path = os.path.join(self.cache_dir, key)
try:
with open(path, 'rb') as f:
# read lines one at a time
while True:
line = f.readline().decode('utf8').strip('\r\n')
# set headers
if self.check_last_modified and re.search('last-modified', line, flags=re.I):
# line contains last modified header
head_resp = requests.head(orig_key)
try:
new_lm = head_resp.headers['last-modified']
old_lm = line[string.find(line, ':') + 1:].strip()
if old_lm != new_lm:
# last modified timestamps don't match, need to download again
return None # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except KeyError:
# no last modified header present, so redownload
return None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
header = self._header_re.match(line)
if header:
resp.headers[header.group(1)] = header.group(2) # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]]
# everything left is the real content
resp._content = f.read() # depends on [control=['with'], data=['f']]
# status & encoding will be in headers, but are faked
# need to split spaces out of status to get code (e.g. '200 OK')
resp.status_code = int(resp.headers.pop('status').split(' ')[0])
resp.encoding = resp.headers.pop('encoding')
resp.url = resp.headers.get('content-location', orig_key)
# TODO: resp.request = request
return resp # depends on [control=['try'], data=[]]
except IOError:
return None # depends on [control=['except'], data=[]] |
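
get() implies a simple on-disk layout: header lines first, including the faked status and encoding headers, then a line that fails the header regex (typically blank), then the raw body. A sketch of the matching writer under that assumption; this is not the library's actual code:

# Assumed counterpart to get(): persist a response in the layout get() parses.
import os

def set_cache_entry(cache_dir, clean_key, response):
    path = os.path.join(cache_dir, clean_key)
    with open(path, 'wb') as f:
        f.write('status: {}\r\n'.format(response.status_code).encode('utf8'))
        f.write('encoding: {}\r\n'.format(response.encoding).encode('utf8'))
        for name, value in response.headers.items():
            f.write('{}: {}\r\n'.format(name, value).encode('utf8'))
        f.write(b'\r\n')           # blank line ends the header block
        f.write(response.content)  # everything after it is the body
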
def log_histograms(self, model: Model, histogram_parameters: Set[str]) -> None:
"""
Send histograms of parameters to tensorboard.
"""
for name, param in model.named_parameters():
if name in histogram_parameters:
self.add_train_histogram("parameter_histogram/" + name, param) | def function[log_histograms, parameter[self, model, histogram_parameters]]:
constant[
Send histograms of parameters to tensorboard.
]
for taget[tuple[[<ast.Name object at 0x7da1b1f94c10>, <ast.Name object at 0x7da1b1f952a0>]]] in starred[call[name[model].named_parameters, parameter[]]] begin[:]
if compare[name[name] in name[histogram_parameters]] begin[:]
call[name[self].add_train_histogram, parameter[binary_operation[constant[parameter_histogram/] + name[name]], name[param]]] | keyword[def] identifier[log_histograms] ( identifier[self] , identifier[model] : identifier[Model] , identifier[histogram_parameters] : identifier[Set] [ identifier[str] ])-> keyword[None] :
literal[string]
keyword[for] identifier[name] , identifier[param] keyword[in] identifier[model] . identifier[named_parameters] ():
keyword[if] identifier[name] keyword[in] identifier[histogram_parameters] :
identifier[self] . identifier[add_train_histogram] ( literal[string] + identifier[name] , identifier[param] ) | def log_histograms(self, model: Model, histogram_parameters: Set[str]) -> None:
"""
Send histograms of parameters to tensorboard.
"""
for (name, param) in model.named_parameters():
if name in histogram_parameters:
self.add_train_histogram('parameter_histogram/' + name, param) # depends on [control=['if'], data=['name']] # depends on [control=['for'], data=[]] |
def ordinal_encode(self, column, values=None, inplace=False):
"""Encode column as ordinal values and mark it as categorical.
    The existing column is renamed to a hidden column and replaced by a numerical column
with values between [0, len(values)-1].
"""
column = _ensure_string_from_expression(column)
df = self if inplace else self.copy()
# for the codes, we need to work on the unfiltered dataset, since the filter
# may change, and we also cannot add an array that is smaller in length
df_unfiltered = df.copy()
# maybe we need some filter manipulation methods
df_unfiltered.select_nothing(name=FILTER_SELECTION_NAME)
df_unfiltered._length_unfiltered = df._length_original
df_unfiltered.set_active_range(0, df._length_original)
# codes point to the index of found_values
# meaning: found_values[codes[0]] == ds[column].values[0]
found_values, codes = df_unfiltered.unique(column, return_inverse=True)
if values is None:
values = found_values
else:
# we have specified which values we should support, anything
# not found will be masked
translation = np.zeros(len(found_values), dtype=np.uint64)
# mark values that are in the column, but not in values with a special value
missing_value = len(found_values)
for i, found_value in enumerate(found_values):
try:
found_value = found_value.decode('ascii')
except:
pass
if found_value not in values: # not present, we need a missing value
translation[i] = missing_value
else:
translation[i] = values.index(found_value)
codes = translation[codes]
if missing_value in translation:
# all special values will be marked as missing
codes = np.ma.masked_array(codes, codes==missing_value)
original_column = df.rename_column(column, '__original_' + column, unique=True)
labels = [str(k) for k in values]
df.add_column(column, codes)
df._categories[column] = dict(labels=labels, N=len(values), values=values)
return df | def function[ordinal_encode, parameter[self, column, values, inplace]]:
constant[Encode column as ordinal values and mark it as categorical.
    The existing column is renamed to a hidden column and replaced by a numerical column
with values between [0, len(values)-1].
]
variable[column] assign[=] call[name[_ensure_string_from_expression], parameter[name[column]]]
variable[df] assign[=] <ast.IfExp object at 0x7da207f99ab0>
variable[df_unfiltered] assign[=] call[name[df].copy, parameter[]]
call[name[df_unfiltered].select_nothing, parameter[]]
name[df_unfiltered]._length_unfiltered assign[=] name[df]._length_original
call[name[df_unfiltered].set_active_range, parameter[constant[0], name[df]._length_original]]
<ast.Tuple object at 0x7da207f9af80> assign[=] call[name[df_unfiltered].unique, parameter[name[column]]]
if compare[name[values] is constant[None]] begin[:]
variable[values] assign[=] name[found_values]
variable[original_column] assign[=] call[name[df].rename_column, parameter[name[column], binary_operation[constant[__original_] + name[column]]]]
variable[labels] assign[=] <ast.ListComp object at 0x7da18bc710c0>
call[name[df].add_column, parameter[name[column], name[codes]]]
call[name[df]._categories][name[column]] assign[=] call[name[dict], parameter[]]
return[name[df]] | keyword[def] identifier[ordinal_encode] ( identifier[self] , identifier[column] , identifier[values] = keyword[None] , identifier[inplace] = keyword[False] ):
literal[string]
identifier[column] = identifier[_ensure_string_from_expression] ( identifier[column] )
identifier[df] = identifier[self] keyword[if] identifier[inplace] keyword[else] identifier[self] . identifier[copy] ()
identifier[df_unfiltered] = identifier[df] . identifier[copy] ()
identifier[df_unfiltered] . identifier[select_nothing] ( identifier[name] = identifier[FILTER_SELECTION_NAME] )
identifier[df_unfiltered] . identifier[_length_unfiltered] = identifier[df] . identifier[_length_original]
identifier[df_unfiltered] . identifier[set_active_range] ( literal[int] , identifier[df] . identifier[_length_original] )
identifier[found_values] , identifier[codes] = identifier[df_unfiltered] . identifier[unique] ( identifier[column] , identifier[return_inverse] = keyword[True] )
keyword[if] identifier[values] keyword[is] keyword[None] :
identifier[values] = identifier[found_values]
keyword[else] :
identifier[translation] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[found_values] ), identifier[dtype] = identifier[np] . identifier[uint64] )
identifier[missing_value] = identifier[len] ( identifier[found_values] )
keyword[for] identifier[i] , identifier[found_value] keyword[in] identifier[enumerate] ( identifier[found_values] ):
keyword[try] :
identifier[found_value] = identifier[found_value] . identifier[decode] ( literal[string] )
keyword[except] :
keyword[pass]
keyword[if] identifier[found_value] keyword[not] keyword[in] identifier[values] :
identifier[translation] [ identifier[i] ]= identifier[missing_value]
keyword[else] :
identifier[translation] [ identifier[i] ]= identifier[values] . identifier[index] ( identifier[found_value] )
identifier[codes] = identifier[translation] [ identifier[codes] ]
keyword[if] identifier[missing_value] keyword[in] identifier[translation] :
identifier[codes] = identifier[np] . identifier[ma] . identifier[masked_array] ( identifier[codes] , identifier[codes] == identifier[missing_value] )
identifier[original_column] = identifier[df] . identifier[rename_column] ( identifier[column] , literal[string] + identifier[column] , identifier[unique] = keyword[True] )
identifier[labels] =[ identifier[str] ( identifier[k] ) keyword[for] identifier[k] keyword[in] identifier[values] ]
identifier[df] . identifier[add_column] ( identifier[column] , identifier[codes] )
identifier[df] . identifier[_categories] [ identifier[column] ]= identifier[dict] ( identifier[labels] = identifier[labels] , identifier[N] = identifier[len] ( identifier[values] ), identifier[values] = identifier[values] )
keyword[return] identifier[df] | def ordinal_encode(self, column, values=None, inplace=False):
"""Encode column as ordinal values and mark it as categorical.
    The existing column is renamed to a hidden column and replaced by a numerical column
with values between [0, len(values)-1].
"""
column = _ensure_string_from_expression(column)
df = self if inplace else self.copy()
# for the codes, we need to work on the unfiltered dataset, since the filter
# may change, and we also cannot add an array that is smaller in length
df_unfiltered = df.copy()
# maybe we need some filter manipulation methods
df_unfiltered.select_nothing(name=FILTER_SELECTION_NAME)
df_unfiltered._length_unfiltered = df._length_original
df_unfiltered.set_active_range(0, df._length_original)
# codes point to the index of found_values
# meaning: found_values[codes[0]] == ds[column].values[0]
(found_values, codes) = df_unfiltered.unique(column, return_inverse=True)
if values is None:
values = found_values # depends on [control=['if'], data=['values']]
else:
# we have specified which values we should support, anything
# not found will be masked
translation = np.zeros(len(found_values), dtype=np.uint64)
# mark values that are in the column, but not in values with a special value
missing_value = len(found_values)
for (i, found_value) in enumerate(found_values):
try:
found_value = found_value.decode('ascii') # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
if found_value not in values: # not present, we need a missing value
translation[i] = missing_value # depends on [control=['if'], data=[]]
else:
translation[i] = values.index(found_value) # depends on [control=['for'], data=[]]
codes = translation[codes]
if missing_value in translation:
# all special values will be marked as missing
codes = np.ma.masked_array(codes, codes == missing_value) # depends on [control=['if'], data=['missing_value']]
original_column = df.rename_column(column, '__original_' + column, unique=True)
labels = [str(k) for k in values]
df.add_column(column, codes)
df._categories[column] = dict(labels=labels, N=len(values), values=values)
return df |
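
The translation-table step is easiest to see on a toy array; a small standalone illustration of what happens when `values` restricts the supported categories:

# Toy illustration of the remapping step above, outside of the dataframe code.
import numpy as np

found_values = np.array(['red', 'green', 'blue'])  # uniques found in the column
codes = np.array([0, 2, 1, 2, 0])                  # indexes into found_values
values = ['blue', 'red']                           # categories we choose to keep

missing_value = len(found_values)
translation = np.zeros(len(found_values), dtype=np.uint64)
for i, fv in enumerate(found_values):
    translation[i] = values.index(fv) if fv in values else missing_value

codes = translation[codes]
codes = np.ma.masked_array(codes, codes == missing_value)
print(codes)  # [1 0 -- 0 1]: the 'green' entries are masked out
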
def precheck():
"""
Pre-run dependency check
"""
binaries = ['make']
for bin in binaries:
if not which(bin):
            msg = 'Dependency fail -- Unable to locate required binary: '
stdout_message('%s: %s' % (msg, ACCENT + bin + RESET))
return False
elif not root():
return False
return True | def function[precheck, parameter[]]:
constant[
Pre-run dependency check
]
variable[binaries] assign[=] list[[<ast.Constant object at 0x7da20c7cb880>]]
for taget[name[bin]] in starred[name[binaries]] begin[:]
if <ast.UnaryOp object at 0x7da20c7c99c0> begin[:]
variable[msg] assign[=] constant[Dependency fail -- Unable to locate rquired binary: ]
call[name[stdout_message], parameter[binary_operation[constant[%s: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c7cbd60>, <ast.BinOp object at 0x7da20c7cbe80>]]]]]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[precheck] ():
literal[string]
identifier[binaries] =[ literal[string] ]
keyword[for] identifier[bin] keyword[in] identifier[binaries] :
keyword[if] keyword[not] identifier[which] ( identifier[bin] ):
identifier[msg] = literal[string]
identifier[stdout_message] ( literal[string] %( identifier[msg] , identifier[ACCENT] + identifier[bin] + identifier[RESET] ))
keyword[return] keyword[False]
keyword[elif] keyword[not] identifier[root] ():
keyword[return] keyword[False]
keyword[return] keyword[True] | def precheck():
"""
Pre-run dependency check
"""
binaries = ['make']
for bin in binaries:
if not which(bin):
            msg = 'Dependency fail -- Unable to locate required binary: '
stdout_message('%s: %s' % (msg, ACCENT + bin + RESET))
return False # depends on [control=['if'], data=[]]
elif not root():
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['bin']]
return True |
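
which() here presumably resolves a binary on the PATH; the standard-library equivalent is shutil.which, e.g.:

# Standard-library equivalent of the assumed which() lookup above.
import shutil

for binary in ['make']:
    if shutil.which(binary) is None:
        print('Unable to locate required binary: {}'.format(binary))
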
def _download_metadata_archive(self):
"""Makes a remote call to the Project Gutenberg servers and downloads
the entire Project Gutenberg meta-data catalog. The catalog describes
    the texts on Project Gutenberg in RDF. The function yields the path to
    a temporary file holding the catalog and removes the file afterwards.
"""
with tempfile.NamedTemporaryFile(delete=False) as metadata_archive:
shutil.copyfileobj(urlopen(self.catalog_source), metadata_archive)
yield metadata_archive.name
remove(metadata_archive.name) | def function[_download_metadata_archive, parameter[self]]:
constant[Makes a remote call to the Project Gutenberg servers and downloads
the entire Project Gutenberg meta-data catalog. The catalog describes
    the texts on Project Gutenberg in RDF. The function yields the path to
    a temporary file holding the catalog and removes the file afterwards.
]
with call[name[tempfile].NamedTemporaryFile, parameter[]] begin[:]
call[name[shutil].copyfileobj, parameter[call[name[urlopen], parameter[name[self].catalog_source]], name[metadata_archive]]]
<ast.Yield object at 0x7da1b12b9f60>
call[name[remove], parameter[name[metadata_archive].name]] | keyword[def] identifier[_download_metadata_archive] ( identifier[self] ):
literal[string]
keyword[with] identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[delete] = keyword[False] ) keyword[as] identifier[metadata_archive] :
identifier[shutil] . identifier[copyfileobj] ( identifier[urlopen] ( identifier[self] . identifier[catalog_source] ), identifier[metadata_archive] )
keyword[yield] identifier[metadata_archive] . identifier[name]
identifier[remove] ( identifier[metadata_archive] . identifier[name] ) | def _download_metadata_archive(self):
"""Makes a remote call to the Project Gutenberg servers and downloads
the entire Project Gutenberg meta-data catalog. The catalog describes
    the texts on Project Gutenberg in RDF. The function yields the path to
    a temporary file holding the catalog and removes the file afterwards.
"""
with tempfile.NamedTemporaryFile(delete=False) as metadata_archive:
shutil.copyfileobj(urlopen(self.catalog_source), metadata_archive) # depends on [control=['with'], data=['metadata_archive']]
yield metadata_archive.name
remove(metadata_archive.name) |
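
The yield-then-cleanup shape suggests this generator is consumed as a context manager. A hedged standalone sketch of that pattern; the function name, URL, and decorator placement are assumptions:

# Assumed consumption pattern: wrap the download/yield/cleanup generator
# with contextlib.contextmanager so callers get automatic cleanup.
from contextlib import contextmanager
from os import remove
from urllib.request import urlopen
import shutil
import tempfile

@contextmanager
def download_archive(url):
    """Download url to a temp file, yield its path, delete it afterwards."""
    with tempfile.NamedTemporaryFile(delete=False) as archive:
        shutil.copyfileobj(urlopen(url), archive)
    try:
        yield archive.name
    finally:
        remove(archive.name)

# with download_archive('https://example.org/catalog.rdf.zip') as path:
#     parse_catalog(path)  # hypothetical consumer
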
def p_ranges(self, p):
"""ranges : ranges '|' range
| range"""
n = len(p)
if n == 4:
p[0] = p[1] + [p[3]]
elif n == 2:
p[0] = [p[1]] | def function[p_ranges, parameter[self, p]]:
constant[ranges : ranges '|' range
| range]
variable[n] assign[=] call[name[len], parameter[name[p]]]
if compare[name[n] equal[==] constant[4]] begin[:]
call[name[p]][constant[0]] assign[=] binary_operation[call[name[p]][constant[1]] + list[[<ast.Subscript object at 0x7da1b01091b0>]]] | keyword[def] identifier[p_ranges] ( identifier[self] , identifier[p] ):
literal[string]
identifier[n] = identifier[len] ( identifier[p] )
keyword[if] identifier[n] == literal[int] :
identifier[p] [ literal[int] ]= identifier[p] [ literal[int] ]+[ identifier[p] [ literal[int] ]]
keyword[elif] identifier[n] == literal[int] :
identifier[p] [ literal[int] ]=[ identifier[p] [ literal[int] ]] | def p_ranges(self, p):
"""ranges : ranges '|' range
| range"""
n = len(p)
if n == 4:
p[0] = p[1] + [p[3]] # depends on [control=['if'], data=[]]
elif n == 2:
p[0] = [p[1]] # depends on [control=['if'], data=[]] |
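
This is the standard PLY idiom for a left-recursive list rule: the single-item alternative seeds the list, the recursive alternative appends to it. A minimal runnable grammar using the same shape (token definitions are illustrative; assumes PLY is installed):

# Minimal self-contained PLY grammar using the same list-building idiom.
import ply.lex as lex
import ply.yacc as yacc

tokens = ('RANGE',)
t_RANGE = r'[0-9]+-[0-9]+'
literals = ['|']
t_ignore = ' '

def t_error(t):
    t.lexer.skip(1)

def p_ranges(p):
    """ranges : ranges '|' range
              | range"""
    if len(p) == 4:
        p[0] = p[1] + [p[3]]   # append the new range to the running list
    else:
        p[0] = [p[1]]          # seed the list with the first range

def p_range(p):
    """range : RANGE"""
    p[0] = p[1]

def p_error(p):
    pass

lexer = lex.lex()
parser = yacc.yacc()
print(parser.parse('1-3 | 5-9 | 12-20', lexer=lexer))  # ['1-3', '5-9', '12-20']
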
def plot_target_trajectory(targets_x, targets_y, targets_z, ax):
"""Ajoute la trajectoire (liste des targets) au plot"""
ax.scatter(targets_x, targets_y, targets_z) | def function[plot_target_trajectory, parameter[targets_x, targets_y, targets_z, ax]]:
    constant[Add the trajectory (list of targets) to the plot.]
call[name[ax].scatter, parameter[name[targets_x], name[targets_y], name[targets_z]]] | keyword[def] identifier[plot_target_trajectory] ( identifier[targets_x] , identifier[targets_y] , identifier[targets_z] , identifier[ax] ):
literal[string]
identifier[ax] . identifier[scatter] ( identifier[targets_x] , identifier[targets_y] , identifier[targets_z] ) | def plot_target_trajectory(targets_x, targets_y, targets_z, ax):
"""Ajoute la trajectoire (liste des targets) au plot"""
ax.scatter(targets_x, targets_y, targets_z) |
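
A hedged usage sketch, assuming ax is a matplotlib 3-D axis and using small illustrative target lists:

# Usage sketch: scatter a small target trajectory on a 3-D matplotlib axis.
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')  # 3-D axis (matplotlib >= 3.2)
targets_x, targets_y, targets_z = [0, 1, 2], [0, 1, 0], [0, 2, 4]
plot_target_trajectory(targets_x, targets_y, targets_z, ax)
plt.show()
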
def _param_callback(self, name, value):
"""Generic callback registered for all the groups"""
print('{0}: {1}'.format(name, value))
# Remove each parameter from the list and close the link when
# all are fetched
self._param_check_list.remove(name)
if len(self._param_check_list) == 0:
print('Have fetched all parameter values.')
# First remove all the group callbacks
for g in self._param_groups:
self._cf.param.remove_update_callback(group=g,
cb=self._param_callback)
# Create a new random value [0.00,1.00] for pid_attitude.pitch_kd
# and set it
pkd = random.random()
print('')
print('Write: pid_attitude.pitch_kd={:.2f}'.format(pkd))
self._cf.param.add_update_callback(group='pid_attitude',
name='pitch_kd',
cb=self._a_pitch_kd_callback)
# When setting a value the parameter is automatically read back
# and the registered callbacks will get the updated value
self._cf.param.set_value('pid_attitude.pitch_kd',
'{:.2f}'.format(pkd)) | def function[_param_callback, parameter[self, name, value]]:
constant[Generic callback registered for all the groups]
call[name[print], parameter[call[constant[{0}: {1}].format, parameter[name[name], name[value]]]]]
call[name[self]._param_check_list.remove, parameter[name[name]]]
if compare[call[name[len], parameter[name[self]._param_check_list]] equal[==] constant[0]] begin[:]
call[name[print], parameter[constant[Have fetched all parameter values.]]]
for taget[name[g]] in starred[name[self]._param_groups] begin[:]
call[name[self]._cf.param.remove_update_callback, parameter[]]
variable[pkd] assign[=] call[name[random].random, parameter[]]
call[name[print], parameter[constant[]]]
call[name[print], parameter[call[constant[Write: pid_attitude.pitch_kd={:.2f}].format, parameter[name[pkd]]]]]
call[name[self]._cf.param.add_update_callback, parameter[]]
call[name[self]._cf.param.set_value, parameter[constant[pid_attitude.pitch_kd], call[constant[{:.2f}].format, parameter[name[pkd]]]]] | keyword[def] identifier[_param_callback] ( identifier[self] , identifier[name] , identifier[value] ):
literal[string]
identifier[print] ( literal[string] . identifier[format] ( identifier[name] , identifier[value] ))
identifier[self] . identifier[_param_check_list] . identifier[remove] ( identifier[name] )
keyword[if] identifier[len] ( identifier[self] . identifier[_param_check_list] )== literal[int] :
identifier[print] ( literal[string] )
keyword[for] identifier[g] keyword[in] identifier[self] . identifier[_param_groups] :
identifier[self] . identifier[_cf] . identifier[param] . identifier[remove_update_callback] ( identifier[group] = identifier[g] ,
identifier[cb] = identifier[self] . identifier[_param_callback] )
identifier[pkd] = identifier[random] . identifier[random] ()
identifier[print] ( literal[string] )
identifier[print] ( literal[string] . identifier[format] ( identifier[pkd] ))
identifier[self] . identifier[_cf] . identifier[param] . identifier[add_update_callback] ( identifier[group] = literal[string] ,
identifier[name] = literal[string] ,
identifier[cb] = identifier[self] . identifier[_a_pitch_kd_callback] )
identifier[self] . identifier[_cf] . identifier[param] . identifier[set_value] ( literal[string] ,
literal[string] . identifier[format] ( identifier[pkd] )) | def _param_callback(self, name, value):
"""Generic callback registered for all the groups"""
print('{0}: {1}'.format(name, value))
# Remove each parameter from the list and close the link when
# all are fetched
self._param_check_list.remove(name)
if len(self._param_check_list) == 0:
print('Have fetched all parameter values.')
# First remove all the group callbacks
for g in self._param_groups:
self._cf.param.remove_update_callback(group=g, cb=self._param_callback) # depends on [control=['for'], data=['g']]
# Create a new random value [0.00,1.00] for pid_attitude.pitch_kd
# and set it
pkd = random.random()
print('')
print('Write: pid_attitude.pitch_kd={:.2f}'.format(pkd))
self._cf.param.add_update_callback(group='pid_attitude', name='pitch_kd', cb=self._a_pitch_kd_callback)
# When setting a value the parameter is automatically read back
# and the registered callbacks will get the updated value
self._cf.param.set_value('pid_attitude.pitch_kd', '{:.2f}'.format(pkd)) # depends on [control=['if'], data=[]] |
def _load_metadata(self, handle):
"""Load archive members metadata."""
rarinfo = self._read_header(handle)
while rarinfo:
self.filelist.append(rarinfo)
self.NameToInfo[rarinfo.filename] = rarinfo
self._process_current(handle, constants.RAR_SKIP)
rarinfo = self._read_header(handle) | def function[_load_metadata, parameter[self, handle]]:
constant[Load archive members metadata.]
variable[rarinfo] assign[=] call[name[self]._read_header, parameter[name[handle]]]
while name[rarinfo] begin[:]
call[name[self].filelist.append, parameter[name[rarinfo]]]
call[name[self].NameToInfo][name[rarinfo].filename] assign[=] name[rarinfo]
call[name[self]._process_current, parameter[name[handle], name[constants].RAR_SKIP]]
variable[rarinfo] assign[=] call[name[self]._read_header, parameter[name[handle]]] | keyword[def] identifier[_load_metadata] ( identifier[self] , identifier[handle] ):
literal[string]
identifier[rarinfo] = identifier[self] . identifier[_read_header] ( identifier[handle] )
keyword[while] identifier[rarinfo] :
identifier[self] . identifier[filelist] . identifier[append] ( identifier[rarinfo] )
identifier[self] . identifier[NameToInfo] [ identifier[rarinfo] . identifier[filename] ]= identifier[rarinfo]
identifier[self] . identifier[_process_current] ( identifier[handle] , identifier[constants] . identifier[RAR_SKIP] )
identifier[rarinfo] = identifier[self] . identifier[_read_header] ( identifier[handle] ) | def _load_metadata(self, handle):
"""Load archive members metadata."""
rarinfo = self._read_header(handle)
while rarinfo:
self.filelist.append(rarinfo)
self.NameToInfo[rarinfo.filename] = rarinfo
self._process_current(handle, constants.RAR_SKIP)
rarinfo = self._read_header(handle) # depends on [control=['while'], data=[]] |
def interleave(*args):
'''Interleaves the elements of the provided arrays.
>>> a = [(0, 0), (1, 0), (2, 0), (3, 0)]
>>> b = [(0, 0), (0, 1), (0, 2), (0, 3)]
>>> interleave(a, b)
[(0, 0, 0, 0), (1, 0, 0, 1), (2, 0, 0, 2), (3, 0, 0, 3)]
This is useful for combining multiple vertex attributes into a single
vertex buffer. The shader attributes can be assigned a slice of the
vertex buffer.
'''
result = []
for array in zip(*args):
result.append(tuple(flatten(array)))
return result | def function[interleave, parameter[]]:
constant[Interleaves the elements of the provided arrays.
>>> a = [(0, 0), (1, 0), (2, 0), (3, 0)]
>>> b = [(0, 0), (0, 1), (0, 2), (0, 3)]
>>> interleave(a, b)
[(0, 0, 0, 0), (1, 0, 0, 1), (2, 0, 0, 2), (3, 0, 0, 3)]
This is useful for combining multiple vertex attributes into a single
vertex buffer. The shader attributes can be assigned a slice of the
vertex buffer.
]
variable[result] assign[=] list[[]]
for taget[name[array]] in starred[call[name[zip], parameter[<ast.Starred object at 0x7da20c76e0e0>]]] begin[:]
call[name[result].append, parameter[call[name[tuple], parameter[call[name[flatten], parameter[name[array]]]]]]]
return[name[result]] | keyword[def] identifier[interleave] (* identifier[args] ):
literal[string]
identifier[result] =[]
keyword[for] identifier[array] keyword[in] identifier[zip] (* identifier[args] ):
identifier[result] . identifier[append] ( identifier[tuple] ( identifier[flatten] ( identifier[array] )))
keyword[return] identifier[result] | def interleave(*args):
"""Interleaves the elements of the provided arrays.
>>> a = [(0, 0), (1, 0), (2, 0), (3, 0)]
>>> b = [(0, 0), (0, 1), (0, 2), (0, 3)]
>>> interleave(a, b)
[(0, 0, 0, 0), (1, 0, 0, 1), (2, 0, 0, 2), (3, 0, 0, 3)]
This is useful for combining multiple vertex attributes into a single
vertex buffer. The shader attributes can be assigned a slice of the
vertex buffer.
"""
result = []
for array in zip(*args):
result.append(tuple(flatten(array))) # depends on [control=['for'], data=['array']]
return result |
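
flatten is used above but not defined here; a one-level generator consistent with the doctest would be the following (an assumption, not necessarily the module's actual helper):

# Plausible flatten(): one level of flattening, consistent with the doctest.
def flatten(iterable):
    for item in iterable:
        for element in item:
            yield element

a = [(0, 0), (1, 0)]
b = [(0, 0), (0, 1)]
print(interleave(a, b))  # [(0, 0, 0, 0), (1, 0, 0, 1)]
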
def ParseMessage(descriptor, byte_str):
"""Generate a new Message instance from this Descriptor and a byte string.
Args:
descriptor: Protobuf Descriptor object
byte_str: Serialized protocol buffer byte string
Returns:
Newly created protobuf Message object.
"""
result_class = MakeClass(descriptor)
new_msg = result_class()
new_msg.ParseFromString(byte_str)
return new_msg | def function[ParseMessage, parameter[descriptor, byte_str]]:
constant[Generate a new Message instance from this Descriptor and a byte string.
Args:
descriptor: Protobuf Descriptor object
byte_str: Serialized protocol buffer byte string
Returns:
Newly created protobuf Message object.
]
variable[result_class] assign[=] call[name[MakeClass], parameter[name[descriptor]]]
variable[new_msg] assign[=] call[name[result_class], parameter[]]
call[name[new_msg].ParseFromString, parameter[name[byte_str]]]
return[name[new_msg]] | keyword[def] identifier[ParseMessage] ( identifier[descriptor] , identifier[byte_str] ):
literal[string]
identifier[result_class] = identifier[MakeClass] ( identifier[descriptor] )
identifier[new_msg] = identifier[result_class] ()
identifier[new_msg] . identifier[ParseFromString] ( identifier[byte_str] )
keyword[return] identifier[new_msg] | def ParseMessage(descriptor, byte_str):
"""Generate a new Message instance from this Descriptor and a byte string.
Args:
descriptor: Protobuf Descriptor object
byte_str: Serialized protocol buffer byte string
Returns:
Newly created protobuf Message object.
"""
result_class = MakeClass(descriptor)
new_msg = result_class()
new_msg.ParseFromString(byte_str)
return new_msg |
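
A hedged round-trip example, using the descriptor of an already-generated message class; addressbook_pb2 and Person are hypothetical here:

# Illustrative round trip through ParseMessage; addressbook_pb2 is a
# hypothetical generated protobuf module.
from addressbook_pb2 import Person

original = Person(name='Ada')
data = original.SerializeToString()

copy = ParseMessage(Person.DESCRIPTOR, data)  # dynamically built instance
assert copy.name == 'Ada'
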
def reboot(self, target_mode=None, timeout_ms=None):
"""Reboots the device.
Args:
target_mode: Normal reboot when unspecified (or None). Can specify
other target modes, such as 'recovery' or 'bootloader'.
timeout_ms: Optional timeout in milliseconds to wait for a response.
Returns:
Usually the empty string. Depends on the bootloader and the target_mode.
"""
return self._simple_command('reboot', arg=target_mode,
timeout_ms=timeout_ms) | def function[reboot, parameter[self, target_mode, timeout_ms]]:
constant[Reboots the device.
Args:
target_mode: Normal reboot when unspecified (or None). Can specify
other target modes, such as 'recovery' or 'bootloader'.
timeout_ms: Optional timeout in milliseconds to wait for a response.
Returns:
Usually the empty string. Depends on the bootloader and the target_mode.
]
return[call[name[self]._simple_command, parameter[constant[reboot]]]] | keyword[def] identifier[reboot] ( identifier[self] , identifier[target_mode] = keyword[None] , identifier[timeout_ms] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_simple_command] ( literal[string] , identifier[arg] = identifier[target_mode] ,
identifier[timeout_ms] = identifier[timeout_ms] ) | def reboot(self, target_mode=None, timeout_ms=None):
"""Reboots the device.
Args:
target_mode: Normal reboot when unspecified (or None). Can specify
other target modes, such as 'recovery' or 'bootloader'.
timeout_ms: Optional timeout in milliseconds to wait for a response.
Returns:
Usually the empty string. Depends on the bootloader and the target_mode.
"""
return self._simple_command('reboot', arg=target_mode, timeout_ms=timeout_ms) |
def stage_name(self):
"""
    Get the stage name of the current job instance.
    Because a job instance can be created in different ways, each returning different data,
    we have to check where to get the stage name from.
:return: stage name.
"""
if 'stage_name' in self.data and self.data.stage_name:
return self.data.get('stage_name')
else:
return self.stage.data.name | def function[stage_name, parameter[self]]:
constant[
    Get the stage name of the current job instance.
    Because a job instance can be created in different ways, each returning different data,
    we have to check where to get the stage name from.
:return: stage name.
]
if <ast.BoolOp object at 0x7da18f811420> begin[:]
return[call[name[self].data.get, parameter[constant[stage_name]]]] | keyword[def] identifier[stage_name] ( identifier[self] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[data] keyword[and] identifier[self] . identifier[data] . identifier[stage_name] :
keyword[return] identifier[self] . identifier[data] . identifier[get] ( literal[string] )
keyword[else] :
keyword[return] identifier[self] . identifier[stage] . identifier[data] . identifier[name] | def stage_name(self):
"""
    Get the stage name of the current job instance.
    Because a job instance can be created in different ways, each returning different data,
    we have to check where to get the stage name from.
:return: stage name.
"""
if 'stage_name' in self.data and self.data.stage_name:
return self.data.get('stage_name') # depends on [control=['if'], data=[]]
else:
return self.stage.data.name |
def detectors(regex=None, sep='\t', temporary=False):
"""Print the detectors table"""
db = DBManager(temporary=temporary)
dt = db.detectors
if regex is not None:
try:
re.compile(regex)
except re.error:
log.error("Invalid regex!")
return
dt = dt[dt['OID'].str.contains(regex) | dt['CITY'].str.contains(regex)]
dt.to_csv(sys.stdout, sep=sep) | def function[detectors, parameter[regex, sep, temporary]]:
constant[Print the detectors table]
variable[db] assign[=] call[name[DBManager], parameter[]]
variable[dt] assign[=] name[db].detectors
if compare[name[regex] is_not constant[None]] begin[:]
<ast.Try object at 0x7da1b253b5b0>
variable[dt] assign[=] call[name[dt]][binary_operation[call[call[name[dt]][constant[OID]].str.contains, parameter[name[regex]]] <ast.BitOr object at 0x7da2590d6aa0> call[call[name[dt]][constant[CITY]].str.contains, parameter[name[regex]]]]]
call[name[dt].to_csv, parameter[name[sys].stdout]] | keyword[def] identifier[detectors] ( identifier[regex] = keyword[None] , identifier[sep] = literal[string] , identifier[temporary] = keyword[False] ):
literal[string]
identifier[db] = identifier[DBManager] ( identifier[temporary] = identifier[temporary] )
identifier[dt] = identifier[db] . identifier[detectors]
keyword[if] identifier[regex] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[re] . identifier[compile] ( identifier[regex] )
keyword[except] identifier[re] . identifier[error] :
identifier[log] . identifier[error] ( literal[string] )
keyword[return]
identifier[dt] = identifier[dt] [ identifier[dt] [ literal[string] ]. identifier[str] . identifier[contains] ( identifier[regex] )| identifier[dt] [ literal[string] ]. identifier[str] . identifier[contains] ( identifier[regex] )]
identifier[dt] . identifier[to_csv] ( identifier[sys] . identifier[stdout] , identifier[sep] = identifier[sep] ) | def detectors(regex=None, sep='\t', temporary=False):
"""Print the detectors table"""
db = DBManager(temporary=temporary)
dt = db.detectors
if regex is not None:
try:
re.compile(regex) # depends on [control=['try'], data=[]]
except re.error:
log.error('Invalid regex!')
return # depends on [control=['except'], data=[]]
dt = dt[dt['OID'].str.contains(regex) | dt['CITY'].str.contains(regex)] # depends on [control=['if'], data=['regex']]
dt.to_csv(sys.stdout, sep=sep) |
def destroy_balancer(balancer_id, profile, **libcloud_kwargs):
'''
Destroy a load balancer
:param balancer_id: LoadBalancer ID which should be used
:type balancer_id: ``str``
:param profile: The profile key
:type profile: ``str``
:param libcloud_kwargs: Extra arguments for the driver's destroy_balancer method
:type libcloud_kwargs: ``dict``
:return: ``True`` if the destroy was successful, otherwise ``False``.
:rtype: ``bool``
CLI Example:
.. code-block:: bash
        salt myminion libcloud_loadbalancer.destroy_balancer balancer_1 profile1
'''
conn = _get_driver(profile=profile)
libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
balancer = conn.get_balancer(balancer_id)
return conn.destroy_balancer(balancer, **libcloud_kwargs) | def function[destroy_balancer, parameter[balancer_id, profile]]:
constant[
Destroy a load balancer
:param balancer_id: LoadBalancer ID which should be used
:type balancer_id: ``str``
:param profile: The profile key
:type profile: ``str``
:param libcloud_kwargs: Extra arguments for the driver's destroy_balancer method
:type libcloud_kwargs: ``dict``
:return: ``True`` if the destroy was successful, otherwise ``False``.
:rtype: ``bool``
CLI Example:
.. code-block:: bash
    salt myminion libcloud_loadbalancer.destroy_balancer balancer_1 profile1
]
variable[conn] assign[=] call[name[_get_driver], parameter[]]
variable[libcloud_kwargs] assign[=] call[name[salt].utils.args.clean_kwargs, parameter[]]
variable[balancer] assign[=] call[name[conn].get_balancer, parameter[name[balancer_id]]]
return[call[name[conn].destroy_balancer, parameter[name[balancer]]]] | keyword[def] identifier[destroy_balancer] ( identifier[balancer_id] , identifier[profile] ,** identifier[libcloud_kwargs] ):
literal[string]
identifier[conn] = identifier[_get_driver] ( identifier[profile] = identifier[profile] )
identifier[libcloud_kwargs] = identifier[salt] . identifier[utils] . identifier[args] . identifier[clean_kwargs] (** identifier[libcloud_kwargs] )
identifier[balancer] = identifier[conn] . identifier[get_balancer] ( identifier[balancer_id] )
keyword[return] identifier[conn] . identifier[destroy_balancer] ( identifier[balancer] ,** identifier[libcloud_kwargs] ) | def destroy_balancer(balancer_id, profile, **libcloud_kwargs):
"""
Destroy a load balancer
:param balancer_id: LoadBalancer ID which should be used
:type balancer_id: ``str``
:param profile: The profile key
:type profile: ``str``
:param libcloud_kwargs: Extra arguments for the driver's destroy_balancer method
:type libcloud_kwargs: ``dict``
:return: ``True`` if the destroy was successful, otherwise ``False``.
:rtype: ``bool``
CLI Example:
.. code-block:: bash
        salt myminion libcloud_loadbalancer.destroy_balancer balancer_1 profile1
"""
conn = _get_driver(profile=profile)
libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
balancer = conn.get_balancer(balancer_id)
return conn.destroy_balancer(balancer, **libcloud_kwargs) |
def process_text(text, save_xml='cwms_output.xml'):
"""Processes text using the CWMS web service.
Parameters
----------
text : str
Text to process
Returns
-------
cp : indra.sources.cwms.CWMSProcessor
A CWMSProcessor, which contains a list of INDRA statements in its
statements attribute.
"""
xml = client.send_query(text, 'cwmsreader')
# There are actually two EKBs in the xml document. Extract the second.
first_end = xml.find('</ekb>') # End of first EKB
second_start = xml.find('<ekb', first_end) # Start of second EKB
second_end = xml.find('</ekb>', second_start) # End of second EKB
second_ekb = xml[second_start:second_end+len('</ekb>')] # second EKB
if save_xml:
with open(save_xml, 'wb') as fh:
fh.write(second_ekb.encode('utf-8'))
return process_ekb(second_ekb) | def function[process_text, parameter[text, save_xml]]:
constant[Processes text using the CWMS web service.
Parameters
----------
text : str
Text to process
Returns
-------
cp : indra.sources.cwms.CWMSProcessor
A CWMSProcessor, which contains a list of INDRA statements in its
statements attribute.
]
variable[xml] assign[=] call[name[client].send_query, parameter[name[text], constant[cwmsreader]]]
variable[first_end] assign[=] call[name[xml].find, parameter[constant[</ekb>]]]
variable[second_start] assign[=] call[name[xml].find, parameter[constant[<ekb], name[first_end]]]
variable[second_end] assign[=] call[name[xml].find, parameter[constant[</ekb>], name[second_start]]]
variable[second_ekb] assign[=] call[name[xml]][<ast.Slice object at 0x7da18dc04220>]
if name[save_xml] begin[:]
with call[name[open], parameter[name[save_xml], constant[wb]]] begin[:]
call[name[fh].write, parameter[call[name[second_ekb].encode, parameter[constant[utf-8]]]]]
return[call[name[process_ekb], parameter[name[second_ekb]]]] | keyword[def] identifier[process_text] ( identifier[text] , identifier[save_xml] = literal[string] ):
literal[string]
identifier[xml] = identifier[client] . identifier[send_query] ( identifier[text] , literal[string] )
identifier[first_end] = identifier[xml] . identifier[find] ( literal[string] )
identifier[second_start] = identifier[xml] . identifier[find] ( literal[string] , identifier[first_end] )
identifier[second_end] = identifier[xml] . identifier[find] ( literal[string] , identifier[second_start] )
identifier[second_ekb] = identifier[xml] [ identifier[second_start] : identifier[second_end] + identifier[len] ( literal[string] )]
keyword[if] identifier[save_xml] :
keyword[with] identifier[open] ( identifier[save_xml] , literal[string] ) keyword[as] identifier[fh] :
identifier[fh] . identifier[write] ( identifier[second_ekb] . identifier[encode] ( literal[string] ))
keyword[return] identifier[process_ekb] ( identifier[second_ekb] ) | def process_text(text, save_xml='cwms_output.xml'):
"""Processes text using the CWMS web service.
Parameters
----------
text : str
Text to process
Returns
-------
cp : indra.sources.cwms.CWMSProcessor
A CWMSProcessor, which contains a list of INDRA statements in its
statements attribute.
"""
xml = client.send_query(text, 'cwmsreader')
# There are actually two EKBs in the xml document. Extract the second.
first_end = xml.find('</ekb>') # End of first EKB
second_start = xml.find('<ekb', first_end) # Start of second EKB
second_end = xml.find('</ekb>', second_start) # End of second EKB
second_ekb = xml[second_start:second_end + len('</ekb>')] # second EKB
if save_xml:
with open(save_xml, 'wb') as fh:
fh.write(second_ekb.encode('utf-8')) # depends on [control=['with'], data=['fh']] # depends on [control=['if'], data=[]]
return process_ekb(second_ekb) |
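
The second-EKB extraction is plain find/slice arithmetic; a toy check on a stand-in document:

# Toy check of the find/slice logic used above to isolate the second EKB.
xml = '<ekb id="1">first</ekb>junk<ekb id="2">second</ekb>'
first_end = xml.find('</ekb>')
second_start = xml.find('<ekb', first_end)
second_end = xml.find('</ekb>', second_start)
print(xml[second_start:second_end + len('</ekb>')])
# <ekb id="2">second</ekb>
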
def new_child(self, term, value, **kwargs):
"""Create a new term and add it to this term as a child. Creates grandchildren from the kwargs.
    :param term: term name. Just the record term
    :param value: Value to assign to the term
    :param kwargs: Term properties, which create children of the child term.
"""
tc = self.doc.get_term_class(term.lower())
c = tc(term, str(value) if value is not None else None,
parent=self, doc=self.doc, section=self.section).new_children(**kwargs)
c.term_value_name = self.doc.decl_terms.get(c.join, {}).get('termvaluename', c.term_value_name)
assert not c.term_is("*.Section")
self.children.append(c)
return c | def function[new_child, parameter[self, term, value]]:
constant[Create a new term and add it to this term as a child. Creates grandchildren from the kwargs.
    :param term: term name. Just the record term
    :param value: Value to assign to the term
    :param kwargs: Term properties, which create children of the child term.
]
variable[tc] assign[=] call[name[self].doc.get_term_class, parameter[call[name[term].lower, parameter[]]]]
variable[c] assign[=] call[call[name[tc], parameter[name[term], <ast.IfExp object at 0x7da18c4cc550>]].new_children, parameter[]]
name[c].term_value_name assign[=] call[call[name[self].doc.decl_terms.get, parameter[name[c].join, dictionary[[], []]]].get, parameter[constant[termvaluename], name[c].term_value_name]]
assert[<ast.UnaryOp object at 0x7da18c4cc250>]
call[name[self].children.append, parameter[name[c]]]
return[name[c]] | keyword[def] identifier[new_child] ( identifier[self] , identifier[term] , identifier[value] ,** identifier[kwargs] ):
literal[string]
identifier[tc] = identifier[self] . identifier[doc] . identifier[get_term_class] ( identifier[term] . identifier[lower] ())
identifier[c] = identifier[tc] ( identifier[term] , identifier[str] ( identifier[value] ) keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None] ,
identifier[parent] = identifier[self] , identifier[doc] = identifier[self] . identifier[doc] , identifier[section] = identifier[self] . identifier[section] ). identifier[new_children] (** identifier[kwargs] )
identifier[c] . identifier[term_value_name] = identifier[self] . identifier[doc] . identifier[decl_terms] . identifier[get] ( identifier[c] . identifier[join] ,{}). identifier[get] ( literal[string] , identifier[c] . identifier[term_value_name] )
keyword[assert] keyword[not] identifier[c] . identifier[term_is] ( literal[string] )
identifier[self] . identifier[children] . identifier[append] ( identifier[c] )
keyword[return] identifier[c] | def new_child(self, term, value, **kwargs):
"""Create a new term and add it to this term as a child. Creates grandchildren from the kwargs.
    :param term: term name. Just the record term
    :param value: Value to assign to the term
    :param kwargs: Term properties, which create children of the child term.
"""
tc = self.doc.get_term_class(term.lower())
c = tc(term, str(value) if value is not None else None, parent=self, doc=self.doc, section=self.section).new_children(**kwargs)
c.term_value_name = self.doc.decl_terms.get(c.join, {}).get('termvaluename', c.term_value_name)
assert not c.term_is('*.Section')
self.children.append(c)
return c |
def _schedule_next_run(self):
"""
Compute the instant when this job should run next.
"""
if self.unit not in ('seconds', 'minutes', 'hours', 'days', 'weeks'):
raise ScheduleValueError('Invalid unit')
if self.latest is not None:
if not (self.latest >= self.interval):
raise ScheduleError('`latest` is greater than `interval`')
interval = random.randint(self.interval, self.latest)
else:
interval = self.interval
self.period = datetime.timedelta(**{self.unit: interval})
self.next_run = datetime.datetime.now() + self.period
if self.start_day is not None:
if self.unit != 'weeks':
raise ScheduleValueError('`unit` should be \'weeks\'')
weekdays = (
'monday',
'tuesday',
'wednesday',
'thursday',
'friday',
'saturday',
'sunday'
)
if self.start_day not in weekdays:
raise ScheduleValueError('Invalid start day')
weekday = weekdays.index(self.start_day)
days_ahead = weekday - self.next_run.weekday()
if days_ahead <= 0: # Target day already happened this week
days_ahead += 7
self.next_run += datetime.timedelta(days_ahead) - self.period
if self.at_time is not None:
if (self.unit not in ('days', 'hours', 'minutes')
and self.start_day is None):
raise ScheduleValueError(('Invalid unit without'
' specifying start day'))
kwargs = {
'second': self.at_time.second,
'microsecond': 0
}
if self.unit == 'days' or self.start_day is not None:
kwargs['hour'] = self.at_time.hour
if self.unit in ['days', 'hours'] or self.start_day is not None:
kwargs['minute'] = self.at_time.minute
self.next_run = self.next_run.replace(**kwargs)
# If we are running for the first time, make sure we run
# at the specified time *today* (or *this hour*) as well
if not self.last_run:
now = datetime.datetime.now()
if (self.unit == 'days' and self.at_time > now.time() and
self.interval == 1):
self.next_run = self.next_run - datetime.timedelta(days=1)
elif self.unit == 'hours' \
and self.at_time.minute > now.minute \
or (self.at_time.minute == now.minute
and self.at_time.second > now.second):
self.next_run = self.next_run - datetime.timedelta(hours=1)
elif self.unit == 'minutes' \
and self.at_time.second > now.second:
self.next_run = self.next_run - \
datetime.timedelta(minutes=1)
if self.start_day is not None and self.at_time is not None:
# Let's see if we will still make that time we specified today
if (self.next_run - datetime.datetime.now()).days >= 7:
self.next_run -= self.period | def function[_schedule_next_run, parameter[self]]:
constant[
Compute the instant when this job should run next.
]
if compare[name[self].unit <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b1f95d20>, <ast.Constant object at 0x7da1b1f97190>, <ast.Constant object at 0x7da1b1f97910>, <ast.Constant object at 0x7da1b1f95570>, <ast.Constant object at 0x7da1b1f958d0>]]] begin[:]
<ast.Raise object at 0x7da1b1f95090>
if compare[name[self].latest is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b1f940d0> begin[:]
<ast.Raise object at 0x7da1b1f965f0>
variable[interval] assign[=] call[name[random].randint, parameter[name[self].interval, name[self].latest]]
name[self].period assign[=] call[name[datetime].timedelta, parameter[]]
name[self].next_run assign[=] binary_operation[call[name[datetime].datetime.now, parameter[]] + name[self].period]
if compare[name[self].start_day is_not constant[None]] begin[:]
if compare[name[self].unit not_equal[!=] constant[weeks]] begin[:]
<ast.Raise object at 0x7da1b1f97370>
variable[weekdays] assign[=] tuple[[<ast.Constant object at 0x7da1b1f95210>, <ast.Constant object at 0x7da1b1f97d30>, <ast.Constant object at 0x7da1b1f945b0>, <ast.Constant object at 0x7da1b1f96d40>, <ast.Constant object at 0x7da1b1f966b0>, <ast.Constant object at 0x7da1b1f972e0>, <ast.Constant object at 0x7da1b1f955d0>]]
if compare[name[self].start_day <ast.NotIn object at 0x7da2590d7190> name[weekdays]] begin[:]
<ast.Raise object at 0x7da1b1f94f40>
variable[weekday] assign[=] call[name[weekdays].index, parameter[name[self].start_day]]
variable[days_ahead] assign[=] binary_operation[name[weekday] - call[name[self].next_run.weekday, parameter[]]]
if compare[name[days_ahead] less_or_equal[<=] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b1f94730>
<ast.AugAssign object at 0x7da1b1f96c50>
if compare[name[self].at_time is_not constant[None]] begin[:]
if <ast.BoolOp object at 0x7da1b1f97dc0> begin[:]
<ast.Raise object at 0x7da1b1f96b60>
variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b1f976a0>, <ast.Constant object at 0x7da1b1f95120>], [<ast.Attribute object at 0x7da1b1f96b00>, <ast.Constant object at 0x7da1b1c78880>]]
if <ast.BoolOp object at 0x7da1b1c7b3a0> begin[:]
call[name[kwargs]][constant[hour]] assign[=] name[self].at_time.hour
if <ast.BoolOp object at 0x7da1b1c79660> begin[:]
call[name[kwargs]][constant[minute]] assign[=] name[self].at_time.minute
name[self].next_run assign[=] call[name[self].next_run.replace, parameter[]]
if <ast.UnaryOp object at 0x7da1b1c7a110> begin[:]
variable[now] assign[=] call[name[datetime].datetime.now, parameter[]]
if <ast.BoolOp object at 0x7da1b1c78af0> begin[:]
name[self].next_run assign[=] binary_operation[name[self].next_run - call[name[datetime].timedelta, parameter[]]]
if <ast.BoolOp object at 0x7da1b1c79780> begin[:]
if compare[binary_operation[name[self].next_run - call[name[datetime].datetime.now, parameter[]]].days greater_or_equal[>=] constant[7]] begin[:]
<ast.AugAssign object at 0x7da1b1c7afe0> | keyword[def] identifier[_schedule_next_run] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[unit] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[ScheduleValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[latest] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] ( identifier[self] . identifier[latest] >= identifier[self] . identifier[interval] ):
keyword[raise] identifier[ScheduleError] ( literal[string] )
identifier[interval] = identifier[random] . identifier[randint] ( identifier[self] . identifier[interval] , identifier[self] . identifier[latest] )
keyword[else] :
identifier[interval] = identifier[self] . identifier[interval]
identifier[self] . identifier[period] = identifier[datetime] . identifier[timedelta] (**{ identifier[self] . identifier[unit] : identifier[interval] })
identifier[self] . identifier[next_run] = identifier[datetime] . identifier[datetime] . identifier[now] ()+ identifier[self] . identifier[period]
keyword[if] identifier[self] . identifier[start_day] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[self] . identifier[unit] != literal[string] :
keyword[raise] identifier[ScheduleValueError] ( literal[string] )
identifier[weekdays] =(
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string]
)
keyword[if] identifier[self] . identifier[start_day] keyword[not] keyword[in] identifier[weekdays] :
keyword[raise] identifier[ScheduleValueError] ( literal[string] )
identifier[weekday] = identifier[weekdays] . identifier[index] ( identifier[self] . identifier[start_day] )
identifier[days_ahead] = identifier[weekday] - identifier[self] . identifier[next_run] . identifier[weekday] ()
keyword[if] identifier[days_ahead] <= literal[int] :
identifier[days_ahead] += literal[int]
identifier[self] . identifier[next_run] += identifier[datetime] . identifier[timedelta] ( identifier[days_ahead] )- identifier[self] . identifier[period]
keyword[if] identifier[self] . identifier[at_time] keyword[is] keyword[not] keyword[None] :
keyword[if] ( identifier[self] . identifier[unit] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] )
keyword[and] identifier[self] . identifier[start_day] keyword[is] keyword[None] ):
keyword[raise] identifier[ScheduleValueError] (( literal[string]
literal[string] ))
identifier[kwargs] ={
literal[string] : identifier[self] . identifier[at_time] . identifier[second] ,
literal[string] : literal[int]
}
keyword[if] identifier[self] . identifier[unit] == literal[string] keyword[or] identifier[self] . identifier[start_day] keyword[is] keyword[not] keyword[None] :
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[at_time] . identifier[hour]
keyword[if] identifier[self] . identifier[unit] keyword[in] [ literal[string] , literal[string] ] keyword[or] identifier[self] . identifier[start_day] keyword[is] keyword[not] keyword[None] :
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[at_time] . identifier[minute]
identifier[self] . identifier[next_run] = identifier[self] . identifier[next_run] . identifier[replace] (** identifier[kwargs] )
keyword[if] keyword[not] identifier[self] . identifier[last_run] :
identifier[now] = identifier[datetime] . identifier[datetime] . identifier[now] ()
keyword[if] ( identifier[self] . identifier[unit] == literal[string] keyword[and] identifier[self] . identifier[at_time] > identifier[now] . identifier[time] () keyword[and]
identifier[self] . identifier[interval] == literal[int] ):
identifier[self] . identifier[next_run] = identifier[self] . identifier[next_run] - identifier[datetime] . identifier[timedelta] ( identifier[days] = literal[int] )
keyword[elif] identifier[self] . identifier[unit] == literal[string] keyword[and] identifier[self] . identifier[at_time] . identifier[minute] > identifier[now] . identifier[minute] keyword[or] ( identifier[self] . identifier[at_time] . identifier[minute] == identifier[now] . identifier[minute]
keyword[and] identifier[self] . identifier[at_time] . identifier[second] > identifier[now] . identifier[second] ):
identifier[self] . identifier[next_run] = identifier[self] . identifier[next_run] - identifier[datetime] . identifier[timedelta] ( identifier[hours] = literal[int] )
keyword[elif] identifier[self] . identifier[unit] == literal[string] keyword[and] identifier[self] . identifier[at_time] . identifier[second] > identifier[now] . identifier[second] :
identifier[self] . identifier[next_run] = identifier[self] . identifier[next_run] - identifier[datetime] . identifier[timedelta] ( identifier[minutes] = literal[int] )
keyword[if] identifier[self] . identifier[start_day] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[at_time] keyword[is] keyword[not] keyword[None] :
keyword[if] ( identifier[self] . identifier[next_run] - identifier[datetime] . identifier[datetime] . identifier[now] ()). identifier[days] >= literal[int] :
identifier[self] . identifier[next_run] -= identifier[self] . identifier[period] | def _schedule_next_run(self):
"""
Compute the instant when this job should run next.
"""
if self.unit not in ('seconds', 'minutes', 'hours', 'days', 'weeks'):
raise ScheduleValueError('Invalid unit') # depends on [control=['if'], data=[]]
if self.latest is not None:
if not self.latest >= self.interval:
            raise ScheduleError('`latest` must be greater than or equal to `interval`') # depends on [control=['if'], data=[]]
interval = random.randint(self.interval, self.latest) # depends on [control=['if'], data=[]]
else:
interval = self.interval
self.period = datetime.timedelta(**{self.unit: interval})
self.next_run = datetime.datetime.now() + self.period
if self.start_day is not None:
if self.unit != 'weeks':
raise ScheduleValueError("`unit` should be 'weeks'") # depends on [control=['if'], data=[]]
weekdays = ('monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday')
if self.start_day not in weekdays:
raise ScheduleValueError('Invalid start day') # depends on [control=['if'], data=[]]
weekday = weekdays.index(self.start_day)
days_ahead = weekday - self.next_run.weekday()
if days_ahead <= 0: # Target day already happened this week
days_ahead += 7 # depends on [control=['if'], data=['days_ahead']]
self.next_run += datetime.timedelta(days_ahead) - self.period # depends on [control=['if'], data=[]]
if self.at_time is not None:
if self.unit not in ('days', 'hours', 'minutes') and self.start_day is None:
raise ScheduleValueError('Invalid unit without specifying start day') # depends on [control=['if'], data=[]]
kwargs = {'second': self.at_time.second, 'microsecond': 0}
if self.unit == 'days' or self.start_day is not None:
kwargs['hour'] = self.at_time.hour # depends on [control=['if'], data=[]]
if self.unit in ['days', 'hours'] or self.start_day is not None:
kwargs['minute'] = self.at_time.minute # depends on [control=['if'], data=[]]
self.next_run = self.next_run.replace(**kwargs)
# If we are running for the first time, make sure we run
# at the specified time *today* (or *this hour*) as well
if not self.last_run:
now = datetime.datetime.now()
if self.unit == 'days' and self.at_time > now.time() and (self.interval == 1):
self.next_run = self.next_run - datetime.timedelta(days=1) # depends on [control=['if'], data=[]]
            elif self.unit == 'hours' and (self.at_time.minute > now.minute or (self.at_time.minute == now.minute and self.at_time.second > now.second)):
self.next_run = self.next_run - datetime.timedelta(hours=1) # depends on [control=['if'], data=[]]
elif self.unit == 'minutes' and self.at_time.second > now.second:
self.next_run = self.next_run - datetime.timedelta(minutes=1) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.start_day is not None and self.at_time is not None:
# Let's see if we will still make that time we specified today
if (self.next_run - datetime.datetime.now()).days >= 7:
self.next_run -= self.period # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
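A minimal usage sketch for the `_schedule_next_run` logic above, assuming it is a method on the `schedule` library's Job class (the code matches that library); the public API below is how the method gets exercised indirectly when jobs are registered and run.

import time
import schedule

def job():
    print('running')

schedule.every(10).minutes.do(job)            # plain interval: unit='minutes'
schedule.every().monday.at('13:15').do(job)   # start_day + at_time branch
schedule.every(5).to(10).seconds.do(job)      # randomized interval via `latest`

for _ in range(3):
    schedule.run_pending()                    # fires jobs whose next_run has passed
    time.sleep(1)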
def toc(self):
"""
Returns table of contents as a block_token.List instance.
"""
from mistletoe.block_token import List
def get_indent(level):
if self.omit_title:
level -= 1
return ' ' * 4 * (level - 1)
def build_list_item(heading):
level, content = heading
template = '{indent}- {content}\n'
return template.format(indent=get_indent(level), content=content)
return List([build_list_item(heading) for heading in self._headings]) | def function[toc, parameter[self]]:
constant[
Returns table of contents as a block_token.List instance.
]
from relative_module[mistletoe.block_token] import module[List]
def function[get_indent, parameter[level]]:
if name[self].omit_title begin[:]
<ast.AugAssign object at 0x7da204345e70>
return[binary_operation[binary_operation[constant[ ] * constant[4]] * binary_operation[name[level] - constant[1]]]]
def function[build_list_item, parameter[heading]]:
<ast.Tuple object at 0x7da204345330> assign[=] name[heading]
variable[template] assign[=] constant[{indent}- {content}
]
return[call[name[template].format, parameter[]]]
return[call[name[List], parameter[<ast.ListComp object at 0x7da204344d90>]]] | keyword[def] identifier[toc] ( identifier[self] ):
literal[string]
keyword[from] identifier[mistletoe] . identifier[block_token] keyword[import] identifier[List]
keyword[def] identifier[get_indent] ( identifier[level] ):
keyword[if] identifier[self] . identifier[omit_title] :
identifier[level] -= literal[int]
keyword[return] literal[string] * literal[int] *( identifier[level] - literal[int] )
keyword[def] identifier[build_list_item] ( identifier[heading] ):
identifier[level] , identifier[content] = identifier[heading]
identifier[template] = literal[string]
keyword[return] identifier[template] . identifier[format] ( identifier[indent] = identifier[get_indent] ( identifier[level] ), identifier[content] = identifier[content] )
keyword[return] identifier[List] ([ identifier[build_list_item] ( identifier[heading] ) keyword[for] identifier[heading] keyword[in] identifier[self] . identifier[_headings] ]) | def toc(self):
"""
Returns table of contents as a block_token.List instance.
"""
from mistletoe.block_token import List
def get_indent(level):
if self.omit_title:
level -= 1 # depends on [control=['if'], data=[]]
return ' ' * 4 * (level - 1)
def build_list_item(heading):
(level, content) = heading
template = '{indent}- {content}\n'
return template.format(indent=get_indent(level), content=content)
return List([build_list_item(heading) for heading in self._headings]) |
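The nested helpers in `toc` are small enough to illustrate standalone; the sketch below fakes the `_headings` list of (level, content) pairs that the surrounding renderer would normally collect while rendering a document.

headings = [(1, 'Intro'), (2, 'Setup'), (2, 'Usage'), (3, 'Advanced')]

def get_indent(level, omit_title=False):
    if omit_title:
        level -= 1
    return ' ' * 4 * (level - 1)

toc_text = ''.join('{indent}- {content}\n'.format(indent=get_indent(level),
                                                  content=content)
                   for level, content in headings)
print(toc_text)
# - Intro
#     - Setup
#     - Usage
#         - Advanced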
def login(session, username, password, class_name=None):
"""
Login on coursera.org with the given credentials.
This adds the following cookies to the session:
sessionid, maestro_login, maestro_login_flag
"""
logging.debug('Initiating login.')
try:
session.cookies.clear('.coursera.org')
logging.debug('Cleared .coursera.org cookies.')
except KeyError:
logging.debug('There were no .coursera.org cookies to be cleared.')
# Hit class url
if class_name is not None:
class_url = CLASS_URL.format(class_name=class_name)
r = requests.get(class_url, allow_redirects=False)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as e:
logging.error(e)
raise ClassNotFound(class_name)
headers = prepare_auth_headers(session, include_cauth=False)
data = {
'email': username,
'password': password,
'webrequest': 'true'
}
# Auth API V3
r = session.post(AUTH_URL_V3, data=data,
headers=headers, allow_redirects=False)
try:
r.raise_for_status()
        # Somehow the order of the cookie parameters matters
        # to coursera!
v = session.cookies.pop('CAUTH')
session.cookies.set('CAUTH', v)
except requests.exceptions.HTTPError as e:
raise AuthenticationFailed('Cannot login on coursera.org: %s' % e)
logging.info('Logged in on coursera.org.') | def function[login, parameter[session, username, password, class_name]]:
constant[
Login on coursera.org with the given credentials.
This adds the following cookies to the session:
sessionid, maestro_login, maestro_login_flag
]
call[name[logging].debug, parameter[constant[Initiating login.]]]
<ast.Try object at 0x7da20c795ae0>
if compare[name[class_name] is_not constant[None]] begin[:]
variable[class_url] assign[=] call[name[CLASS_URL].format, parameter[]]
variable[r] assign[=] call[name[requests].get, parameter[name[class_url]]]
<ast.Try object at 0x7da20c796230>
variable[headers] assign[=] call[name[prepare_auth_headers], parameter[name[session]]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da18dc046d0>, <ast.Constant object at 0x7da18dc04250>, <ast.Constant object at 0x7da18dc07700>], [<ast.Name object at 0x7da18dc07cd0>, <ast.Name object at 0x7da18dc04880>, <ast.Constant object at 0x7da18dc06350>]]
variable[r] assign[=] call[name[session].post, parameter[name[AUTH_URL_V3]]]
<ast.Try object at 0x7da18f8131f0>
call[name[logging].info, parameter[constant[Logged in on coursera.org.]]] | keyword[def] identifier[login] ( identifier[session] , identifier[username] , identifier[password] , identifier[class_name] = keyword[None] ):
literal[string]
identifier[logging] . identifier[debug] ( literal[string] )
keyword[try] :
identifier[session] . identifier[cookies] . identifier[clear] ( literal[string] )
identifier[logging] . identifier[debug] ( literal[string] )
keyword[except] identifier[KeyError] :
identifier[logging] . identifier[debug] ( literal[string] )
keyword[if] identifier[class_name] keyword[is] keyword[not] keyword[None] :
identifier[class_url] = identifier[CLASS_URL] . identifier[format] ( identifier[class_name] = identifier[class_name] )
identifier[r] = identifier[requests] . identifier[get] ( identifier[class_url] , identifier[allow_redirects] = keyword[False] )
keyword[try] :
identifier[r] . identifier[raise_for_status] ()
keyword[except] identifier[requests] . identifier[exceptions] . identifier[HTTPError] keyword[as] identifier[e] :
identifier[logging] . identifier[error] ( identifier[e] )
keyword[raise] identifier[ClassNotFound] ( identifier[class_name] )
identifier[headers] = identifier[prepare_auth_headers] ( identifier[session] , identifier[include_cauth] = keyword[False] )
identifier[data] ={
literal[string] : identifier[username] ,
literal[string] : identifier[password] ,
literal[string] : literal[string]
}
identifier[r] = identifier[session] . identifier[post] ( identifier[AUTH_URL_V3] , identifier[data] = identifier[data] ,
identifier[headers] = identifier[headers] , identifier[allow_redirects] = keyword[False] )
keyword[try] :
identifier[r] . identifier[raise_for_status] ()
identifier[v] = identifier[session] . identifier[cookies] . identifier[pop] ( literal[string] )
identifier[session] . identifier[cookies] . identifier[set] ( literal[string] , identifier[v] )
keyword[except] identifier[requests] . identifier[exceptions] . identifier[HTTPError] keyword[as] identifier[e] :
keyword[raise] identifier[AuthenticationFailed] ( literal[string] % identifier[e] )
identifier[logging] . identifier[info] ( literal[string] ) | def login(session, username, password, class_name=None):
"""
Login on coursera.org with the given credentials.
This adds the following cookies to the session:
sessionid, maestro_login, maestro_login_flag
"""
logging.debug('Initiating login.')
try:
session.cookies.clear('.coursera.org')
logging.debug('Cleared .coursera.org cookies.') # depends on [control=['try'], data=[]]
except KeyError:
logging.debug('There were no .coursera.org cookies to be cleared.') # depends on [control=['except'], data=[]]
# Hit class url
if class_name is not None:
class_url = CLASS_URL.format(class_name=class_name)
r = requests.get(class_url, allow_redirects=False)
try:
r.raise_for_status() # depends on [control=['try'], data=[]]
except requests.exceptions.HTTPError as e:
logging.error(e)
raise ClassNotFound(class_name) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=['class_name']]
headers = prepare_auth_headers(session, include_cauth=False)
data = {'email': username, 'password': password, 'webrequest': 'true'}
# Auth API V3
r = session.post(AUTH_URL_V3, data=data, headers=headers, allow_redirects=False)
try:
r.raise_for_status()
        # Somehow the order of the cookie parameters matters
        # to coursera!
v = session.cookies.pop('CAUTH')
session.cookies.set('CAUTH', v) # depends on [control=['try'], data=[]]
except requests.exceptions.HTTPError as e:
raise AuthenticationFailed('Cannot login on coursera.org: %s' % e) # depends on [control=['except'], data=['e']]
logging.info('Logged in on coursera.org.') |
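A hedged calling sketch for `login` above; `ClassNotFound`, `AuthenticationFailed`, and the URL constants are assumed to come from the same coursera-dl-style module, and the credentials and class name are illustrative.

import requests

session = requests.Session()
try:
    login(session, 'user@example.com', 's3cret', class_name='ml-005')
except ClassNotFound as exc:
    print('No such class:', exc)
except AuthenticationFailed as exc:
    print('Login failed:', exc)
else:
    # The CAUTH cookie obtained during login authenticates later API calls.
    assert 'CAUTH' in session.cookies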
async def send_chat_message(self, send_chat_message_request):
"""Send a chat message to a conversation."""
response = hangouts_pb2.SendChatMessageResponse()
await self._pb_request('conversations/sendchatmessage',
send_chat_message_request, response)
return response | <ast.AsyncFunctionDef object at 0x7da18f00c130> | keyword[async] keyword[def] identifier[send_chat_message] ( identifier[self] , identifier[send_chat_message_request] ):
literal[string]
identifier[response] = identifier[hangouts_pb2] . identifier[SendChatMessageResponse] ()
keyword[await] identifier[self] . identifier[_pb_request] ( literal[string] ,
identifier[send_chat_message_request] , identifier[response] )
keyword[return] identifier[response] | async def send_chat_message(self, send_chat_message_request):
"""Send a chat message to a conversation."""
response = hangouts_pb2.SendChatMessageResponse()
await self._pb_request('conversations/sendchatmessage', send_chat_message_request, response)
return response |
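A sketch of driving the coroutine above, assuming a hangups-style client object whose protobuf types come from `hangouts_pb2`; the request is left unpopulated because the real message fields are not shown here.

import asyncio

async def send_empty_message(client):
    # SendChatMessageRequest is assumed to mirror the Response type used above.
    request = hangouts_pb2.SendChatMessageRequest()
    return await client.send_chat_message(request)

# loop = asyncio.get_event_loop()
# response = loop.run_until_complete(send_empty_message(client))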
def run(self):
"""Run checker."""
def split(module):
"""Split module into submodules."""
return tuple(module.split("."))
def modcmp(lib=(), test=()):
"""Compare import modules."""
if len(lib) > len(test):
return False
return all(a == b for a, b in zip(lib, test))
mods_1st_party = set()
mods_3rd_party = set()
# Get 1st party modules (used for absolute imports).
modules = [project2module(self.setup.keywords.get('name', ""))]
if modules[0] in self.known_modules:
modules = self.known_modules[modules[0]]
mods_1st_party.update(split(x) for x in modules)
requirements = self.requirements
if self.setup.redirected:
# Use requirements from setup if available.
requirements = self.setup.get_requirements(
setup=self.processing_setup_py,
tests=True,
)
# Get 3rd party module names based on requirements.
for requirement in requirements:
modules = [project2module(requirement.project_name)]
if modules[0] in KNOWN_3RD_PARTIES:
modules = KNOWN_3RD_PARTIES[modules[0]]
if modules[0] in self.known_modules:
modules = self.known_modules[modules[0]]
mods_3rd_party.update(split(x) for x in modules)
        # When processing a setup.py file, forcefully add setuptools to the
# project requirements. Setuptools might be required to build the
# project, even though it is not listed as a requirement - this
# package is required to run setup.py, so listing it as a setup
# requirement would be pointless.
if self.processing_setup_py:
mods_3rd_party.add(split("setuptools"))
for node in ImportVisitor(self.tree).imports:
_mod = split(node.mod)
_alt = split(node.alt)
if any([_mod[0] == x for x in STDLIB]):
continue
if any([modcmp(x, _mod) or modcmp(x, _alt)
for x in mods_1st_party]):
continue
if any([modcmp(x, _mod) or modcmp(x, _alt)
for x in mods_3rd_party]):
continue
yield (
node.line,
node.offset,
ERRORS['I900'].format(pkg=node.mod),
Flake8Checker,
) | def function[run, parameter[self]]:
constant[Run checker.]
def function[split, parameter[module]]:
constant[Split module into submodules.]
return[call[name[tuple], parameter[call[name[module].split, parameter[constant[.]]]]]]
def function[modcmp, parameter[lib, test]]:
constant[Compare import modules.]
if compare[call[name[len], parameter[name[lib]]] greater[>] call[name[len], parameter[name[test]]]] begin[:]
return[constant[False]]
return[call[name[all], parameter[<ast.GeneratorExp object at 0x7da204960640>]]]
variable[mods_1st_party] assign[=] call[name[set], parameter[]]
variable[mods_3rd_party] assign[=] call[name[set], parameter[]]
variable[modules] assign[=] list[[<ast.Call object at 0x7da204960d30>]]
if compare[call[name[modules]][constant[0]] in name[self].known_modules] begin[:]
variable[modules] assign[=] call[name[self].known_modules][call[name[modules]][constant[0]]]
call[name[mods_1st_party].update, parameter[<ast.GeneratorExp object at 0x7da204961c90>]]
variable[requirements] assign[=] name[self].requirements
if name[self].setup.redirected begin[:]
variable[requirements] assign[=] call[name[self].setup.get_requirements, parameter[]]
for taget[name[requirement]] in starred[name[requirements]] begin[:]
variable[modules] assign[=] list[[<ast.Call object at 0x7da204960d90>]]
if compare[call[name[modules]][constant[0]] in name[KNOWN_3RD_PARTIES]] begin[:]
variable[modules] assign[=] call[name[KNOWN_3RD_PARTIES]][call[name[modules]][constant[0]]]
if compare[call[name[modules]][constant[0]] in name[self].known_modules] begin[:]
variable[modules] assign[=] call[name[self].known_modules][call[name[modules]][constant[0]]]
call[name[mods_3rd_party].update, parameter[<ast.GeneratorExp object at 0x7da20c6aab00>]]
if name[self].processing_setup_py begin[:]
call[name[mods_3rd_party].add, parameter[call[name[split], parameter[constant[setuptools]]]]]
for taget[name[node]] in starred[call[name[ImportVisitor], parameter[name[self].tree]].imports] begin[:]
variable[_mod] assign[=] call[name[split], parameter[name[node].mod]]
variable[_alt] assign[=] call[name[split], parameter[name[node].alt]]
if call[name[any], parameter[<ast.ListComp object at 0x7da20c6aa2f0>]] begin[:]
continue
if call[name[any], parameter[<ast.ListComp object at 0x7da18c4cd570>]] begin[:]
continue
if call[name[any], parameter[<ast.ListComp object at 0x7da18c4cfe80>]] begin[:]
continue
<ast.Yield object at 0x7da20eb2b9d0> | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[def] identifier[split] ( identifier[module] ):
literal[string]
keyword[return] identifier[tuple] ( identifier[module] . identifier[split] ( literal[string] ))
keyword[def] identifier[modcmp] ( identifier[lib] =(), identifier[test] =()):
literal[string]
keyword[if] identifier[len] ( identifier[lib] )> identifier[len] ( identifier[test] ):
keyword[return] keyword[False]
keyword[return] identifier[all] ( identifier[a] == identifier[b] keyword[for] identifier[a] , identifier[b] keyword[in] identifier[zip] ( identifier[lib] , identifier[test] ))
identifier[mods_1st_party] = identifier[set] ()
identifier[mods_3rd_party] = identifier[set] ()
identifier[modules] =[ identifier[project2module] ( identifier[self] . identifier[setup] . identifier[keywords] . identifier[get] ( literal[string] , literal[string] ))]
keyword[if] identifier[modules] [ literal[int] ] keyword[in] identifier[self] . identifier[known_modules] :
identifier[modules] = identifier[self] . identifier[known_modules] [ identifier[modules] [ literal[int] ]]
identifier[mods_1st_party] . identifier[update] ( identifier[split] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[modules] )
identifier[requirements] = identifier[self] . identifier[requirements]
keyword[if] identifier[self] . identifier[setup] . identifier[redirected] :
identifier[requirements] = identifier[self] . identifier[setup] . identifier[get_requirements] (
identifier[setup] = identifier[self] . identifier[processing_setup_py] ,
identifier[tests] = keyword[True] ,
)
keyword[for] identifier[requirement] keyword[in] identifier[requirements] :
identifier[modules] =[ identifier[project2module] ( identifier[requirement] . identifier[project_name] )]
keyword[if] identifier[modules] [ literal[int] ] keyword[in] identifier[KNOWN_3RD_PARTIES] :
identifier[modules] = identifier[KNOWN_3RD_PARTIES] [ identifier[modules] [ literal[int] ]]
keyword[if] identifier[modules] [ literal[int] ] keyword[in] identifier[self] . identifier[known_modules] :
identifier[modules] = identifier[self] . identifier[known_modules] [ identifier[modules] [ literal[int] ]]
identifier[mods_3rd_party] . identifier[update] ( identifier[split] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[modules] )
keyword[if] identifier[self] . identifier[processing_setup_py] :
identifier[mods_3rd_party] . identifier[add] ( identifier[split] ( literal[string] ))
keyword[for] identifier[node] keyword[in] identifier[ImportVisitor] ( identifier[self] . identifier[tree] ). identifier[imports] :
identifier[_mod] = identifier[split] ( identifier[node] . identifier[mod] )
identifier[_alt] = identifier[split] ( identifier[node] . identifier[alt] )
keyword[if] identifier[any] ([ identifier[_mod] [ literal[int] ]== identifier[x] keyword[for] identifier[x] keyword[in] identifier[STDLIB] ]):
keyword[continue]
keyword[if] identifier[any] ([ identifier[modcmp] ( identifier[x] , identifier[_mod] ) keyword[or] identifier[modcmp] ( identifier[x] , identifier[_alt] )
keyword[for] identifier[x] keyword[in] identifier[mods_1st_party] ]):
keyword[continue]
keyword[if] identifier[any] ([ identifier[modcmp] ( identifier[x] , identifier[_mod] ) keyword[or] identifier[modcmp] ( identifier[x] , identifier[_alt] )
keyword[for] identifier[x] keyword[in] identifier[mods_3rd_party] ]):
keyword[continue]
keyword[yield] (
identifier[node] . identifier[line] ,
identifier[node] . identifier[offset] ,
identifier[ERRORS] [ literal[string] ]. identifier[format] ( identifier[pkg] = identifier[node] . identifier[mod] ),
identifier[Flake8Checker] ,
) | def run(self):
"""Run checker."""
def split(module):
"""Split module into submodules."""
return tuple(module.split('.'))
def modcmp(lib=(), test=()):
"""Compare import modules."""
if len(lib) > len(test):
return False # depends on [control=['if'], data=[]]
return all((a == b for (a, b) in zip(lib, test)))
mods_1st_party = set()
mods_3rd_party = set()
# Get 1st party modules (used for absolute imports).
modules = [project2module(self.setup.keywords.get('name', ''))]
if modules[0] in self.known_modules:
modules = self.known_modules[modules[0]] # depends on [control=['if'], data=[]]
mods_1st_party.update((split(x) for x in modules))
requirements = self.requirements
if self.setup.redirected:
# Use requirements from setup if available.
requirements = self.setup.get_requirements(setup=self.processing_setup_py, tests=True) # depends on [control=['if'], data=[]]
# Get 3rd party module names based on requirements.
for requirement in requirements:
modules = [project2module(requirement.project_name)]
if modules[0] in KNOWN_3RD_PARTIES:
modules = KNOWN_3RD_PARTIES[modules[0]] # depends on [control=['if'], data=['KNOWN_3RD_PARTIES']]
if modules[0] in self.known_modules:
modules = self.known_modules[modules[0]] # depends on [control=['if'], data=[]]
mods_3rd_party.update((split(x) for x in modules)) # depends on [control=['for'], data=['requirement']]
        # When processing a setup.py file, forcefully add setuptools to the
# project requirements. Setuptools might be required to build the
# project, even though it is not listed as a requirement - this
# package is required to run setup.py, so listing it as a setup
# requirement would be pointless.
if self.processing_setup_py:
mods_3rd_party.add(split('setuptools')) # depends on [control=['if'], data=[]]
for node in ImportVisitor(self.tree).imports:
_mod = split(node.mod)
_alt = split(node.alt)
if any([_mod[0] == x for x in STDLIB]):
continue # depends on [control=['if'], data=[]]
if any([modcmp(x, _mod) or modcmp(x, _alt) for x in mods_1st_party]):
continue # depends on [control=['if'], data=[]]
if any([modcmp(x, _mod) or modcmp(x, _alt) for x in mods_3rd_party]):
continue # depends on [control=['if'], data=[]]
yield (node.line, node.offset, ERRORS['I900'].format(pkg=node.mod), Flake8Checker) # depends on [control=['for'], data=['node']] |
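A sketch of exercising the checker above the way flake8 itself would, assuming the conventional flake8 plugin constructor signature of (tree, filename); the surrounding Flake8Checker class body is not shown here, so this is illustrative only.

import ast

source = 'import requests\n'
checker = Flake8Checker(ast.parse(source), 'example.py')
for line, offset, message, _cls in checker.run():
    # e.g. an I900 "not listed as a requirement" warning
    print('{}:{}: {}'.format(line, offset, message))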
def get_relation_routes(self, viewset):
"""
Generate routes to serve relational objects. This method will add
a sub-URL for each relational field.
e.g.
A viewset for the following serializer:
class UserSerializer(..):
events = DynamicRelationField(EventSerializer, many=True)
groups = DynamicRelationField(GroupSerializer, many=True)
location = DynamicRelationField(LocationSerializer)
will have the following URLs added:
/users/<pk>/events/
/users/<pk>/groups/
/users/<pk>/location/
"""
routes = []
if not hasattr(viewset, 'serializer_class'):
return routes
if not hasattr(viewset, 'list_related'):
return routes
serializer = viewset.serializer_class()
fields = getattr(serializer, 'get_link_fields', lambda: [])()
route_name = '{basename}-{methodnamehyphen}'
for field_name, field in six.iteritems(fields):
methodname = 'list_related'
url = (
r'^{prefix}/{lookup}/(?P<field_name>%s)'
'{trailing_slash}$' % field_name
)
routes.append(Route(
url=url,
mapping={'get': methodname},
name=replace_methodname(route_name, field_name),
initkwargs={}
))
return routes | def function[get_relation_routes, parameter[self, viewset]]:
constant[
Generate routes to serve relational objects. This method will add
a sub-URL for each relational field.
e.g.
A viewset for the following serializer:
class UserSerializer(..):
events = DynamicRelationField(EventSerializer, many=True)
groups = DynamicRelationField(GroupSerializer, many=True)
location = DynamicRelationField(LocationSerializer)
will have the following URLs added:
/users/<pk>/events/
/users/<pk>/groups/
/users/<pk>/location/
]
variable[routes] assign[=] list[[]]
if <ast.UnaryOp object at 0x7da18eb55bd0> begin[:]
return[name[routes]]
if <ast.UnaryOp object at 0x7da18eb57280> begin[:]
return[name[routes]]
variable[serializer] assign[=] call[name[viewset].serializer_class, parameter[]]
variable[fields] assign[=] call[call[name[getattr], parameter[name[serializer], constant[get_link_fields], <ast.Lambda object at 0x7da18eb56680>]], parameter[]]
variable[route_name] assign[=] constant[{basename}-{methodnamehyphen}]
for taget[tuple[[<ast.Name object at 0x7da18eb57ca0>, <ast.Name object at 0x7da18eb55210>]]] in starred[call[name[six].iteritems, parameter[name[fields]]]] begin[:]
variable[methodname] assign[=] constant[list_related]
variable[url] assign[=] binary_operation[constant[^{prefix}/{lookup}/(?P<field_name>%s){trailing_slash}$] <ast.Mod object at 0x7da2590d6920> name[field_name]]
call[name[routes].append, parameter[call[name[Route], parameter[]]]]
return[name[routes]] | keyword[def] identifier[get_relation_routes] ( identifier[self] , identifier[viewset] ):
literal[string]
identifier[routes] =[]
keyword[if] keyword[not] identifier[hasattr] ( identifier[viewset] , literal[string] ):
keyword[return] identifier[routes]
keyword[if] keyword[not] identifier[hasattr] ( identifier[viewset] , literal[string] ):
keyword[return] identifier[routes]
identifier[serializer] = identifier[viewset] . identifier[serializer_class] ()
identifier[fields] = identifier[getattr] ( identifier[serializer] , literal[string] , keyword[lambda] :[])()
identifier[route_name] = literal[string]
keyword[for] identifier[field_name] , identifier[field] keyword[in] identifier[six] . identifier[iteritems] ( identifier[fields] ):
identifier[methodname] = literal[string]
identifier[url] =(
literal[string]
literal[string] % identifier[field_name]
)
identifier[routes] . identifier[append] ( identifier[Route] (
identifier[url] = identifier[url] ,
identifier[mapping] ={ literal[string] : identifier[methodname] },
identifier[name] = identifier[replace_methodname] ( identifier[route_name] , identifier[field_name] ),
identifier[initkwargs] ={}
))
keyword[return] identifier[routes] | def get_relation_routes(self, viewset):
"""
Generate routes to serve relational objects. This method will add
a sub-URL for each relational field.
e.g.
A viewset for the following serializer:
class UserSerializer(..):
events = DynamicRelationField(EventSerializer, many=True)
groups = DynamicRelationField(GroupSerializer, many=True)
location = DynamicRelationField(LocationSerializer)
will have the following URLs added:
/users/<pk>/events/
/users/<pk>/groups/
/users/<pk>/location/
"""
routes = []
if not hasattr(viewset, 'serializer_class'):
return routes # depends on [control=['if'], data=[]]
if not hasattr(viewset, 'list_related'):
return routes # depends on [control=['if'], data=[]]
serializer = viewset.serializer_class()
fields = getattr(serializer, 'get_link_fields', lambda : [])()
route_name = '{basename}-{methodnamehyphen}'
for (field_name, field) in six.iteritems(fields):
methodname = 'list_related'
url = '^{prefix}/{lookup}/(?P<field_name>%s){trailing_slash}$' % field_name
routes.append(Route(url=url, mapping={'get': methodname}, name=replace_methodname(route_name, field_name), initkwargs={})) # depends on [control=['for'], data=[]]
return routes |
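A sketch of where a method like `get_relation_routes` typically fits, assuming it lives on a DRF router subclass (dynamic-rest's DynamicRouter follows this pattern; the class and viewset names below are illustrative).

from rest_framework import routers

class DynamicRouter(routers.DefaultRouter):
    # get_relation_routes(self, viewset) defined as above

    def get_routes(self, viewset):
        # Append the relational sub-URLs to the standard route set.
        routes = super(DynamicRouter, self).get_routes(viewset)
        routes += self.get_relation_routes(viewset)
        return routes

# router = DynamicRouter()
# router.register(r'users', UserViewSet)  # exposes /users/<pk>/groups/ etc.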
def main() -> None:
"""
Command-line handler for the ``find_recovered_openxml`` tool.
Use the ``--help`` option for help.
"""
parser = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter,
description="""
Tool to recognize and rescue Microsoft Office OpenXML files, even if they have
garbage appended to them.
- Rationale: when you have accidentally deleted files from an NTFS disk, and
they really matter, you should (a) stop what you're doing; (b) clone the disk
to an image file using "dd" under Linux; (c) perform all subsequent
operations on the cloned image (in read-only mode). Those steps might
include:
- ntfsundelete, to find files that the filesystem is still aware of;
- scalpel, to find files based on their contents.
- Scalpel is great at finding stuff efficiently, but it works best when files
can be defined by both a start (header) signature and an end (footer)
signature. However, the Microsoft Office OpenXML file format has a
recognizable header, but no standard footer. In these circumstances, Scalpel
reads up to a certain limit that you specify in its configuration file. (To
retrieve large Powerpoint files, this limit needs to be substantial, e.g.
50 Mb or more, depending on your ways of working with Powerpoint.)
- That means that files emerging from a Scalpel search for DOCX/PPTX/XLSX files
may be
- false positives, having nothing to do with Office;
- clean Office files (the least likely category!);
- Office files with garbage stuck on the end.
- The OpenXML file format is just a zip file. If you stick too much garbage on
the end of a zip file, zip readers will see it as corrupt.
- THIS TOOL detects (and optionally moves) potentially corrupted zipfiles based
on file contents, by unzipping the file and checking for "inner" files with
names like:
File type Contents filename signature (regular expression)
----------------------------------------------------------------
DOCX {DOCX_CONTENTS_REGEX_STR}
PPTX {PPTX_CONTENTS_REGEX_STR}
XLSX {XLSX_CONTENTS_REGEX_STR}
- WARNING: it's possible for an OpenXML file to contain more than one of these.
If so, they may be mis-classified.
- If a file is not immediately readable as a zip, it uses Linux's "zip -FF" to
repair zip files with corrupted ends, and tries again.
- Having found valid-looking files, you can elect to move them elsewhere.
- As an additional and VERY DANGEROUS operation, you can elect to delete files
that this tool doesn't recognize. (Why? Because a 450Gb disk might produce
well in excess of 1.7Tb of candidate files; many will be false positives and
even the true positives will all be expanded to your file size limit, e.g.
50 Mb. You may have a problem with available disk space, so running this tool
  regularly allows you to clear up the junk. Use the --run_repeatedly option to help
with this.)
""".format(
DOCX_CONTENTS_REGEX_STR=DOCX_CONTENTS_REGEX_STR,
PPTX_CONTENTS_REGEX_STR=PPTX_CONTENTS_REGEX_STR,
XLSX_CONTENTS_REGEX_STR=XLSX_CONTENTS_REGEX_STR,
)
)
parser.add_argument(
"filename", nargs="+",
help="File(s) to check. You can also specify directores if you use "
"--recursive"
)
parser.add_argument(
"--recursive", action="store_true",
help="Allow search to descend recursively into any directories "
"encountered."
)
parser.add_argument(
"--skip_files", nargs="*", default=[],
help="File pattern(s) to skip. You can specify wildcards like '*.txt' "
"(but you will have to enclose that pattern in quotes under "
"UNIX-like operating systems). The basename of each file will be "
"tested against these filenames/patterns. Consider including "
"Scalpel's 'audit.txt'."
)
parser.add_argument(
"--filetypes", nargs="+", default=FILETYPES,
help="File types to check. Options: {}".format(FILETYPES)
)
parser.add_argument(
"--move_to",
help="If the file is recognized as one of the specified file types, "
"move it to the directory specified here."
)
parser.add_argument(
"--delete_if_not_specified_file_type", action="store_true",
help="If a file is NOT recognized as one of the specified file types, "
"delete it. VERY DANGEROUS."
)
parser.add_argument(
"--run_repeatedly", type=int,
help="Run the tool repeatedly with a pause of <run_repeatedly> "
"seconds between runs. (For this to work well with the move/"
"delete options, you should specify one or more DIRECTORIES in "
"the 'filename' arguments, not files, and you will need the "
"--recursive option.)"
)
parser.add_argument(
"--nprocesses", type=int, default=multiprocessing.cpu_count(),
help="Specify the number of processes to run in parallel."
)
parser.add_argument(
"--verbose", action="store_true",
help="Verbose output"
)
parser.add_argument(
"--show_zip_output", action="store_true",
help="Verbose output from the external 'zip' tool"
)
args = parser.parse_args()
main_only_quicksetup_rootlogger(
level=logging.DEBUG if args.verbose else logging.INFO,
with_process_id=True
)
# Further argument checks
if args.move_to:
if not os.path.isdir(args.move_to):
raise ValueError("Destination directory {!r} is not a "
"directory".format(args.move_to))
if not args.filetypes:
raise ValueError("No file type to scan for")
filetypes = [ft.lower() for ft in args.filetypes]
if any(ft not in FILETYPES for ft in filetypes):
raise ValueError("Invalid filetypes; choose from {}".format(FILETYPES))
assert shutil.which("zip"), "Need 'zip' tool!"
# Repeated scanning loop
while True:
log.info("Starting scan.")
log.info("- Looking for filetypes {}", filetypes)
log.info("- Scanning files/directories {!r}{}",
args.filename,
" recursively" if args.recursive else "")
log.info("- Skipping files matching {!r}", args.skip_files)
log.info("- Using {} simultaneous processes", args.nprocesses)
if args.move_to:
log.info("- Moving target files to " + args.move_to)
if args.delete_if_not_specified_file_type:
log.info("- Deleting non-target files.")
# Iterate through files
pool = multiprocessing.Pool(processes=args.nprocesses)
for filename in gen_filenames(starting_filenames=args.filename,
recursive=args.recursive):
src_basename = os.path.basename(filename)
if any(fnmatch.fnmatch(src_basename, pattern)
for pattern in args.skip_files):
log.info("Skipping file as ordered: " + filename)
continue
exists, locked = exists_locked(filename)
if locked or not exists:
log.info("Skipping currently inaccessible file: " + filename)
continue
kwargs = {
'filename': filename,
'filetypes': filetypes,
'move_to': args.move_to,
'delete_if_not_specified_file_type':
args.delete_if_not_specified_file_type,
'show_zip_output': args.show_zip_output,
}
# log.critical("start")
pool.apply_async(process_file, [], kwargs)
# result = pool.apply_async(process_file, [], kwargs)
# result.get() # will re-raise any child exceptions
# ... but it waits for the process to complete! That's no help.
# log.critical("next")
# ... https://stackoverflow.com/questions/22094852/how-to-catch-exceptions-in-workers-in-multiprocessing # noqa
pool.close()
pool.join()
log.info("Finished scan.")
if args.run_repeatedly is None:
break
log.info("Sleeping for {} s...", args.run_repeatedly)
sleep(args.run_repeatedly) | def function[main, parameter[]]:
constant[
Command-line handler for the ``find_recovered_openxml`` tool.
Use the ``--help`` option for help.
]
variable[parser] assign[=] call[name[ArgumentParser], parameter[]]
call[name[parser].add_argument, parameter[constant[filename]]]
call[name[parser].add_argument, parameter[constant[--recursive]]]
call[name[parser].add_argument, parameter[constant[--skip_files]]]
call[name[parser].add_argument, parameter[constant[--filetypes]]]
call[name[parser].add_argument, parameter[constant[--move_to]]]
call[name[parser].add_argument, parameter[constant[--delete_if_not_specified_file_type]]]
call[name[parser].add_argument, parameter[constant[--run_repeatedly]]]
call[name[parser].add_argument, parameter[constant[--nprocesses]]]
call[name[parser].add_argument, parameter[constant[--verbose]]]
call[name[parser].add_argument, parameter[constant[--show_zip_output]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[]]
call[name[main_only_quicksetup_rootlogger], parameter[]]
if name[args].move_to begin[:]
if <ast.UnaryOp object at 0x7da1b1734cd0> begin[:]
<ast.Raise object at 0x7da1b1735000>
if <ast.UnaryOp object at 0x7da1b17359f0> begin[:]
<ast.Raise object at 0x7da1b1735f30>
variable[filetypes] assign[=] <ast.ListComp object at 0x7da1b173ee60>
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b173f1f0>]] begin[:]
<ast.Raise object at 0x7da1b173f400>
assert[call[name[shutil].which, parameter[constant[zip]]]]
while constant[True] begin[:]
call[name[log].info, parameter[constant[Starting scan.]]]
call[name[log].info, parameter[constant[- Looking for filetypes {}], name[filetypes]]]
call[name[log].info, parameter[constant[- Scanning files/directories {!r}{}], name[args].filename, <ast.IfExp object at 0x7da1b173d030>]]
call[name[log].info, parameter[constant[- Skipping files matching {!r}], name[args].skip_files]]
call[name[log].info, parameter[constant[- Using {} simultaneous processes], name[args].nprocesses]]
if name[args].move_to begin[:]
call[name[log].info, parameter[binary_operation[constant[- Moving target files to ] + name[args].move_to]]]
if name[args].delete_if_not_specified_file_type begin[:]
call[name[log].info, parameter[constant[- Deleting non-target files.]]]
variable[pool] assign[=] call[name[multiprocessing].Pool, parameter[]]
for taget[name[filename]] in starred[call[name[gen_filenames], parameter[]]] begin[:]
variable[src_basename] assign[=] call[name[os].path.basename, parameter[name[filename]]]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b173e500>]] begin[:]
call[name[log].info, parameter[binary_operation[constant[Skipping file as ordered: ] + name[filename]]]]
continue
<ast.Tuple object at 0x7da1b173f9d0> assign[=] call[name[exists_locked], parameter[name[filename]]]
if <ast.BoolOp object at 0x7da1b173f9a0> begin[:]
call[name[log].info, parameter[binary_operation[constant[Skipping currently inaccessible file: ] + name[filename]]]]
continue
variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da1b173c0a0>, <ast.Constant object at 0x7da1b173dea0>, <ast.Constant object at 0x7da1b173e3b0>, <ast.Constant object at 0x7da1b173c130>, <ast.Constant object at 0x7da1b173c7c0>], [<ast.Name object at 0x7da1b173e110>, <ast.Name object at 0x7da1b173e0b0>, <ast.Attribute object at 0x7da1b173ea40>, <ast.Attribute object at 0x7da1b173e0e0>, <ast.Attribute object at 0x7da1b173ea10>]]
call[name[pool].apply_async, parameter[name[process_file], list[[]], name[kwargs]]]
call[name[pool].close, parameter[]]
call[name[pool].join, parameter[]]
call[name[log].info, parameter[constant[Finished scan.]]]
if compare[name[args].run_repeatedly is constant[None]] begin[:]
break
call[name[log].info, parameter[constant[Sleeping for {} s...], name[args].run_repeatedly]]
call[name[sleep], parameter[name[args].run_repeatedly]] | keyword[def] identifier[main] ()-> keyword[None] :
literal[string]
identifier[parser] = identifier[ArgumentParser] (
identifier[formatter_class] = identifier[RawDescriptionHelpFormatter] ,
identifier[description] = literal[string] . identifier[format] (
identifier[DOCX_CONTENTS_REGEX_STR] = identifier[DOCX_CONTENTS_REGEX_STR] ,
identifier[PPTX_CONTENTS_REGEX_STR] = identifier[PPTX_CONTENTS_REGEX_STR] ,
identifier[XLSX_CONTENTS_REGEX_STR] = identifier[XLSX_CONTENTS_REGEX_STR] ,
)
)
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[nargs] = literal[string] ,
identifier[help] = literal[string]
literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[nargs] = literal[string] , identifier[default] =[],
identifier[help] = literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[nargs] = literal[string] , identifier[default] = identifier[FILETYPES] ,
identifier[help] = literal[string] . identifier[format] ( identifier[FILETYPES] )
)
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[help] = literal[string]
literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[type] = identifier[int] ,
identifier[help] = literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[type] = identifier[int] , identifier[default] = identifier[multiprocessing] . identifier[cpu_count] (),
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[action] = literal[string] ,
identifier[help] = literal[string]
)
identifier[args] = identifier[parser] . identifier[parse_args] ()
identifier[main_only_quicksetup_rootlogger] (
identifier[level] = identifier[logging] . identifier[DEBUG] keyword[if] identifier[args] . identifier[verbose] keyword[else] identifier[logging] . identifier[INFO] ,
identifier[with_process_id] = keyword[True]
)
keyword[if] identifier[args] . identifier[move_to] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[args] . identifier[move_to] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[args] . identifier[move_to] ))
keyword[if] keyword[not] identifier[args] . identifier[filetypes] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[filetypes] =[ identifier[ft] . identifier[lower] () keyword[for] identifier[ft] keyword[in] identifier[args] . identifier[filetypes] ]
keyword[if] identifier[any] ( identifier[ft] keyword[not] keyword[in] identifier[FILETYPES] keyword[for] identifier[ft] keyword[in] identifier[filetypes] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[FILETYPES] ))
keyword[assert] identifier[shutil] . identifier[which] ( literal[string] ), literal[string]
keyword[while] keyword[True] :
identifier[log] . identifier[info] ( literal[string] )
identifier[log] . identifier[info] ( literal[string] , identifier[filetypes] )
identifier[log] . identifier[info] ( literal[string] ,
identifier[args] . identifier[filename] ,
literal[string] keyword[if] identifier[args] . identifier[recursive] keyword[else] literal[string] )
identifier[log] . identifier[info] ( literal[string] , identifier[args] . identifier[skip_files] )
identifier[log] . identifier[info] ( literal[string] , identifier[args] . identifier[nprocesses] )
keyword[if] identifier[args] . identifier[move_to] :
identifier[log] . identifier[info] ( literal[string] + identifier[args] . identifier[move_to] )
keyword[if] identifier[args] . identifier[delete_if_not_specified_file_type] :
identifier[log] . identifier[info] ( literal[string] )
identifier[pool] = identifier[multiprocessing] . identifier[Pool] ( identifier[processes] = identifier[args] . identifier[nprocesses] )
keyword[for] identifier[filename] keyword[in] identifier[gen_filenames] ( identifier[starting_filenames] = identifier[args] . identifier[filename] ,
identifier[recursive] = identifier[args] . identifier[recursive] ):
identifier[src_basename] = identifier[os] . identifier[path] . identifier[basename] ( identifier[filename] )
keyword[if] identifier[any] ( identifier[fnmatch] . identifier[fnmatch] ( identifier[src_basename] , identifier[pattern] )
keyword[for] identifier[pattern] keyword[in] identifier[args] . identifier[skip_files] ):
identifier[log] . identifier[info] ( literal[string] + identifier[filename] )
keyword[continue]
identifier[exists] , identifier[locked] = identifier[exists_locked] ( identifier[filename] )
keyword[if] identifier[locked] keyword[or] keyword[not] identifier[exists] :
identifier[log] . identifier[info] ( literal[string] + identifier[filename] )
keyword[continue]
identifier[kwargs] ={
literal[string] : identifier[filename] ,
literal[string] : identifier[filetypes] ,
literal[string] : identifier[args] . identifier[move_to] ,
literal[string] :
identifier[args] . identifier[delete_if_not_specified_file_type] ,
literal[string] : identifier[args] . identifier[show_zip_output] ,
}
identifier[pool] . identifier[apply_async] ( identifier[process_file] ,[], identifier[kwargs] )
identifier[pool] . identifier[close] ()
identifier[pool] . identifier[join] ()
identifier[log] . identifier[info] ( literal[string] )
keyword[if] identifier[args] . identifier[run_repeatedly] keyword[is] keyword[None] :
keyword[break]
identifier[log] . identifier[info] ( literal[string] , identifier[args] . identifier[run_repeatedly] )
identifier[sleep] ( identifier[args] . identifier[run_repeatedly] ) | def main() -> None:
"""
Command-line handler for the ``find_recovered_openxml`` tool.
Use the ``--help`` option for help.
"""
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter, description='\nTool to recognize and rescue Microsoft Office OpenXML files, even if they have\ngarbage appended to them. \n\n- Rationale: when you have accidentally deleted files from an NTFS disk, and\n they really matter, you should (a) stop what you\'re doing; (b) clone the disk\n to an image file using "dd" under Linux; (c) perform all subsequent \n operations on the cloned image (in read-only mode). Those steps might \n include:\n - ntfsundelete, to find files that the filesystem is still aware of;\n - scalpel, to find files based on their contents.\n\n- Scalpel is great at finding stuff efficiently, but it works best when files\n can be defined by both a start (header) signature and an end (footer)\n signature. However, the Microsoft Office OpenXML file format has a \n recognizable header, but no standard footer. In these circumstances, Scalpel\n reads up to a certain limit that you specify in its configuration file. (To\n retrieve large Powerpoint files, this limit needs to be substantial, e.g.\n 50 Mb or more, depending on your ways of working with Powerpoint.)\n\n- That means that files emerging from a Scalpel search for DOCX/PPTX/XLSX files\n may be\n - false positives, having nothing to do with Office;\n - clean Office files (the least likely category!);\n - Office files with garbage stuck on the end.\n \n- The OpenXML file format is just a zip file. If you stick too much garbage on\n the end of a zip file, zip readers will see it as corrupt. \n \n- THIS TOOL detects (and optionally moves) potentially corrupted zipfiles based \n on file contents, by unzipping the file and checking for "inner" files with\n names like:\n\n File type Contents filename signature (regular expression)\n ----------------------------------------------------------------\n DOCX {DOCX_CONTENTS_REGEX_STR} \n PPTX {PPTX_CONTENTS_REGEX_STR}\n XLSX {XLSX_CONTENTS_REGEX_STR}\n\n- WARNING: it\'s possible for an OpenXML file to contain more than one of these.\n If so, they may be mis-classified.\n\n- If a file is not immediately readable as a zip, it uses Linux\'s "zip -FF" to \n repair zip files with corrupted ends, and tries again.\n \n- Having found valid-looking files, you can elect to move them elsewhere.\n\n- As an additional and VERY DANGEROUS operation, you can elect to delete files\n that this tool doesn\'t recognize. (Why? Because a 450Gb disk might produce\n well in excess of 1.7Tb of candidate files; many will be false positives and\n even the true positives will all be expanded to your file size limit, e.g.\n 50 Mb. You may have a problem with available disk space, so running this tool\n regularly allows you to clear up the junk. Use the --run_every option to help \n with this.)\n\n '.format(DOCX_CONTENTS_REGEX_STR=DOCX_CONTENTS_REGEX_STR, PPTX_CONTENTS_REGEX_STR=PPTX_CONTENTS_REGEX_STR, XLSX_CONTENTS_REGEX_STR=XLSX_CONTENTS_REGEX_STR))
    parser.add_argument('filename', nargs='+', help='File(s) to check. You can also specify directories if you use --recursive')
parser.add_argument('--recursive', action='store_true', help='Allow search to descend recursively into any directories encountered.')
parser.add_argument('--skip_files', nargs='*', default=[], help="File pattern(s) to skip. You can specify wildcards like '*.txt' (but you will have to enclose that pattern in quotes under UNIX-like operating systems). The basename of each file will be tested against these filenames/patterns. Consider including Scalpel's 'audit.txt'.")
parser.add_argument('--filetypes', nargs='+', default=FILETYPES, help='File types to check. Options: {}'.format(FILETYPES))
parser.add_argument('--move_to', help='If the file is recognized as one of the specified file types, move it to the directory specified here.')
parser.add_argument('--delete_if_not_specified_file_type', action='store_true', help='If a file is NOT recognized as one of the specified file types, delete it. VERY DANGEROUS.')
parser.add_argument('--run_repeatedly', type=int, help="Run the tool repeatedly with a pause of <run_repeatedly> seconds between runs. (For this to work well with the move/delete options, you should specify one or more DIRECTORIES in the 'filename' arguments, not files, and you will need the --recursive option.)")
parser.add_argument('--nprocesses', type=int, default=multiprocessing.cpu_count(), help='Specify the number of processes to run in parallel.')
parser.add_argument('--verbose', action='store_true', help='Verbose output')
parser.add_argument('--show_zip_output', action='store_true', help="Verbose output from the external 'zip' tool")
args = parser.parse_args()
main_only_quicksetup_rootlogger(level=logging.DEBUG if args.verbose else logging.INFO, with_process_id=True)
# Further argument checks
if args.move_to:
if not os.path.isdir(args.move_to):
raise ValueError('Destination directory {!r} is not a directory'.format(args.move_to)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not args.filetypes:
raise ValueError('No file type to scan for') # depends on [control=['if'], data=[]]
filetypes = [ft.lower() for ft in args.filetypes]
if any((ft not in FILETYPES for ft in filetypes)):
raise ValueError('Invalid filetypes; choose from {}'.format(FILETYPES)) # depends on [control=['if'], data=[]]
assert shutil.which('zip'), "Need 'zip' tool!"
# Repeated scanning loop
while True:
log.info('Starting scan.')
log.info('- Looking for filetypes {}', filetypes)
log.info('- Scanning files/directories {!r}{}', args.filename, ' recursively' if args.recursive else '')
log.info('- Skipping files matching {!r}', args.skip_files)
log.info('- Using {} simultaneous processes', args.nprocesses)
if args.move_to:
log.info('- Moving target files to ' + args.move_to) # depends on [control=['if'], data=[]]
if args.delete_if_not_specified_file_type:
log.info('- Deleting non-target files.') # depends on [control=['if'], data=[]]
# Iterate through files
pool = multiprocessing.Pool(processes=args.nprocesses)
for filename in gen_filenames(starting_filenames=args.filename, recursive=args.recursive):
src_basename = os.path.basename(filename)
if any((fnmatch.fnmatch(src_basename, pattern) for pattern in args.skip_files)):
log.info('Skipping file as ordered: ' + filename)
continue # depends on [control=['if'], data=[]]
(exists, locked) = exists_locked(filename)
if locked or not exists:
log.info('Skipping currently inaccessible file: ' + filename)
continue # depends on [control=['if'], data=[]]
kwargs = {'filename': filename, 'filetypes': filetypes, 'move_to': args.move_to, 'delete_if_not_specified_file_type': args.delete_if_not_specified_file_type, 'show_zip_output': args.show_zip_output}
# log.critical("start")
pool.apply_async(process_file, [], kwargs) # depends on [control=['for'], data=['filename']]
# result = pool.apply_async(process_file, [], kwargs)
# result.get() # will re-raise any child exceptions
# ... but it waits for the process to complete! That's no help.
# log.critical("next")
# ... https://stackoverflow.com/questions/22094852/how-to-catch-exceptions-in-workers-in-multiprocessing # noqa
pool.close()
pool.join()
log.info('Finished scan.')
if args.run_repeatedly is None:
break # depends on [control=['if'], data=[]]
log.info('Sleeping for {} s...', args.run_repeatedly)
sleep(args.run_repeatedly) # depends on [control=['while'], data=[]] |
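# A minimal, self-contained sketch of the exception-surfacing pattern hinted
# at in the commented-out code above: keep the AsyncResult handles and call
# .get() only after close()/join(), so submissions never block mid-loop but
# worker exceptions are still re-raised and logged. The _work function below
# is a stand-in, not part of the tool.
import logging
import multiprocessing

log = logging.getLogger(__name__)

def _work(n):
    if n == 2:
        raise ValueError('boom')
    return n * n

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    pool = multiprocessing.Pool(processes=2)
    results = [pool.apply_async(_work, (n,)) for n in range(4)]
    pool.close()
    pool.join()  # all workers have finished; nothing waited per-task
    for r in results:
        try:
            r.get()  # re-raises the worker's exception in the parent
        except Exception:
            log.exception('worker failed')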
def hbas(self):
"""
:class:`~zhmcclient.HbaManager`: Access to the :term:`HBAs <HBA>` in
        this Partition.

        If the "dpm-storage-management" feature is enabled, this property is
`None`.
"""
        # We do some lazy loading here.
if not self._hbas:
try:
dpm_sm = self.feature_enabled('dpm-storage-management')
except ValueError:
dpm_sm = False
if not dpm_sm:
self._hbas = HbaManager(self)
return self._hbas | def function[hbas, parameter[self]]:
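# A self-contained sketch of the same lazy-initialization pattern, assuming
# only that feature_enabled() raises ValueError when no feature list exists;
# ChildManager and Resource are stand-ins, not zhmcclient classes.
class ChildManager:
    def __init__(self, parent):
        self.parent = parent

class Resource:
    def __init__(self):
        self._children = None  # created on first property access

    def feature_enabled(self, name):
        raise ValueError('no feature list')  # stand-in for the real lookup

    @property
    def children(self):
        if not self._children:
            try:
                enabled = self.feature_enabled('dpm-storage-management')
            except ValueError:
                enabled = False  # no feature list means "not enabled"
            if not enabled:
                self._children = ChildManager(self)
        return self._children

assert isinstance(Resource().children, ChildManager)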
constant[
:class:`~zhmcclient.HbaManager`: Access to the :term:`HBAs <HBA>` in
this Partition.
If the "dpm-storage-management" feature is enabled, this property is
`None`.
]
if <ast.UnaryOp object at 0x7da20c991960> begin[:]
<ast.Try object at 0x7da20c990820>
if <ast.UnaryOp object at 0x7da20c9937c0> begin[:]
name[self]._hbas assign[=] call[name[HbaManager], parameter[name[self]]]
return[name[self]._hbas] | keyword[def] identifier[hbas] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_hbas] :
keyword[try] :
identifier[dpm_sm] = identifier[self] . identifier[feature_enabled] ( literal[string] )
keyword[except] identifier[ValueError] :
identifier[dpm_sm] = keyword[False]
keyword[if] keyword[not] identifier[dpm_sm] :
identifier[self] . identifier[_hbas] = identifier[HbaManager] ( identifier[self] )
keyword[return] identifier[self] . identifier[_hbas] | def hbas(self):
"""
:class:`~zhmcclient.HbaManager`: Access to the :term:`HBAs <HBA>` in
this Partition.
If the "dpm-storage-management" feature is enabled, this property is
`None`.
"""
# We do here some lazy loading.
if not self._hbas:
try:
dpm_sm = self.feature_enabled('dpm-storage-management') # depends on [control=['try'], data=[]]
except ValueError:
dpm_sm = False # depends on [control=['except'], data=[]]
if not dpm_sm:
self._hbas = HbaManager(self) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return self._hbas |
def adapt_files(solver):
"""
Rename and remove files whenever necessary.
"""
print("adapting {0}'s files".format(solver))
root = os.path.join('solvers', solver)
for arch in to_extract[solver]:
arch = os.path.join(root, arch)
extract_archive(arch, solver, put_inside=True)
for fnames in to_move[solver]:
old = os.path.join(root, fnames[0])
new = os.path.join(root, fnames[1])
os.rename(old, new)
for f in to_remove[solver]:
f = os.path.join(root, f)
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.remove(f) | def function[adapt_files, parameter[solver]]:
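# adapt_files() relies on three module-level dicts keyed by solver name; a
# hypothetical sketch of their expected shapes (these entries are invented
# for illustration, not taken from the real installer):
to_extract = {'minisat22': ['minisat-2.2.0.tar.gz']}        # archives to unpack in place
to_move = {'minisat22': [('minisat/core', 'core')]}         # (old, new) rename pairs
to_remove = {'minisat22': ['minisat-2.2.0.tar.gz', 'doc']}  # leftover files/dirs to delete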
constant[
Rename and remove files whenever necessary.
]
call[name[print], parameter[call[constant[adapting {0}'s files].format, parameter[name[solver]]]]]
variable[root] assign[=] call[name[os].path.join, parameter[constant[solvers], name[solver]]]
for taget[name[arch]] in starred[call[name[to_extract]][name[solver]]] begin[:]
variable[arch] assign[=] call[name[os].path.join, parameter[name[root], name[arch]]]
call[name[extract_archive], parameter[name[arch], name[solver]]]
for taget[name[fnames]] in starred[call[name[to_move]][name[solver]]] begin[:]
variable[old] assign[=] call[name[os].path.join, parameter[name[root], call[name[fnames]][constant[0]]]]
variable[new] assign[=] call[name[os].path.join, parameter[name[root], call[name[fnames]][constant[1]]]]
call[name[os].rename, parameter[name[old], name[new]]]
for taget[name[f]] in starred[call[name[to_remove]][name[solver]]] begin[:]
variable[f] assign[=] call[name[os].path.join, parameter[name[root], name[f]]]
if call[name[os].path.isdir, parameter[name[f]]] begin[:]
call[name[shutil].rmtree, parameter[name[f]]] | keyword[def] identifier[adapt_files] ( identifier[solver] ):
literal[string]
identifier[print] ( literal[string] . identifier[format] ( identifier[solver] ))
identifier[root] = identifier[os] . identifier[path] . identifier[join] ( literal[string] , identifier[solver] )
keyword[for] identifier[arch] keyword[in] identifier[to_extract] [ identifier[solver] ]:
identifier[arch] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[arch] )
identifier[extract_archive] ( identifier[arch] , identifier[solver] , identifier[put_inside] = keyword[True] )
keyword[for] identifier[fnames] keyword[in] identifier[to_move] [ identifier[solver] ]:
identifier[old] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[fnames] [ literal[int] ])
identifier[new] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[fnames] [ literal[int] ])
identifier[os] . identifier[rename] ( identifier[old] , identifier[new] )
keyword[for] identifier[f] keyword[in] identifier[to_remove] [ identifier[solver] ]:
identifier[f] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[f] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[f] ):
identifier[shutil] . identifier[rmtree] ( identifier[f] )
keyword[else] :
identifier[os] . identifier[remove] ( identifier[f] ) | def adapt_files(solver):
"""
Rename and remove files whenever necessary.
"""
print("adapting {0}'s files".format(solver))
root = os.path.join('solvers', solver)
for arch in to_extract[solver]:
arch = os.path.join(root, arch)
extract_archive(arch, solver, put_inside=True) # depends on [control=['for'], data=['arch']]
for fnames in to_move[solver]:
old = os.path.join(root, fnames[0])
new = os.path.join(root, fnames[1])
os.rename(old, new) # depends on [control=['for'], data=['fnames']]
for f in to_remove[solver]:
f = os.path.join(root, f)
if os.path.isdir(f):
shutil.rmtree(f) # depends on [control=['if'], data=[]]
else:
os.remove(f) # depends on [control=['for'], data=['f']] |
def eager_send(chainlet, chunks):
"""
    Eager version of `lazy_send`, evaluating the return value immediately.

    :note: The return value of an ``n`` to ``m`` link is considered fully evaluated.
:param chainlet: the chainlet to receive and return data
:type chainlet: chainlink.ChainLink
:param chunks: the stream slice of data to pass to ``chainlet``
:type chunks: iterable
:return: the resulting stream slice of data returned by ``chainlet``
:rtype: iterable
"""
fork, join = chainlet.chain_fork, chainlet.chain_join
if fork and join:
return _send_n_get_m(chainlet, chunks)
elif fork:
return tuple(_lazy_send_1_get_m(chainlet, chunks))
elif join:
return tuple(_lazy_send_n_get_1(chainlet, chunks))
else:
return tuple(_lazy_send_1_get_1(chainlet, chunks)) | def function[eager_send, parameter[chainlet, chunks]]:
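# A hedged, self-contained toy showing what the four dispatch branches mean.
# The real _send_n_get_m / _lazy_send_* helpers are not part of this excerpt,
# so the bodies below are assumptions about their contracts, not library code.
def toy_eager_send(chainlet, chunks):
    fork, join = chainlet.chain_fork, chainlet.chain_join
    if fork and join:  # n -> m: the chainlet sees the whole slice at once
        return chainlet.send(list(chunks))
    if fork:           # 1 -> m: each chunk fans out into several chunks
        return tuple(out for c in chunks for out in chainlet.send(c))
    if join:           # n -> 1: the whole slice merges into one chunk
        return (chainlet.send(list(chunks)),)
    return tuple(chainlet.send(c) for c in chunks)  # 1 -> 1

class Doubler:
    chain_fork = chain_join = False
    def send(self, chunk):
        return chunk * 2

assert toy_eager_send(Doubler(), [1, 2, 3]) == (2, 4, 6)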
constant[
Eager version of `lazy_send` evaluating the return value immediately
:note: The return value by an ``n`` to ``m`` link is considered fully evaluated.
:param chainlet: the chainlet to receive and return data
:type chainlet: chainlink.ChainLink
:param chunks: the stream slice of data to pass to ``chainlet``
:type chunks: iterable
:return: the resulting stream slice of data returned by ``chainlet``
:rtype: iterable
]
<ast.Tuple object at 0x7da204961900> assign[=] tuple[[<ast.Attribute object at 0x7da2049616c0>, <ast.Attribute object at 0x7da2049623b0>]]
if <ast.BoolOp object at 0x7da204962350> begin[:]
return[call[name[_send_n_get_m], parameter[name[chainlet], name[chunks]]]] | keyword[def] identifier[eager_send] ( identifier[chainlet] , identifier[chunks] ):
literal[string]
identifier[fork] , identifier[join] = identifier[chainlet] . identifier[chain_fork] , identifier[chainlet] . identifier[chain_join]
keyword[if] identifier[fork] keyword[and] identifier[join] :
keyword[return] identifier[_send_n_get_m] ( identifier[chainlet] , identifier[chunks] )
keyword[elif] identifier[fork] :
keyword[return] identifier[tuple] ( identifier[_lazy_send_1_get_m] ( identifier[chainlet] , identifier[chunks] ))
keyword[elif] identifier[join] :
keyword[return] identifier[tuple] ( identifier[_lazy_send_n_get_1] ( identifier[chainlet] , identifier[chunks] ))
keyword[else] :
keyword[return] identifier[tuple] ( identifier[_lazy_send_1_get_1] ( identifier[chainlet] , identifier[chunks] )) | def eager_send(chainlet, chunks):
"""
Eager version of `lazy_send` evaluating the return value immediately
:note: The return value by an ``n`` to ``m`` link is considered fully evaluated.
:param chainlet: the chainlet to receive and return data
:type chainlet: chainlink.ChainLink
:param chunks: the stream slice of data to pass to ``chainlet``
:type chunks: iterable
:return: the resulting stream slice of data returned by ``chainlet``
:rtype: iterable
"""
(fork, join) = (chainlet.chain_fork, chainlet.chain_join)
if fork and join:
return _send_n_get_m(chainlet, chunks) # depends on [control=['if'], data=[]]
elif fork:
return tuple(_lazy_send_1_get_m(chainlet, chunks)) # depends on [control=['if'], data=[]]
elif join:
return tuple(_lazy_send_n_get_1(chainlet, chunks)) # depends on [control=['if'], data=[]]
else:
return tuple(_lazy_send_1_get_1(chainlet, chunks)) |
def add_edges_from(self, ebunch, **kwargs):
"""
Add all the edges in ebunch.
If nodes referred in the ebunch are not already present, they
        will be automatically added. Node names can be any hashable Python object.

        Parameters
----------
ebunch : list, array-like
List of edges to add. Each edge must be of the form of
((start, time_slice), (end, time_slice)).
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0))])
>>> dbn.nodes()
['G', 'I', 'D']
>>> dbn.edges()
[(('D', 1), ('G', 1)),
(('I', 1), ('G', 1)),
(('D', 0), ('G', 0)),
(('I', 0), ('G', 0))]
"""
for edge in ebunch:
self.add_edge(edge[0], edge[1]) | def function[add_edges_from, parameter[self, ebunch]]:
constant[
Add all the edges in ebunch.
If nodes referred in the ebunch are not already present, they
will be automatically added. Node names can be any hashable python object.
Parameters
----------
ebunch : list, array-like
List of edges to add. Each edge must be of the form of
((start, time_slice), (end, time_slice)).
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0))])
>>> dbn.nodes()
['G', 'I', 'D']
>>> dbn.edges()
[(('D', 1), ('G', 1)),
(('I', 1), ('G', 1)),
(('D', 0), ('G', 0)),
(('I', 0), ('G', 0))]
]
for taget[name[edge]] in starred[name[ebunch]] begin[:]
call[name[self].add_edge, parameter[call[name[edge]][constant[0]], call[name[edge]][constant[1]]]] | keyword[def] identifier[add_edges_from] ( identifier[self] , identifier[ebunch] ,** identifier[kwargs] ):
literal[string]
keyword[for] identifier[edge] keyword[in] identifier[ebunch] :
identifier[self] . identifier[add_edge] ( identifier[edge] [ literal[int] ], identifier[edge] [ literal[int] ]) | def add_edges_from(self, ebunch, **kwargs):
"""
Add all the edges in ebunch.
If nodes referred in the ebunch are not already present, they
will be automatically added. Node names can be any hashable python object.
Parameters
----------
ebunch : list, array-like
List of edges to add. Each edge must be of the form of
((start, time_slice), (end, time_slice)).
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0))])
>>> dbn.nodes()
['G', 'I', 'D']
>>> dbn.edges()
[(('D', 1), ('G', 1)),
(('I', 1), ('G', 1)),
(('D', 0), ('G', 0)),
(('I', 0), ('G', 0))]
"""
for edge in ebunch:
self.add_edge(edge[0], edge[1]) # depends on [control=['for'], data=['edge']] |
def write_psd(self, psds, group=None):
"""Writes PSD for each IFO to file.
Parameters
-----------
psds : {dict, FrequencySeries}
A dict of FrequencySeries where the key is the IFO.
group : {None, str}
The group to write the psd to. Default is ``data_group``.
"""
subgroup = self.data_group + "/{ifo}/psds/0"
if group is None:
group = subgroup
else:
group = '/'.join([group, subgroup])
for ifo in psds:
self[group.format(ifo=ifo)] = psds[ifo]
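# A hedged re-creation of the resulting HDF5 layout using plain h5py and
# numpy, mimicking a FrequencySeries with an array plus its delta_f attribute;
# the 'data' prefix stands in for self.data_group and is an assumption here:
import h5py
import numpy as np

psds = {'H1': np.ones(2049), 'L1': np.ones(2049)}
with h5py.File('psds_demo.hdf', 'w') as fp:
    for ifo, psd in psds.items():
        dset = fp.create_dataset('data/{}/psds/0'.format(ifo), data=psd)
        dset.attrs['delta_f'] = 0.25  # a real FrequencySeries carries this itself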
self[group.format(ifo=ifo)].attrs['delta_f'] = psds[ifo].delta_f | def function[write_psd, parameter[self, psds, group]]:
constant[Writes PSD for each IFO to file.
Parameters
-----------
psds : {dict, FrequencySeries}
A dict of FrequencySeries where the key is the IFO.
group : {None, str}
The group to write the psd to. Default is ``data_group``.
]
variable[subgroup] assign[=] binary_operation[name[self].data_group + constant[/{ifo}/psds/0]]
if compare[name[group] is constant[None]] begin[:]
variable[group] assign[=] name[subgroup]
for taget[name[ifo]] in starred[name[psds]] begin[:]
call[name[self]][call[name[group].format, parameter[]]] assign[=] call[name[psds]][name[ifo]]
call[call[name[self]][call[name[group].format, parameter[]]].attrs][constant[delta_f]] assign[=] call[name[psds]][name[ifo]].delta_f | keyword[def] identifier[write_psd] ( identifier[self] , identifier[psds] , identifier[group] = keyword[None] ):
literal[string]
identifier[subgroup] = identifier[self] . identifier[data_group] + literal[string]
keyword[if] identifier[group] keyword[is] keyword[None] :
identifier[group] = identifier[subgroup]
keyword[else] :
identifier[group] = literal[string] . identifier[join] ([ identifier[group] , identifier[subgroup] ])
keyword[for] identifier[ifo] keyword[in] identifier[psds] :
identifier[self] [ identifier[group] . identifier[format] ( identifier[ifo] = identifier[ifo] )]= identifier[psds] [ identifier[ifo] ]
identifier[self] [ identifier[group] . identifier[format] ( identifier[ifo] = identifier[ifo] )]. identifier[attrs] [ literal[string] ]= identifier[psds] [ identifier[ifo] ]. identifier[delta_f] | def write_psd(self, psds, group=None):
"""Writes PSD for each IFO to file.
Parameters
-----------
psds : {dict, FrequencySeries}
A dict of FrequencySeries where the key is the IFO.
group : {None, str}
The group to write the psd to. Default is ``data_group``.
"""
subgroup = self.data_group + '/{ifo}/psds/0'
if group is None:
group = subgroup # depends on [control=['if'], data=['group']]
else:
group = '/'.join([group, subgroup])
for ifo in psds:
self[group.format(ifo=ifo)] = psds[ifo]
self[group.format(ifo=ifo)].attrs['delta_f'] = psds[ifo].delta_f # depends on [control=['for'], data=['ifo']] |
def major_complex(network, state):
"""Return the major complex of the network.
Args:
network (Network): The |Network| of interest.
state (tuple[int]): The state of the network (a binary tuple).
Returns:
SystemIrreducibilityAnalysis: The |SIA| for the |Subsystem| with
maximal |big_phi|.
"""
log.info('Calculating major complex...')
result = complexes(network, state)
if result:
result = max(result)
else:
empty_subsystem = Subsystem(network, state, ())
result = _null_sia(empty_subsystem)
log.info("Finished calculating major complex.")
return result | def function[major_complex, parameter[network, state]]:
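# Hypothetical usage, assuming the pyphi-style API this function comes from
# (the tpm/cm/state values are placeholders, not meaningful data):
# import pyphi
# network = pyphi.Network(tpm, cm=cm)
# sia = major_complex(network, state=(0, 1, 0))
# print(sia.phi, sia.subsystem)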
constant[Return the major complex of the network.
Args:
network (Network): The |Network| of interest.
state (tuple[int]): The state of the network (a binary tuple).
Returns:
SystemIrreducibilityAnalysis: The |SIA| for the |Subsystem| with
maximal |big_phi|.
]
call[name[log].info, parameter[constant[Calculating major complex...]]]
variable[result] assign[=] call[name[complexes], parameter[name[network], name[state]]]
if name[result] begin[:]
variable[result] assign[=] call[name[max], parameter[name[result]]]
call[name[log].info, parameter[constant[Finished calculating major complex.]]]
return[name[result]] | keyword[def] identifier[major_complex] ( identifier[network] , identifier[state] ):
literal[string]
identifier[log] . identifier[info] ( literal[string] )
identifier[result] = identifier[complexes] ( identifier[network] , identifier[state] )
keyword[if] identifier[result] :
identifier[result] = identifier[max] ( identifier[result] )
keyword[else] :
identifier[empty_subsystem] = identifier[Subsystem] ( identifier[network] , identifier[state] ,())
identifier[result] = identifier[_null_sia] ( identifier[empty_subsystem] )
identifier[log] . identifier[info] ( literal[string] )
keyword[return] identifier[result] | def major_complex(network, state):
"""Return the major complex of the network.
Args:
network (Network): The |Network| of interest.
state (tuple[int]): The state of the network (a binary tuple).
Returns:
SystemIrreducibilityAnalysis: The |SIA| for the |Subsystem| with
maximal |big_phi|.
"""
log.info('Calculating major complex...')
result = complexes(network, state)
if result:
result = max(result) # depends on [control=['if'], data=[]]
else:
empty_subsystem = Subsystem(network, state, ())
result = _null_sia(empty_subsystem)
log.info('Finished calculating major complex.')
return result |
def parse(self, generator):
"""Parse an iterable source of strings into a generator"""
gen = iter(generator)
for line in gen:
block = {}
for rule in self.rules:
if rule[0](line):
block = rule[1](line, gen)
break
yield block | def function[parse, parameter[self, generator]]:
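# A hedged usage sketch: each rule is assumed to be a (predicate, handler)
# pair, where the handler receives the current line plus the shared iterator
# so it can consume continuation lines. The toy names below are stand-ins.
def _is_header(line):
    return line.startswith('#')

def _read_header(line, gen):
    return {'type': 'header', 'text': line.lstrip('# ')}

class _ToyParser:
    rules = [(_is_header, _read_header)]
    parse = parse  # reuse the generator method defined above

blocks = list(_ToyParser().parse(['# Title', 'plain text']))
# -> [{'type': 'header', 'text': 'Title'}, {}]  (unmatched lines yield {})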
constant[Parse an iterable source of strings into a generator]
variable[gen] assign[=] call[name[iter], parameter[name[generator]]]
for taget[name[line]] in starred[name[gen]] begin[:]
variable[block] assign[=] dictionary[[], []]
for taget[name[rule]] in starred[name[self].rules] begin[:]
if call[call[name[rule]][constant[0]], parameter[name[line]]] begin[:]
variable[block] assign[=] call[call[name[rule]][constant[1]], parameter[name[line], name[gen]]]
break
<ast.Yield object at 0x7da20c6c4100> | keyword[def] identifier[parse] ( identifier[self] , identifier[generator] ):
literal[string]
identifier[gen] = identifier[iter] ( identifier[generator] )
keyword[for] identifier[line] keyword[in] identifier[gen] :
identifier[block] ={}
keyword[for] identifier[rule] keyword[in] identifier[self] . identifier[rules] :
keyword[if] identifier[rule] [ literal[int] ]( identifier[line] ):
identifier[block] = identifier[rule] [ literal[int] ]( identifier[line] , identifier[gen] )
keyword[break]
keyword[yield] identifier[block] | def parse(self, generator):
"""Parse an iterable source of strings into a generator"""
gen = iter(generator)
for line in gen:
block = {}
for rule in self.rules:
if rule[0](line):
block = rule[1](line, gen)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rule']]
yield block # depends on [control=['for'], data=['line']] |